From 514bfe27e4acc864da4e0fb1f21de555c7f92218 Mon Sep 17 00:00:00 2001
From: Oliver Gugger
Date: Fri, 20 Jun 2025 12:38:36 +0200
Subject: [PATCH 1/5] sqldb: fix sqlfluff complaints for migrations

---
 .sqlfluff                                     | 17 +
 tapdb/sqlc/migrations/000001_macaroons.up.sql | 2 +-
 tapdb/sqlc/migrations/000002_assets.up.sql | 133 +++--
 tapdb/sqlc/migrations/000003_addrs.up.sql | 12 +-
 tapdb/sqlc/migrations/000004_mssmt.up.sql | 20 +-
 tapdb/sqlc/migrations/000005_transfers.up.sql | 64 +--
 .../sqlc/migrations/000006_addr_event.up.sql | 24 +-
 .../sqlc/migrations/000007_universe.down.sql | 2 +-
 tapdb/sqlc/migrations/000007_universe.up.sql | 92 ++--
 .../000008_universe_events_timestamp.up.sql | 3 +-
 .../000009_universe_configs.down.sql | 2 +-
 .../migrations/000009_universe_configs.up.sql | 10 +-
 .../migrations/000010_universe_stats.down.sql | 2 +-
 .../migrations/000010_universe_stats.up.sql | 16 +-
 .../sqlc/migrations/000011_transfers.down.sql | 2 +-
 tapdb/sqlc/migrations/000011_transfers.up.sql | 4 +-
 .../migrations/000012_anchor_txid.down.sql | 2 +-
 .../sqlc/migrations/000012_anchor_txid.up.sql | 60 ++-
 ...00013_universe_fed_proof_sync_log.down.sql | 5 +-
 .../000013_universe_fed_proof_sync_log.up.sql | 17 +-
 .../migrations/000014_multiverse_tree.up.sql | 80 +--
 .../migrations/000015_asset_witnesses.up.sql | 5 +-
 .../000016_tapscript_trees.down.sql | 2 +-
 .../migrations/000016_tapscript_trees.up.sql | 36 +-
 ...000017_seedling_script_group_keys.down.sql | 2 +-
 .../000017_seedling_script_group_keys.up.sql | 15 +-
 ...9_managed_utxo_commitment_version.down.sql | 2 +-
 ...019_managed_utxo_commitment_version.up.sql | 2 +-
 .../migrations/000020_asset_unique_key.up.sql | 156 +++---
 ..._transfer_outputs_proof_delivered.down.sql | 2 +-
 .../000023_multiverse_tree_re_apply.up.sql | 83 +--
 ...iverse_optimization_indexes_queries.up.sql | 16 +-
 tapdb/sqlc/migrations/000025_burns.up.sql | 12 +-
 ...asset_group_version_customsubtree.down.sql | 26 +-
 ...6_asset_group_version_customsubtree.up.sql | 41 +-
 .../000027_better_universe_stats.down.sql | 16 +-
 .../000027_better_universe_stats.up.sql | 42 +-
 .../000028_asset_meta_tlv_fields.down.sql | 2 +-
 .../000028_asset_meta_tlv_fields.up.sql | 4 +-
 .../000029_ignore_burn_universe.down.sql | 40 +-
 .../000029_ignore_burn_universe.up.sql | 147 +++---
 ...00030_mint_anchor_uni_commitments.down.sql | 2 +-
 .../000030_mint_anchor_uni_commitments.up.sql | 12 +-
 .../migrations/000031_ignore_tree.down.sql | 93 ++--
 .../sqlc/migrations/000031_ignore_tree.up.sql | 91 ++--
 .../000032_asset_transfer_label.down.sql | 2 +-
 .../000032_asset_transfer_label.up.sql | 2 +-
 ...34_script_key_drop_declared_known.down.sql | 1 -
 ...t_transfers_skip_anchor_broadcast.down.sql | 2 +-
 ...set_transfers_skip_anchor_broadcast.up.sql | 3 +-
 ...00036_mint_supply_commit_key_refs.down.sql | 2 +-
 .../000036_mint_supply_commit_key_refs.up.sql | 6 +-
 ...0037_insert_asset_burns_migration.down.sql | 14 +-
 ...000037_insert_asset_burns_migration.up.sql | 14 +-
 tapdb/sqlc/models.go | 4 +-
 tapdb/sqlc/schemas/generated_schema.sql | 488 ++++++++++--------
 56 files changed, 1142 insertions(+), 814 deletions(-)
 create mode 100644 .sqlfluff

diff --git a/.sqlfluff b/.sqlfluff
new file mode 100644
index 000000000..d03b2a3a2
--- /dev/null
+++ b/.sqlfluff
@@ -0,0 +1,17 @@
+[sqlfluff]
+dialect = sqlite
+templater = raw
+
+processes = -1
+
+exclude_rules = AM04, AM05, AL05, ST06
+
+warnings = RF04
+
+large_file_skip_byte_limit = 50000
+
+[sqlfluff:indentation]
+tab_space_size = 4
+indented_joins = False
+indented_on_contents = True
+allow_implicit_indents = False diff --git a/tapdb/sqlc/migrations/000001_macaroons.up.sql b/tapdb/sqlc/migrations/000001_macaroons.up.sql index 1fabae56a..a7c626b3f 100644 --- a/tapdb/sqlc/migrations/000001_macaroons.up.sql +++ b/tapdb/sqlc/migrations/000001_macaroons.up.sql @@ -1,4 +1,4 @@ CREATE TABLE IF NOT EXISTS macaroons ( id BLOB PRIMARY KEY, - root_key BLOB NOT NULL + root_key BLOB NOT NULL ); diff --git a/tapdb/sqlc/migrations/000002_assets.up.sql b/tapdb/sqlc/migrations/000002_assets.up.sql index 7c108462e..866c7f505 100644 --- a/tapdb/sqlc/migrations/000002_assets.up.sql +++ b/tapdb/sqlc/migrations/000002_assets.up.sql @@ -28,7 +28,7 @@ CREATE TABLE IF NOT EXISTS genesis_points ( -- TODO(roasbeef): just need the input index here instead? prev_out BLOB UNIQUE NOT NULL, - anchor_tx_id BIGINT REFERENCES chain_txns(txn_id) + anchor_tx_id BIGINT REFERENCES chain_txns (txn_id) ); -- assets_meta is a table that holds all the metadata information for genesis @@ -37,7 +37,7 @@ CREATE TABLE IF NOT EXISTS genesis_points ( CREATE TABLE IF NOT EXISTS assets_meta ( meta_id INTEGER PRIMARY KEY, - meta_data_hash BLOB UNIQUE CHECK(length(meta_data_hash) = 32), + meta_data_hash BLOB UNIQUE CHECK (length(meta_data_hash) = 32), -- TODO(roasbeef): also have other opque blob here for future fields? meta_data_blob BLOB, @@ -56,7 +56,7 @@ CREATE TABLE IF NOT EXISTS genesis_assets ( asset_tag TEXT NOT NULL, - meta_data_id BIGINT REFERENCES assets_meta(meta_id), + meta_data_id BIGINT REFERENCES assets_meta (meta_id), output_index INTEGER NOT NULL, @@ -64,9 +64,9 @@ CREATE TABLE IF NOT EXISTS genesis_assets ( -- BIP PR asset_type SMALLINT NOT NULL, - genesis_point_id BIGINT NOT NULL REFERENCES genesis_points(genesis_id) + genesis_point_id BIGINT NOT NULL REFERENCES genesis_points (genesis_id) ); -CREATE INDEX IF NOT EXISTS asset_ids on genesis_assets(asset_id); +CREATE INDEX IF NOT EXISTS asset_ids ON genesis_assets (asset_id); -- internal_keys is the set of public keys managed and used by the daemon. The -- full KeyLocator is stored so we can use these keys without actually storing @@ -76,7 +76,7 @@ CREATE TABLE IF NOT EXISTS internal_keys ( -- We'll always store the full 33-byte key on disk, to make sure we're -- retaining full information. - raw_key BLOB NOT NULL UNIQUE CHECK(length(raw_key) = 33), + raw_key BLOB NOT NULL UNIQUE CHECK (length(raw_key) = 33), key_family INTEGER NOT NULL, @@ -90,15 +90,16 @@ CREATE TABLE IF NOT EXISTS internal_keys ( CREATE TABLE IF NOT EXISTS asset_groups ( group_id INTEGER PRIMARY KEY, - tweaked_group_key BLOB UNIQUE NOT NULL CHECK(length(tweaked_group_key) = 33), + tweaked_group_key BLOB UNIQUE NOT NULL + CHECK (length(tweaked_group_key) = 33), tapscript_root BLOB, -- TODO(roasbeef): also need to mix in output index here? to derive the -- genesis key? - internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + internal_key_id BIGINT NOT NULL REFERENCES internal_keys (key_id), - genesis_point_id BIGINT NOT NULL REFERENCES genesis_points(genesis_id) + genesis_point_id BIGINT NOT NULL REFERENCES genesis_points (genesis_id) ); -- asset_group_witnesses stores the set of signatures/witness stacks for an @@ -114,9 +115,10 @@ CREATE TABLE IF NOT EXISTS asset_group_witnesses ( witness_stack BLOB NOT NULL, -- TODO(roasbeef): not needed since already in assets row? 
- gen_asset_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id) UNIQUE, + gen_asset_id BIGINT NOT NULL + REFERENCES genesis_assets (gen_asset_id) UNIQUE, - group_key_id BIGINT NOT NULL REFERENCES asset_groups(group_id) + group_key_id BIGINT NOT NULL REFERENCES asset_groups (group_id) ); -- managed_utxos is the set of UTXOs managed by tapd. These UTXOs may commit @@ -132,10 +134,10 @@ CREATE TABLE IF NOT EXISTS managed_utxos ( -- 64 bit issues? amt_sats BIGINT NOT NULL, - internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + internal_key_id BIGINT NOT NULL REFERENCES internal_keys (key_id), -- The Taproot Asset root commitment hash. - taproot_asset_root BLOB NOT NULL CHECK(length(taproot_asset_root) = 32), + taproot_asset_root BLOB NOT NULL CHECK (length(taproot_asset_root) = 32), -- The serialized tapscript sibling preimage. If this is empty then the -- Taproot Asset root commitment is equal to the merkle_root below. @@ -145,16 +147,16 @@ CREATE TABLE IF NOT EXISTS managed_utxos ( -- corresponds to the Taproot Asset root commitment hash. -- -- TODO(roasbeef): can then reconstruct on start up to ensure matches up - merkle_root BLOB NOT NULL CHECK(length(merkle_root) = 32), + merkle_root BLOB NOT NULL CHECK (length(merkle_root) = 32), - txn_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), + txn_id BIGINT NOT NULL REFERENCES chain_txns (txn_id), -- The identity of the application that currently has a lease on this UTXO. -- If NULL, then the UTXO is not currently leased. A lease means that the -- UTXO is being reserved/locked to be spent in an upcoming transaction and -- that it should not be available for coin selection through any of the -- wallet RPCs. - lease_owner BLOB CHECK(length(lease_owner) = 32), + lease_owner BLOB CHECK (length(lease_owner) = 32), -- The absolute expiry of the lease in seconds as a Unix timestamp. If the -- expiry is NULL or the timestamp is in the past, then the lease is not @@ -167,11 +169,12 @@ CREATE TABLE IF NOT EXISTS script_keys ( -- The actual internal key here that we hold the private key for. Applying -- the tweak to this gives us the tweaked_script_key. - internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + internal_key_id BIGINT NOT NULL REFERENCES internal_keys (key_id), -- The script key after applying the tweak. This is what goes directly in -- the asset TLV. - tweaked_script_key BLOB NOT NULL UNIQUE CHECK(length(tweaked_script_key) = 33), + tweaked_script_key BLOB NOT NULL UNIQUE + CHECK (length(tweaked_script_key) = 33), -- An optional tweak for the script_key. If NULL, the raw_key may be -- tweaked BIP-0086 style. @@ -185,15 +188,15 @@ CREATE TABLE IF NOT EXISTS script_keys ( -- spend the asset. CREATE TABLE IF NOT EXISTS assets ( asset_id INTEGER PRIMARY KEY, - - genesis_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id), + + genesis_id BIGINT NOT NULL REFERENCES genesis_assets (gen_asset_id), version INTEGER NOT NULL, - script_key_id BIGINT NOT NULL REFERENCES script_keys(script_key_id), + script_key_id BIGINT NOT NULL REFERENCES script_keys (script_key_id), -- TODO(roasbeef): don't need this after all? - asset_group_witness_id BIGINT REFERENCES asset_group_witnesses(witness_id), + asset_group_witness_id BIGINT REFERENCES asset_group_witnesses (witness_id), -- TODO(roasbeef): make into enum? 
script_version INTEGER NOT NULL, @@ -210,15 +213,15 @@ CREATE TABLE IF NOT EXISTS assets ( split_commitment_root_value BIGINT, - anchor_utxo_id BIGINT REFERENCES managed_utxos(utxo_id), - + anchor_utxo_id BIGINT REFERENCES managed_utxos (utxo_id), + -- A boolean that indicates that the asset was spent. This is only -- set for assets that were transferred in an active manner (as part of an -- user initiated transfer). Passive assets that are just re-anchored are -- updated in-place. spent BOOLEAN NOT NULL DEFAULT FALSE, - - UNIQUE(asset_id, genesis_id, script_key_id) + + UNIQUE (asset_id, genesis_id, script_key_id) ); -- asset_witnesses stores the set of input witnesses for the latest state of an @@ -227,7 +230,7 @@ CREATE TABLE IF NOT EXISTS assets ( CREATE TABLE IF NOT EXISTS asset_witnesses ( witness_id INTEGER PRIMARY KEY, - asset_id BIGINT NOT NULL REFERENCES assets(asset_id) ON DELETE CASCADE, + asset_id BIGINT NOT NULL REFERENCES assets (asset_id) ON DELETE CASCADE, prev_out_point BLOB NOT NULL, @@ -247,7 +250,7 @@ CREATE TABLE IF NOT EXISTS asset_proofs ( -- We enforce that this value is unique so we can use an UPSERT to update a -- proof file that already exists. - asset_id BIGINT NOT NULL REFERENCES assets(asset_id) UNIQUE, + asset_id BIGINT NOT NULL REFERENCES assets (asset_id) UNIQUE, -- TODO(roasbef): store the merkle root separately? then can refer back to -- for all other files @@ -260,7 +263,8 @@ CREATE TABLE IF NOT EXISTS asset_proofs ( -- minting transaction which once signed and broadcast will actually create the -- assets. CREATE TABLE IF NOT EXISTS asset_minting_batches ( - batch_id INTEGER PRIMARY KEY REFERENCES internal_keys(key_id), + batch_id INTEGER PRIMARY KEY -- noqa: LL01 + REFERENCES internal_keys (key_id), -- TODO(guggero): Use BIGINT. -- TODO(roasbeef): make into proper enum table or use check to ensure -- proper values @@ -270,13 +274,14 @@ CREATE TABLE IF NOT EXISTS asset_minting_batches ( change_output_index INTEGER, - genesis_id BIGINT REFERENCES genesis_points(genesis_id), + genesis_id BIGINT REFERENCES genesis_points (genesis_id), height_hint INTEGER NOT NULL, creation_time_unix TIMESTAMP NOT NULL ); -CREATE INDEX IF NOT EXISTS batch_state_lookup on asset_minting_batches (batch_state); +CREATE INDEX IF NOT EXISTS batch_state_lookup +ON asset_minting_batches (batch_state); -- asset_seedlings are budding assets: the contain the base asset information -- need to create an asset, but doesn't yet have a genesis point. @@ -293,15 +298,15 @@ CREATE TABLE IF NOT EXISTS asset_seedlings ( asset_supply BIGINT NOT NULL, - asset_meta_id BIGINT NOT NULL REFERENCES assets_meta(meta_id), + asset_meta_id BIGINT NOT NULL REFERENCES assets_meta (meta_id), emission_enabled BOOLEAN NOT NULL, - batch_id BIGINT NOT NULL REFERENCES asset_minting_batches(batch_id), + batch_id BIGINT NOT NULL REFERENCES asset_minting_batches (batch_id), - group_genesis_id BIGINT REFERENCES genesis_assets(gen_asset_id), + group_genesis_id BIGINT REFERENCES genesis_assets (gen_asset_id), - group_anchor_id BIGINT REFERENCES asset_seedlings(seedling_id) + group_anchor_id BIGINT REFERENCES asset_seedlings (seedling_id) ); -- TODO(roasbeef): need on delete cascade for all these? @@ -313,31 +318,45 @@ CREATE TABLE IF NOT EXISTS asset_seedlings ( -- points, to the internal key that reference the batch, then restricted -- for internal keys that match our main batch key. 
CREATE VIEW genesis_info_view AS - SELECT - gen_asset_id, asset_id, asset_tag, assets_meta.meta_data_hash meta_hash, - output_index, asset_type, genesis_points.prev_out prev_out, block_height - FROM genesis_assets - -- We do a LEFT JOIN here, as not every asset has a set of - -- metadata that matches the asset. - LEFT JOIN assets_meta - ON genesis_assets.meta_data_id = assets_meta.meta_id - JOIN genesis_points - ON genesis_assets.genesis_point_id = genesis_points.genesis_id - LEFT JOIN chain_txns - ON genesis_points.anchor_tx_id = chain_txns.txn_id; +SELECT + genesis_assets.gen_asset_id, + genesis_assets.asset_id, + genesis_assets.asset_tag, + assets_meta.meta_data_hash AS meta_hash, + genesis_assets.output_index, + genesis_assets.asset_type, + genesis_points.prev_out, + chain_txns.block_height +FROM genesis_assets +-- We do a LEFT JOIN here, as not every asset has a set of +-- metadata that matches the asset. +LEFT JOIN assets_meta + ON genesis_assets.meta_data_id = assets_meta.meta_id +JOIN genesis_points + ON genesis_assets.genesis_point_id = genesis_points.genesis_id +LEFT JOIN chain_txns + ON genesis_points.anchor_tx_id = chain_txns.txn_id; -- This view is used to perform a series of joins that allow us to extract -- the group key information, as well as the group sigs for the series of -- assets we care about. We obtain only the assets found in the batch -- above, with the WHERE query at the bottom. CREATE VIEW key_group_info_view AS - SELECT - witness_id, gen_asset_id, witness_stack, tapscript_root, - tweaked_group_key, raw_key, key_index, key_family, - substr(tweaked_group_key, 2) AS x_only_group_key - FROM asset_group_witnesses wit - JOIN asset_groups groups - ON wit.group_key_id = groups.group_id - JOIN internal_keys keys - ON keys.key_id = groups.internal_key_id - WHERE wit.gen_asset_id IN (SELECT gen_asset_id FROM genesis_info_view); +SELECT + wit.witness_id, + wit.gen_asset_id, + wit.witness_stack, + grp.tapscript_root, + grp.tweaked_group_key, + keys.raw_key, + keys.key_index, + keys.key_family, + substr(grp.tweaked_group_key, 2) AS x_only_group_key +FROM asset_group_witnesses AS wit +JOIN asset_groups AS grp + ON wit.group_key_id = grp.group_id +JOIN internal_keys AS keys + ON grp.internal_key_id = keys.key_id +WHERE wit.gen_asset_id IN ( + SELECT giv.gen_asset_id FROM genesis_info_view AS giv +); diff --git a/tapdb/sqlc/migrations/000003_addrs.up.sql b/tapdb/sqlc/migrations/000003_addrs.up.sql index cc08f3f71..3486612b6 100644 --- a/tapdb/sqlc/migrations/000003_addrs.up.sql +++ b/tapdb/sqlc/migrations/000003_addrs.up.sql @@ -12,7 +12,7 @@ CREATE TABLE IF NOT EXISTS addrs ( -- genesis_asset_id points to the asset genesis of the asset we want to -- send/recv. - genesis_asset_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id), + genesis_asset_id BIGINT NOT NULL REFERENCES genesis_assets (gen_asset_id), -- group_key is the raw blob of the group key. For assets w/o a group key, -- this field will be NULL. @@ -20,11 +20,11 @@ CREATE TABLE IF NOT EXISTS addrs ( -- script_key_id points to the internal key that we created to serve as the -- script key to be able to receive this asset. - script_key_id BIGINT NOT NULL REFERENCES script_keys(script_key_id), + script_key_id BIGINT NOT NULL REFERENCES script_keys (script_key_id), -- taproot_key_id points to the internal key that we'll use to serve as the -- taproot internal key to receive this asset. 
- taproot_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + taproot_key_id BIGINT NOT NULL REFERENCES internal_keys (key_id), -- tapscript_sibling is the serialized tapscript sibling preimage that -- should be committed to in the taproot output alongside the Taproot Asset @@ -34,10 +34,12 @@ CREATE TABLE IF NOT EXISTS addrs ( -- taproot_output_key is the tweaked taproot output key that assets must -- be sent to on chain to be received, represented as a 32-byte x-only -- public key. - taproot_output_key BLOB NOT NULL UNIQUE CHECK(length(taproot_output_key) = 32), + taproot_output_key BLOB NOT NULL UNIQUE CHECK ( + length(taproot_output_key) = 32 + ), -- amount is the amount of asset we want to receive. - amount BIGINT NOT NULL, + amount BIGINT NOT NULL, -- asset_type is the type of asset we want to receive. asset_type SMALLINT NOT NULL, diff --git a/tapdb/sqlc/migrations/000004_mssmt.up.sql b/tapdb/sqlc/migrations/000004_mssmt.up.sql index 9f35ddd7b..004b349f6 100644 --- a/tapdb/sqlc/migrations/000004_mssmt.up.sql +++ b/tapdb/sqlc/migrations/000004_mssmt.up.sql @@ -1,18 +1,18 @@ CREATE TABLE IF NOT EXISTS mssmt_nodes ( -- hash_key is the hash key by which we reference all nodes. hash_key BLOB NOT NULL, - + -- l_hash_key is the hash key of the left child or NULL. If this is a -- branch then either l_hash_key or r_hash_key is not NULL. l_hash_key BLOB, - + -- r_hash_key is the hash key of the right child or NULL. If this is a -- branch then either l_hash_key or r_hash_key is not NULL. r_hash_key BLOB, - + -- key is the leaf key if this is a compacted leaf node. key BLOB, - + -- value is the leaf value if this is a leaf node. value BLOB, @@ -29,8 +29,12 @@ CREATE TABLE IF NOT EXISTS mssmt_nodes ( PRIMARY KEY (hash_key, namespace) ); -CREATE INDEX IF NOT EXISTS mssmt_nodes_l_hash_key_idx ON mssmt_nodes (l_hash_key); -CREATE INDEX IF NOT EXISTS mssmt_nodes_r_hash_key_idx ON mssmt_nodes (r_hash_key); +CREATE INDEX IF NOT EXISTS mssmt_nodes_l_hash_key_idx ON mssmt_nodes ( + l_hash_key +); +CREATE INDEX IF NOT EXISTS mssmt_nodes_r_hash_key_idx ON mssmt_nodes ( + r_hash_key +); CREATE TABLE IF NOT EXISTS mssmt_roots ( -- namespace allows us to store several root hash pointers for distinct @@ -40,5 +44,7 @@ CREATE TABLE IF NOT EXISTS mssmt_roots ( -- root_hash points to the root hash node of the MS-SMT tree. 
root_hash BLOB NOT NULL, - FOREIGN KEY (namespace, root_hash) REFERENCES mssmt_nodes (namespace, hash_key) ON DELETE CASCADE + FOREIGN KEY (namespace, root_hash) REFERENCES mssmt_nodes ( + namespace, hash_key + ) ON DELETE CASCADE ); diff --git a/tapdb/sqlc/migrations/000005_transfers.up.sql b/tapdb/sqlc/migrations/000005_transfers.up.sql index b123db2dd..1d73e6d82 100644 --- a/tapdb/sqlc/migrations/000005_transfers.up.sql +++ b/tapdb/sqlc/migrations/000005_transfers.up.sql @@ -1,54 +1,54 @@ CREATE TABLE IF NOT EXISTS asset_transfers ( - id INTEGER PRIMARY KEY, + id INTEGER PRIMARY KEY, height_hint INTEGER NOT NULL, - - anchor_txn_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), + + anchor_txn_id BIGINT NOT NULL REFERENCES chain_txns (txn_id), transfer_time_unix TIMESTAMP NOT NULL ); CREATE INDEX IF NOT EXISTS transfer_time_idx - ON asset_transfers (transfer_time_unix); +ON asset_transfers (transfer_time_unix); CREATE INDEX IF NOT EXISTS transfer_txn_idx - ON asset_transfers (anchor_txn_id); +ON asset_transfers (anchor_txn_id); CREATE TABLE IF NOT EXISTS asset_transfer_inputs ( input_id INTEGER PRIMARY KEY, - - transfer_id BIGINT NOT NULL REFERENCES asset_transfers(id), - + + transfer_id BIGINT NOT NULL REFERENCES asset_transfers (id), + anchor_point BLOB NOT NULL, - + asset_id BLOB NOT NULL, - + script_key BLOB NOT NULL, - + amount BIGINT NOT NULL ); CREATE INDEX IF NOT EXISTS transfer_inputs_idx - ON asset_transfer_inputs (transfer_id); +ON asset_transfer_inputs (transfer_id); CREATE TABLE IF NOT EXISTS asset_transfer_outputs ( output_id INTEGER PRIMARY KEY, - - transfer_id BIGINT NOT NULL REFERENCES asset_transfers(id), - - anchor_utxo BIGINT NOT NULL REFERENCES managed_utxos(utxo_id), - - script_key BIGINT NOT NULL REFERENCES script_keys(script_key_id), - + + transfer_id BIGINT NOT NULL REFERENCES asset_transfers (id), + + anchor_utxo BIGINT NOT NULL REFERENCES managed_utxos (utxo_id), + + script_key BIGINT NOT NULL REFERENCES script_keys (script_key_id), + script_key_local BOOL NOT NULL, - + amount BIGINT NOT NULL, asset_version INTEGER NOT NULL, - + serialized_witnesses BLOB, - + split_commitment_root_hash BLOB, - + split_commitment_root_value BIGINT, - + proof_suffix BLOB, num_passive_assets INTEGER NOT NULL, @@ -61,26 +61,26 @@ CREATE TABLE IF NOT EXISTS asset_transfer_outputs ( proof_courier_addr BLOB ); CREATE INDEX IF NOT EXISTS transfer_outputs_idx - ON asset_transfer_outputs (transfer_id); +ON asset_transfer_outputs (transfer_id); CREATE TABLE IF NOT EXISTS receiver_proof_transfer_attempts ( proof_locator_hash BLOB NOT NULL, time_unix TIMESTAMP NOT NULL ); -CREATE INDEX IF NOT EXISTS proof_locator_hash_index - ON receiver_proof_transfer_attempts (proof_locator_hash); +CREATE INDEX IF NOT EXISTS proof_locator_hash_index +ON receiver_proof_transfer_attempts (proof_locator_hash); -- passive_assets is a table that stores the information needed to -- re-anchor a passive asset. 
CREATE TABLE IF NOT EXISTS passive_assets ( passive_id INTEGER PRIMARY KEY, - transfer_id BIGINT NOT NULL REFERENCES asset_transfers(id), + transfer_id BIGINT NOT NULL REFERENCES asset_transfers (id), + + asset_id BIGINT NOT NULL REFERENCES assets (asset_id), - asset_id BIGINT NOT NULL REFERENCES assets(asset_id), - - new_anchor_utxo BIGINT NOT NULL REFERENCES managed_utxos(utxo_id), + new_anchor_utxo BIGINT NOT NULL REFERENCES managed_utxos (utxo_id), script_key BLOB NOT NULL, @@ -91,4 +91,4 @@ CREATE TABLE IF NOT EXISTS passive_assets ( new_proof BLOB ); CREATE INDEX IF NOT EXISTS passive_assets_idx - ON passive_assets (transfer_id); +ON passive_assets (transfer_id); diff --git a/tapdb/sqlc/migrations/000006_addr_event.up.sql b/tapdb/sqlc/migrations/000006_addr_event.up.sql index 4539df996..5b25f09bd 100644 --- a/tapdb/sqlc/migrations/000006_addr_event.up.sql +++ b/tapdb/sqlc/migrations/000006_addr_event.up.sql @@ -7,14 +7,14 @@ CREATE TABLE IF NOT EXISTS addr_events ( creation_time TIMESTAMP NOT NULL, -- addr_id is the reference to the address this event was emitted for. - addr_id BIGINT NOT NULL REFERENCES addrs(id), + addr_id BIGINT NOT NULL REFERENCES addrs (id), -- status is the status of the inbound asset. status SMALLINT NOT NULL CHECK (status IN (0, 1, 2, 3)), -- chain_txn_id is a reference to the chain transaction that has the Taproot -- output for this event. - chain_txn_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), + chain_txn_id BIGINT NOT NULL REFERENCES chain_txns (txn_id), -- chain_txn_output_index is the index of the on-chain output (of the -- transaction referenced by chain_txn_id) that houses the Taproot Asset @@ -23,20 +23,20 @@ CREATE TABLE IF NOT EXISTS addr_events ( -- managed_utxo_id is a reference to the managed UTXO the internal wallet -- tracks with on-chain funds that belong to us. - managed_utxo_id BIGINT NOT NULL REFERENCES managed_utxos(utxo_id), + managed_utxo_id BIGINT NOT NULL REFERENCES managed_utxos (utxo_id), -- asset_proof_id is a reference to the proof associated with this asset -- event. - asset_proof_id BIGINT REFERENCES asset_proofs(proof_id), - + asset_proof_id BIGINT REFERENCES asset_proofs (proof_id), + -- asset_id is a reference to the asset once we have taken custody of it. -- This will only be set once the proofs were imported successfully and the -- event is in the status complete. 
- asset_id BIGINT REFERENCES assets(asset_id), - - UNIQUE(addr_id, chain_txn_id, chain_txn_output_index) + asset_id BIGINT REFERENCES assets (asset_id), + + UNIQUE (addr_id, chain_txn_id, chain_txn_output_index) ); -CREATE INDEX IF NOT EXISTS creation_time_idx ON addr_events(creation_time); -CREATE INDEX IF NOT EXISTS status_idx ON addr_events(status); -CREATE INDEX IF NOT EXISTS asset_proof_id_idx ON addr_events(asset_proof_id); -CREATE INDEX IF NOT EXISTS asset_id_idx ON addr_events(asset_id); +CREATE INDEX IF NOT EXISTS creation_time_idx ON addr_events (creation_time); +CREATE INDEX IF NOT EXISTS status_idx ON addr_events (status); +CREATE INDEX IF NOT EXISTS asset_proof_id_idx ON addr_events (asset_proof_id); +CREATE INDEX IF NOT EXISTS asset_id_idx ON addr_events (asset_id); diff --git a/tapdb/sqlc/migrations/000007_universe.down.sql b/tapdb/sqlc/migrations/000007_universe.down.sql index e44ba3dfa..3bdc9e7cc 100644 --- a/tapdb/sqlc/migrations/000007_universe.down.sql +++ b/tapdb/sqlc/migrations/000007_universe.down.sql @@ -10,4 +10,4 @@ DROP INDEX IF EXISTS universe_events_type_idx; DROP INDEX IF EXISTS universe_roots_asset_id_idx; DROP INDEX IF EXISTS universe_roots_group_key_idx; DROP INDEX IF EXISTS universe_leaves_key_idx; -DROP INDEX IF EXISTS universe_leaves_namespace; \ No newline at end of file +DROP INDEX IF EXISTS universe_leaves_namespace; diff --git a/tapdb/sqlc/migrations/000007_universe.up.sql b/tapdb/sqlc/migrations/000007_universe.up.sql index df7521c14..454efc7d1 100644 --- a/tapdb/sqlc/migrations/000007_universe.up.sql +++ b/tapdb/sqlc/migrations/000007_universe.up.sql @@ -6,42 +6,52 @@ CREATE TABLE IF NOT EXISTS universe_roots ( -- root of the SMT is deleted temporarily before inserting a new root, then -- this constraint is violated as there's no longer a root that this -- universe tree can point to. - namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots(namespace) DEFERRABLE INITIALLY DEFERRED, + namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots ( + namespace + ) DEFERRABLE INITIALLY DEFERRED, asset_id BLOB, -- We use the 32 byte schnorr key here as this is what's used to derive the -- top-level Taproot Asset commitment key. - group_key BLOB CHECK(LENGTH(group_key) = 32), + group_key BLOB CHECK (LENGTH(group_key) = 32), -- This field is an enum representing the proof type stored in the given -- universe. 
- proof_type TEXT NOT NULL CHECK(proof_type IN ('issuance', 'transfer')) + proof_type TEXT NOT NULL CHECK (proof_type IN ('issuance', 'transfer')) ); -CREATE INDEX IF NOT EXISTS universe_roots_asset_id_idx ON universe_roots(asset_id); -CREATE INDEX IF NOT EXISTS universe_roots_group_key_idx ON universe_roots(group_key); +CREATE INDEX IF NOT EXISTS universe_roots_asset_id_idx ON universe_roots ( + asset_id +); +CREATE INDEX IF NOT EXISTS universe_roots_group_key_idx ON universe_roots ( + group_key +); CREATE TABLE IF NOT EXISTS universe_leaves ( id INTEGER PRIMARY KEY, - asset_genesis_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id), + asset_genesis_id BIGINT NOT NULL REFERENCES genesis_assets (gen_asset_id), - minting_point BLOB NOT NULL, + minting_point BLOB NOT NULL, - script_key_bytes BLOB NOT NULL CHECK(LENGTH(script_key_bytes) = 32), + script_key_bytes BLOB NOT NULL CHECK (LENGTH(script_key_bytes) = 32), - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), leaf_node_key BLOB, - + leaf_node_namespace VARCHAR NOT NULL, - UNIQUE(minting_point, script_key_bytes) + UNIQUE (minting_point, script_key_bytes) ); -CREATE INDEX IF NOT EXISTS universe_leaves_key_idx ON universe_leaves(leaf_node_key); -CREATE INDEX IF NOT EXISTS universe_leaves_namespace ON universe_leaves(leaf_node_namespace); +CREATE INDEX IF NOT EXISTS universe_leaves_key_idx ON universe_leaves ( + leaf_node_key +); +CREATE INDEX IF NOT EXISTS universe_leaves_namespace ON universe_leaves ( + leaf_node_namespace +); CREATE TABLE IF NOT EXISTS universe_servers ( id INTEGER PRIMARY KEY, @@ -52,44 +62,56 @@ CREATE TABLE IF NOT EXISTS universe_servers ( last_sync_time TIMESTAMP NOT NULL - -- TODO(roasbeef): can also add stuff like filters re which items to sync, - -- etc? also sync mode, ones that should get everything pushed, etc +-- TODO(roasbeef): can also add stuff like filters re which items to sync, +-- etc? also sync mode, ones that should get everything pushed, etc ); -CREATE INDEX IF NOT EXISTS universe_servers_host ON universe_servers(server_host); +CREATE INDEX IF NOT EXISTS universe_servers_host ON universe_servers ( + server_host +); CREATE TABLE IF NOT EXISTS universe_events ( event_id INTEGER PRIMARY KEY, - event_type VARCHAR NOT NULL CHECK (event_type IN ('SYNC', 'NEW_PROOF', 'NEW_ROOT')), + event_type VARCHAR NOT NULL CHECK ( + event_type IN ('SYNC', 'NEW_PROOF', 'NEW_ROOT') + ), - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), -- TODO(roasbeef): also add which leaf was synced? event_time TIMESTAMP NOT NULL ); -CREATE INDEX IF NOT EXISTS universe_events_event_time_idx ON universe_events(event_time); -CREATE INDEX IF NOT EXISTS universe_events_type_idx ON universe_events(event_type); +CREATE INDEX IF NOT EXISTS universe_events_event_time_idx ON universe_events ( + event_time +); +CREATE INDEX IF NOT EXISTS universe_events_type_idx ON universe_events ( + event_type +); -- universe_stats is a view that gives us easy access to the total number of -- syncs and proofs for a given asset. 
CREATE VIEW universe_stats AS - SELECT - COUNT(CASE WHEN u.event_type = 'SYNC' THEN 1 ELSE NULL END) AS total_asset_syncs, - COUNT(CASE WHEN u.event_type = 'NEW_PROOF' THEN 1 ELSE NULL END) AS total_asset_proofs, - roots.asset_id, - roots.group_key - FROM universe_events u - JOIN universe_roots roots ON u.universe_root_id = roots.id - GROUP BY roots.asset_id, roots.group_key; +SELECT + roots.asset_id, + roots.group_key, + COUNT(CASE WHEN u.event_type = 'SYNC' THEN 1 END) + AS total_asset_syncs, + COUNT(CASE WHEN u.event_type = 'NEW_PROOF' THEN 1 END) + AS total_asset_proofs +FROM universe_events AS u +INNER JOIN universe_roots AS roots ON u.universe_root_id = roots.id +GROUP BY roots.asset_id, roots.group_key; -- This table contains global configuration for universe federation syncing. CREATE TABLE IF NOT EXISTS federation_global_sync_config ( -- This field is an enum representing the proof type stored in the given -- universe. - proof_type TEXT NOT NULL PRIMARY KEY CHECK(proof_type IN ('issuance', 'transfer')), + proof_type TEXT NOT NULL PRIMARY KEY CHECK ( + proof_type IN ('issuance', 'transfer') + ), -- This field is a boolean that indicates whether or not a universe of the -- given proof type should accept remote proof insertion via federation @@ -106,15 +128,15 @@ CREATE TABLE IF NOT EXISTS federation_global_sync_config ( CREATE TABLE IF NOT EXISTS federation_uni_sync_config ( -- This field contains the byte serialized ID of the asset to which this -- configuration is applicable - asset_id BLOB CHECK(length(asset_id) = 32) NULL, + asset_id BLOB CHECK (LENGTH(asset_id) = 32) NULL, -- This field contains the byte serialized compressed group key public key -- of the asset group to which this configuration is applicable. - group_key BLOB CHECK(LENGTH(group_key) = 33) NULL, + group_key BLOB CHECK (LENGTH(group_key) = 33) NULL, -- This field is an enum representing the proof type stored in the given -- universe. - proof_type TEXT NOT NULL CHECK(proof_type IN ('issuance', 'transfer')), + proof_type TEXT NOT NULL CHECK (proof_type IN ('issuance', 'transfer')), -- This field is a boolean that indicates whether or not the given universe -- should accept remote proof insertion via federation sync. @@ -126,10 +148,10 @@ CREATE TABLE IF NOT EXISTS federation_uni_sync_config ( -- Both the asset ID and group key cannot be null at the same time. CHECK ( - (asset_id IS NOT NULL AND group_key IS NULL) OR - (asset_id IS NULL AND group_key IS NOT NULL) + (asset_id IS NOT NULL AND group_key IS NULL) + OR (asset_id IS NULL AND group_key IS NOT NULL) ), -- Ensure that the universe identifier fields form a unique tuple. UNIQUE (asset_id, group_key, proof_type) -); \ No newline at end of file +); diff --git a/tapdb/sqlc/migrations/000008_universe_events_timestamp.up.sql b/tapdb/sqlc/migrations/000008_universe_events_timestamp.up.sql index fce1488c9..883796bf9 100644 --- a/tapdb/sqlc/migrations/000008_universe_events_timestamp.up.sql +++ b/tapdb/sqlc/migrations/000008_universe_events_timestamp.up.sql @@ -1,4 +1,5 @@ -- event_timestamp is the same as event_time but stored as a Unix timestamp -- to allow us to do calculations in queries. This is added as a separate -- field to make this change non-breaking. 
-ALTER TABLE universe_events ADD COLUMN event_timestamp BIGINT NOT NULL DEFAULT 0; +ALTER TABLE universe_events +ADD COLUMN event_timestamp BIGINT NOT NULL DEFAULT 0; diff --git a/tapdb/sqlc/migrations/000009_universe_configs.down.sql b/tapdb/sqlc/migrations/000009_universe_configs.down.sql index 4c405332d..1fd4d0165 100644 --- a/tapdb/sqlc/migrations/000009_universe_configs.down.sql +++ b/tapdb/sqlc/migrations/000009_universe_configs.down.sql @@ -1 +1 @@ -DROP TABLE IF EXISTS federation_uni_sync_config; \ No newline at end of file +DROP TABLE IF EXISTS federation_uni_sync_config; diff --git a/tapdb/sqlc/migrations/000009_universe_configs.up.sql b/tapdb/sqlc/migrations/000009_universe_configs.up.sql index a2de83713..5e401ec3e 100644 --- a/tapdb/sqlc/migrations/000009_universe_configs.up.sql +++ b/tapdb/sqlc/migrations/000009_universe_configs.up.sql @@ -9,15 +9,15 @@ CREATE TABLE IF NOT EXISTS federation_uni_sync_config ( -- This field contains the byte serialized ID of the asset to which this -- configuration is applicable. - asset_id BLOB CHECK(length(asset_id) = 32) NULL, + asset_id BLOB CHECK (length(asset_id) = 32) NULL, -- This field contains the byte serialized compressed group key public key -- of the asset group to which this configuration is applicable. - group_key BLOB CHECK(LENGTH(group_key) = 33) NULL, + group_key BLOB CHECK (length(group_key) = 33) NULL, -- This field is an enum representing the proof type stored in the given -- universe. - proof_type TEXT NOT NULL CHECK(proof_type IN ('issuance', 'transfer')), + proof_type TEXT NOT NULL CHECK (proof_type IN ('issuance', 'transfer')), -- This field is a boolean that indicates whether or not the given universe -- should accept remote proof insertion via federation sync. @@ -29,7 +29,7 @@ CREATE TABLE IF NOT EXISTS federation_uni_sync_config ( -- Both the asset ID and group key cannot be null at the same time. CHECK ( - (asset_id IS NOT NULL AND group_key IS NULL) OR - (asset_id IS NULL AND group_key IS NOT NULL) + (asset_id IS NOT NULL AND group_key IS NULL) + OR (asset_id IS NULL AND group_key IS NOT NULL) ) ); diff --git a/tapdb/sqlc/migrations/000010_universe_stats.down.sql b/tapdb/sqlc/migrations/000010_universe_stats.down.sql index 357bfd7e3..acdce9bbc 100644 --- a/tapdb/sqlc/migrations/000010_universe_stats.down.sql +++ b/tapdb/sqlc/migrations/000010_universe_stats.down.sql @@ -1,2 +1,2 @@ -- This file is empty on purpose. There is nothing to roll back for this --- migration. \ No newline at end of file +-- migration. 
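-- A minimal sketch (not part of the patch) of the mutual-exclusivity CHECK on
-- federation_uni_sync_config in the 000009_universe_configs migration above:
-- exactly one of asset_id and group_key may be set per row. Evaluated
-- standalone in SQLite, the first two probe rows below satisfy the constraint
-- expression and the last two do not.
SELECT
    (asset_id IS NOT NULL AND group_key IS NULL)
    OR (asset_id IS NULL AND group_key IS NOT NULL) AS passes_check
FROM (
    SELECT randomblob(32) AS asset_id, NULL AS group_key -- asset ID only: 1
    UNION ALL
    SELECT NULL, randomblob(33)                          -- group key only: 1
    UNION ALL
    SELECT NULL, NULL                                    -- neither set: 0
    UNION ALL
    SELECT randomblob(32), randomblob(33)                -- both set: 0
) AS probe;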
diff --git a/tapdb/sqlc/migrations/000010_universe_stats.up.sql b/tapdb/sqlc/migrations/000010_universe_stats.up.sql index 7e01436fa..368e34c71 100644 --- a/tapdb/sqlc/migrations/000010_universe_stats.up.sql +++ b/tapdb/sqlc/migrations/000010_universe_stats.up.sql @@ -2,12 +2,14 @@ DROP VIEW universe_stats; CREATE VIEW universe_stats AS SELECT - COUNT(CASE WHEN u.event_type = 'SYNC' THEN 1 ELSE NULL END) AS total_asset_syncs, - COUNT(CASE WHEN u.event_type = 'NEW_PROOF' THEN 1 ELSE NULL END) AS total_asset_proofs, roots.asset_id, roots.group_key, - roots.proof_type -FROM universe_events u -JOIN universe_roots roots - ON u.universe_root_id = roots.id -GROUP BY roots.asset_id, roots.group_key, roots.proof_type; \ No newline at end of file + roots.proof_type, + COUNT(CASE WHEN u.event_type = 'SYNC' THEN 1 END) + AS total_asset_syncs, + COUNT(CASE WHEN u.event_type = 'NEW_PROOF' THEN 1 END) + AS total_asset_proofs +FROM universe_events AS u +INNER JOIN universe_roots AS roots + ON u.universe_root_id = roots.id +GROUP BY roots.asset_id, roots.group_key, roots.proof_type; diff --git a/tapdb/sqlc/migrations/000011_transfers.down.sql b/tapdb/sqlc/migrations/000011_transfers.down.sql index 8b1378917..0b5b05910 100644 --- a/tapdb/sqlc/migrations/000011_transfers.down.sql +++ b/tapdb/sqlc/migrations/000011_transfers.down.sql @@ -1 +1 @@ - +-- This file is empty on purpose. diff --git a/tapdb/sqlc/migrations/000011_transfers.up.sql b/tapdb/sqlc/migrations/000011_transfers.up.sql index 857da5acc..0554066ed 100644 --- a/tapdb/sqlc/migrations/000011_transfers.up.sql +++ b/tapdb/sqlc/migrations/000011_transfers.up.sql @@ -7,11 +7,11 @@ CREATE TABLE IF NOT EXISTS proof_transfer_log ( -- delivery to the transfer counterparty or receiving a proof from the -- transfer counterparty. Note that the transfer counterparty is usually -- the proof courier service. - transfer_type TEXT NOT NULL CHECK(transfer_type IN ('send', 'receive')), + transfer_type TEXT NOT NULL CHECK (transfer_type IN ('send', 'receive')), proof_locator_hash BLOB NOT NULL, time_unix TIMESTAMP NOT NULL ); CREATE INDEX IF NOT EXISTS proof_locator_hash_index -ON proof_transfer_log (proof_locator_hash); \ No newline at end of file +ON proof_transfer_log (proof_locator_hash); diff --git a/tapdb/sqlc/migrations/000012_anchor_txid.down.sql b/tapdb/sqlc/migrations/000012_anchor_txid.down.sql index 357bfd7e3..acdce9bbc 100644 --- a/tapdb/sqlc/migrations/000012_anchor_txid.down.sql +++ b/tapdb/sqlc/migrations/000012_anchor_txid.down.sql @@ -1,2 +1,2 @@ -- This file is empty on purpose. There is nothing to roll back for this --- migration. \ No newline at end of file +-- migration. diff --git a/tapdb/sqlc/migrations/000012_anchor_txid.up.sql b/tapdb/sqlc/migrations/000012_anchor_txid.up.sql index 305fc36f8..8eb050c65 100644 --- a/tapdb/sqlc/migrations/000012_anchor_txid.up.sql +++ b/tapdb/sqlc/migrations/000012_anchor_txid.up.sql @@ -8,32 +8,46 @@ DROP VIEW IF EXISTS genesis_info_view; -- points, to the internal key that reference the batch, then restricted -- for internal keys that match our main batch key. CREATE VIEW genesis_info_view AS - SELECT - gen_asset_id, asset_id, asset_tag, assets_meta.meta_data_hash meta_hash, - output_index, asset_type, genesis_points.prev_out prev_out, - chain_txns.txid anchor_txid, block_height - FROM genesis_assets - -- We do a LEFT JOIN here, as not every asset has a set of - -- metadata that matches the asset. 
- LEFT JOIN assets_meta - ON genesis_assets.meta_data_id = assets_meta.meta_id - JOIN genesis_points - ON genesis_assets.genesis_point_id = genesis_points.genesis_id - LEFT JOIN chain_txns - ON genesis_points.anchor_tx_id = chain_txns.txn_id; +SELECT + genesis_assets.gen_asset_id, + genesis_assets.asset_id, + genesis_assets.asset_tag, + assets_meta.meta_data_hash AS meta_hash, + genesis_assets.output_index, + genesis_assets.asset_type, + genesis_points.prev_out, + chain_txns.txid AS anchor_txid, + chain_txns.block_height +FROM genesis_assets +-- We do a LEFT JOIN here, as not every asset has a set of +-- metadata that matches the asset. +LEFT JOIN assets_meta + ON genesis_assets.meta_data_id = assets_meta.meta_id +JOIN genesis_points + ON genesis_assets.genesis_point_id = genesis_points.genesis_id +LEFT JOIN chain_txns + ON genesis_points.anchor_tx_id = chain_txns.txn_id; -- This view is used to perform a series of joins that allow us to extract -- the group key information, as well as the group sigs for the series of -- assets we care about. We obtain only the assets found in the batch -- above, with the WHERE query at the bottom. CREATE VIEW key_group_info_view AS - SELECT - witness_id, gen_asset_id, witness_stack, tapscript_root, - tweaked_group_key, raw_key, key_index, key_family, - substr(tweaked_group_key, 2) AS x_only_group_key - FROM asset_group_witnesses wit - JOIN asset_groups groups - ON wit.group_key_id = groups.group_id - JOIN internal_keys keys - ON keys.key_id = groups.internal_key_id - WHERE wit.gen_asset_id IN (SELECT gen_asset_id FROM genesis_info_view); \ No newline at end of file +SELECT + wit.witness_id, + wit.gen_asset_id, + wit.witness_stack, + grp.tapscript_root, + grp.tweaked_group_key, + keys.raw_key, + keys.key_index, + keys.key_family, + substr(grp.tweaked_group_key, 2) AS x_only_group_key +FROM asset_group_witnesses AS wit +JOIN asset_groups AS grp + ON wit.group_key_id = grp.group_id +JOIN internal_keys AS keys + ON grp.internal_key_id = keys.key_id +WHERE wit.gen_asset_id IN ( + SELECT giv.gen_asset_id FROM genesis_info_view AS giv +); diff --git a/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.down.sql b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.down.sql index 42bdbfbb8..8c3bb1754 100644 --- a/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.down.sql +++ b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.down.sql @@ -1,2 +1,3 @@ -DROP INDEX IF EXISTS federation_proof_sync_log_unique_index_proof_leaf_id_servers_id; -DROP TABLE IF EXISTS federation_proof_sync_log; \ No newline at end of file +DROP INDEX IF EXISTS +federation_proof_sync_log_unique_index_proof_leaf_id_servers_id; +DROP TABLE IF EXISTS federation_proof_sync_log; diff --git a/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.up.sql b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.up.sql index e854f5fbc..70c908404 100644 --- a/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.up.sql +++ b/tapdb/sqlc/migrations/000013_universe_fed_proof_sync_log.up.sql @@ -4,7 +4,7 @@ CREATE TABLE IF NOT EXISTS federation_proof_sync_log ( id INTEGER PRIMARY KEY, -- The status of the proof sync attempt. - status TEXT NOT NULL CHECK(status IN ('pending', 'complete')), + status TEXT NOT NULL CHECK (status IN ('pending', 'complete')), -- The timestamp of when the log entry for the associated proof was last -- updated. 
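-- A minimal sketch (not part of the patch) of how the genesis_info_view
-- recreated in the 000012_anchor_txid migration above can be consumed:
-- anchor_txid and block_height come from the LEFT JOIN against chain_txns, so
-- they are NULL for genesis points whose anchor transaction is not yet known.
SELECT
    asset_id,
    asset_tag,
    anchor_txid,
    block_height
FROM genesis_info_view
WHERE anchor_txid IS NOT NULL;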
@@ -14,23 +14,24 @@ CREATE TABLE IF NOT EXISTS federation_proof_sync_log ( attempt_counter BIGINT NOT NULL DEFAULT 0, -- The direction of the proof sync attempt. - sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')), + sync_direction TEXT NOT NULL CHECK (sync_direction IN ('push', 'pull')), -- The ID of the subject proof leaf. - proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves(id), + proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves (id), -- The ID of the universe that the proof leaf belongs to. - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), -- The ID of the server that the proof will be/was synced to. - servers_id BIGINT NOT NULL REFERENCES universe_servers(id) + servers_id BIGINT NOT NULL REFERENCES universe_servers (id) ); -- Create a unique index on table federation_proof_sync_log -CREATE UNIQUE INDEX federation_proof_sync_log_unique_index_proof_leaf_id_servers_id -ON federation_proof_sync_log ( +CREATE UNIQUE INDEX +federation_proof_sync_log_unique_index_proof_leaf_id_servers_id ON +federation_proof_sync_log ( sync_direction, proof_leaf_id, universe_root_id, servers_id -); \ No newline at end of file +); diff --git a/tapdb/sqlc/migrations/000014_multiverse_tree.up.sql b/tapdb/sqlc/migrations/000014_multiverse_tree.up.sql index 8a4dc68c1..f78117de7 100644 --- a/tapdb/sqlc/migrations/000014_multiverse_tree.up.sql +++ b/tapdb/sqlc/migrations/000014_multiverse_tree.up.sql @@ -6,32 +6,34 @@ CREATE TABLE IF NOT EXISTS multiverse_roots ( -- root of the SMT is deleted temporarily before inserting a new root, then -- this constraint is violated as there's no longer a root that this -- universe tree can point to. - namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots(namespace) DEFERRABLE INITIALLY DEFERRED, + namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots ( + namespace + ) DEFERRABLE INITIALLY DEFERRED, -- This field is an enum representing the proof type stored in the given -- universe. - proof_type TEXT NOT NULL CHECK(proof_type IN ('issuance', 'transfer')) + proof_type TEXT NOT NULL CHECK (proof_type IN ('issuance', 'transfer')) ); CREATE TABLE IF NOT EXISTS multiverse_leaves ( id INTEGER PRIMARY KEY, - multiverse_root_id BIGINT NOT NULL REFERENCES multiverse_roots(id), + multiverse_root_id BIGINT NOT NULL REFERENCES multiverse_roots (id), - asset_id BLOB CHECK(length(asset_id) = 32), + asset_id BLOB CHECK (length(asset_id) = 32), -- We use the 32 byte schnorr key here as this is what's used to derive the -- top-level Taproot Asset commitment key. - group_key BLOB CHECK(LENGTH(group_key) = 32), - + group_key BLOB CHECK (length(group_key) = 32), + leaf_node_key BLOB NOT NULL, leaf_node_namespace VARCHAR NOT NULL, -- Both the asset ID and group key cannot be null at the same time. CHECK ( - (asset_id IS NOT NULL AND group_key IS NULL) OR - (asset_id IS NULL AND group_key IS NOT NULL) + (asset_id IS NOT NULL AND group_key IS NULL) + OR (asset_id IS NULL AND group_key IS NOT NULL) ) ); @@ -43,15 +45,21 @@ CREATE UNIQUE INDEX multiverse_leaves_unique ON multiverse_leaves ( -- issuance or transfer multiverses, add them to the multiverse_roots table as -- well. Both statements are no-ops if the root doesn't exist yet. 
INSERT INTO multiverse_roots (namespace_root, proof_type) -SELECT 'multiverse-issuance', 'issuance' +SELECT + 'multiverse-issuance' AS namespace_root, + 'issuance' AS proof_type WHERE EXISTS ( - SELECT 1 FROM mssmt_roots WHERE namespace = 'multiverse-issuance' + SELECT 1 FROM mssmt_roots + WHERE namespace = 'multiverse-issuance' ); INSERT INTO multiverse_roots (namespace_root, proof_type) -SELECT 'multiverse-transfer', 'transfer' +SELECT + 'multiverse-transfer' AS namespace_root, + 'transfer' AS proof_type WHERE EXISTS ( - SELECT 1 FROM mssmt_roots WHERE namespace = 'multiverse-transfer' + SELECT 1 FROM mssmt_roots + WHERE namespace = 'multiverse-transfer' ); -- And now we create the multiverse_leaves entries for the multiverse roots. @@ -59,29 +67,35 @@ WHERE EXISTS ( INSERT INTO multiverse_leaves ( multiverse_root_id, asset_id, group_key, leaf_node_key, leaf_node_namespace ) SELECT - (SELECT id from multiverse_roots mr where mr.namespace_root = 'multiverse-issuance'), - CASE WHEN ur.group_key IS NULL THEN ur.asset_id ELSE NULL END, - ur.group_key, - -- UNHEX() only exists in SQLite and it doesn't take a second argument - -- (the 'hex' part). But it also doesn't complain about it, so we can - -- leave it in for the Postgres version which is replaced in-memory to - -- DECODE() which needs the 'hex' argument. - UNHEX(REPLACE(ur.namespace_root, 'issuance-', ''), 'hex'), - ur.namespace_root - FROM universe_roots ur - WHERE ur.namespace_root LIKE 'issuance-%'; + ( + SELECT mr.id FROM multiverse_roots AS mr + WHERE mr.namespace_root = 'multiverse-issuance' + ) AS multiverse_root_id, + CASE WHEN ur.group_key IS NULL THEN ur.asset_id END AS asset_id, + ur.group_key, + -- UNHEX() only exists in SQLite and it doesn't take a second argument + -- (the 'hex' part). But it also doesn't complain about it, so we can + -- leave it in for the Postgres version which is replaced in-memory to + -- DECODE() which needs the 'hex' argument. + unhex(replace(ur.namespace_root, 'issuance-', ''), 'hex') AS leaf_node_key, + ur.namespace_root AS leaf_node_namespace +FROM universe_roots AS ur +WHERE ur.namespace_root LIKE 'issuance-%'; INSERT INTO multiverse_leaves ( multiverse_root_id, asset_id, group_key, leaf_node_key, leaf_node_namespace ) SELECT - (SELECT id from multiverse_roots mr where mr.namespace_root = 'multiverse-transfer'), - CASE WHEN ur.group_key IS NULL THEN ur.asset_id ELSE NULL END, - ur.group_key, - -- UNHEX() only exists in SQLite and it doesn't take a second argument - -- (the 'hex' part). But it also doesn't complain about it, so we can - -- leave it in for the Postgres version which is replaced in-memory to - -- DECODE() which needs the 'hex' argument. - UNHEX(REPLACE(ur.namespace_root, 'transfer-', ''), 'hex'), - ur.namespace_root -FROM universe_roots ur + ( + SELECT mr.id FROM multiverse_roots AS mr + WHERE mr.namespace_root = 'multiverse-transfer' + ) AS multiverse_root_id, + CASE WHEN ur.group_key IS NULL THEN ur.asset_id END AS asset_id, + ur.group_key, + -- UNHEX() only exists in SQLite and it doesn't take a second argument + -- (the 'hex' part). But it also doesn't complain about it, so we can + -- leave it in for the Postgres version which is replaced in-memory to + -- DECODE() which needs the 'hex' argument. 
+ unhex(replace(ur.namespace_root, 'transfer-', ''), 'hex') AS leaf_node_key, + ur.namespace_root AS leaf_node_namespace +FROM universe_roots AS ur WHERE ur.namespace_root LIKE 'transfer-%'; diff --git a/tapdb/sqlc/migrations/000015_asset_witnesses.up.sql b/tapdb/sqlc/migrations/000015_asset_witnesses.up.sql index 200987ecb..9ed50f5a6 100644 --- a/tapdb/sqlc/migrations/000015_asset_witnesses.up.sql +++ b/tapdb/sqlc/migrations/000015_asset_witnesses.up.sql @@ -1,14 +1,15 @@ -- The witness index indicates the order of the witness in the list of witnesses -- for a given asset. We'll be inserting an actual value in the next query, so -- we just start with -1. -ALTER TABLE asset_witnesses ADD COLUMN witness_index INTEGER NOT NULL DEFAULT -1; +ALTER TABLE asset_witnesses +ADD COLUMN witness_index INTEGER NOT NULL DEFAULT -1; -- Update the witness index to be the same as the witness id. We'll use the -- witness_index for sorting only, so setting the default to the witness_id is -- just to make sure we preserve the current order of witnesses while also -- satisfying the unique constraint we add below. UPDATE asset_witnesses SET witness_index = CAST(witness_id AS INTEGER) - WHERE witness_index = -1; +WHERE witness_index = -1; -- We need to be able to upsert witnesses, so we need a unique constraint on -- (asset_id, witness_index). diff --git a/tapdb/sqlc/migrations/000016_tapscript_trees.down.sql b/tapdb/sqlc/migrations/000016_tapscript_trees.down.sql index 2ba9ffbe9..98e432abe 100644 --- a/tapdb/sqlc/migrations/000016_tapscript_trees.down.sql +++ b/tapdb/sqlc/migrations/000016_tapscript_trees.down.sql @@ -2,4 +2,4 @@ DROP INDEX IF EXISTS tapscript_edges_unique; DROP TABLE IF EXISTS tapscript_edges; DROP TABLE IF EXISTS tapscript_nodes; DROP TABLE IF EXISTS tapscript_roots; -ALTER TABLE asset_minting_batches DROP COLUMN tapscript_sibling; \ No newline at end of file +ALTER TABLE asset_minting_batches DROP COLUMN tapscript_sibling; diff --git a/tapdb/sqlc/migrations/000016_tapscript_trees.up.sql b/tapdb/sqlc/migrations/000016_tapscript_trees.up.sql index 929389753..0550c2b42 100644 --- a/tapdb/sqlc/migrations/000016_tapscript_trees.up.sql +++ b/tapdb/sqlc/migrations/000016_tapscript_trees.up.sql @@ -6,43 +6,43 @@ ALTER TABLE asset_minting_batches ADD COLUMN tapscript_sibling BLOB; -- This table stores root hashes for tapscript trees, and a flag to ensure that -- the stored tree nodes are decoded correctly. CREATE TABLE IF NOT EXISTS tapscript_roots ( - root_id INTEGER PRIMARY KEY, + root_id INTEGER PRIMARY KEY, - -- The root hash of a tapscript tree. - root_hash BLOB NOT NULL UNIQUE CHECK(length(root_hash) = 32), + -- The root hash of a tapscript tree. + root_hash BLOB NOT NULL UNIQUE CHECK (length(root_hash) = 32), - -- A flag to record if a tapscript tree was stored as two tapHashes, or - -- a set of tapLeafs. - branch_only BOOLEAN NOT NULL DEFAULT FALSE + -- A flag to record if a tapscript tree was stored as two tapHashes, or + -- a set of tapLeafs. + branch_only BOOLEAN NOT NULL DEFAULT FALSE ); -- This table stores tapscript nodes, which are tapHashes or tapLeafs. A node -- may be included in multiple tapscript trees. CREATE TABLE IF NOT EXISTS tapscript_nodes ( - node_id INTEGER PRIMARY KEY, + node_id INTEGER PRIMARY KEY, - -- The serialized tapscript node, which may be a tapHash or tapLeaf. - raw_node BLOB NOT NULL UNIQUE + -- The serialized tapscript node, which may be a tapHash or tapLeaf. 
+ raw_node BLOB NOT NULL UNIQUE ); -- This table stores tapscript edges, which link a serialized tapscript node -- to a tapscript tree root hash and preserve the node ordering in the tree. CREATE TABLE IF NOT EXISTS tapscript_edges ( - edge_id INTEGER PRIMARY KEY, + edge_id INTEGER PRIMARY KEY, - -- The root hash of a tree that includes the referenced tapscript node. - root_hash_id BIGINT NOT NULL REFERENCES tapscript_roots(root_id), + -- The root hash of a tree that includes the referenced tapscript node. + root_hash_id BIGINT NOT NULL REFERENCES tapscript_roots (root_id), - -- The index of the referenced node in the tapscript tree, which is - -- needed to correctly reconstruct the tapscript tree. - node_index BIGINT NOT NULL, + -- The index of the referenced node in the tapscript tree, which is + -- needed to correctly reconstruct the tapscript tree. + node_index BIGINT NOT NULL, - -- The tapscript node referenced by this edge. - raw_node_id BIGINT NOT NULL REFERENCES tapscript_nodes(node_id) + -- The tapscript node referenced by this edge. + raw_node_id BIGINT NOT NULL REFERENCES tapscript_nodes (node_id) ); -- A leaf can be repeated within a tree, and shared amongst trees, but there can -- only be one leaf at a given index in a tree. CREATE UNIQUE INDEX tapscript_edges_unique ON tapscript_edges ( - root_hash_id, node_index, raw_node_id + root_hash_id, node_index, raw_node_id ); diff --git a/tapdb/sqlc/migrations/000017_seedling_script_group_keys.down.sql b/tapdb/sqlc/migrations/000017_seedling_script_group_keys.down.sql index c89c6ff81..8ee29e382 100644 --- a/tapdb/sqlc/migrations/000017_seedling_script_group_keys.down.sql +++ b/tapdb/sqlc/migrations/000017_seedling_script_group_keys.down.sql @@ -1,3 +1,3 @@ ALTER TABLE asset_seedlings DROP COLUMN script_key_id; ALTER TABLE asset_seedlings DROP COLUMN group_internal_key_id; -ALTER TABLE asset_seedlings DROP COLUMN group_tapscript_root; \ No newline at end of file +ALTER TABLE asset_seedlings DROP COLUMN group_tapscript_root; diff --git a/tapdb/sqlc/migrations/000017_seedling_script_group_keys.up.sql b/tapdb/sqlc/migrations/000017_seedling_script_group_keys.up.sql index ef29f8a8c..44ae21d07 100644 --- a/tapdb/sqlc/migrations/000017_seedling_script_group_keys.up.sql +++ b/tapdb/sqlc/migrations/000017_seedling_script_group_keys.up.sql @@ -1,12 +1,15 @@ -- According to SQLite docs, a column added via ALTER TABLE cannot be both --- a REFERENCE and NOT NULL, so we'll have to enforce non-nilness outside of the DB. -ALTER TABLE asset_seedlings ADD COLUMN script_key_id BIGINT REFERENCES script_keys(script_key_id); +-- a REFERENCE and NOT NULL, so we'll have to enforce non-nilness outside of the +-- DB. +ALTER TABLE asset_seedlings +ADD COLUMN script_key_id BIGINT REFERENCES script_keys (script_key_id); --- For a group anchor, we derive the internal key for the future group key early, --- to allow use of custom group witnesses. -ALTER TABLE asset_seedlings ADD COLUMN group_internal_key_id BIGINT REFERENCES internal_keys(key_id); +-- For a group anchor, we derive the internal key for the future group key +-- early, to allow use of custom group witnesses. +ALTER TABLE asset_seedlings +ADD COLUMN group_internal_key_id BIGINT REFERENCES internal_keys (key_id); -- For a group key, the internal key can also be tweaked to commit to a -- tapscript tree. Once we finalize the batch, this tweak will also be stored -- as part of the asset group itself. 
-ALTER TABLE asset_seedlings ADD COLUMN group_tapscript_root BLOB; \ No newline at end of file +ALTER TABLE asset_seedlings ADD COLUMN group_tapscript_root BLOB; diff --git a/tapdb/sqlc/migrations/000019_managed_utxo_commitment_version.down.sql b/tapdb/sqlc/migrations/000019_managed_utxo_commitment_version.down.sql index 18eef23fb..cd6c263b8 100644 --- a/tapdb/sqlc/migrations/000019_managed_utxo_commitment_version.down.sql +++ b/tapdb/sqlc/migrations/000019_managed_utxo_commitment_version.down.sql @@ -1 +1 @@ -ALTER TABLE managed_utxos DROP COLUMN root_version; \ No newline at end of file +ALTER TABLE managed_utxos DROP COLUMN root_version; diff --git a/tapdb/sqlc/migrations/000019_managed_utxo_commitment_version.up.sql b/tapdb/sqlc/migrations/000019_managed_utxo_commitment_version.up.sql index 410f1deac..166334e3b 100644 --- a/tapdb/sqlc/migrations/000019_managed_utxo_commitment_version.up.sql +++ b/tapdb/sqlc/migrations/000019_managed_utxo_commitment_version.up.sql @@ -1,3 +1,3 @@ -- Add a field to store the version of the Taproot Asset commitment anchored in -- this UTXO. Existing UTXOs will have this set to NULL. -ALTER TABLE managed_utxos ADD COLUMN root_version SMALLINT; \ No newline at end of file +ALTER TABLE managed_utxos ADD COLUMN root_version SMALLINT; diff --git a/tapdb/sqlc/migrations/000020_asset_unique_key.up.sql b/tapdb/sqlc/migrations/000020_asset_unique_key.up.sql index 5586b757f..d9f8a7d52 100644 --- a/tapdb/sqlc/migrations/000020_asset_unique_key.up.sql +++ b/tapdb/sqlc/migrations/000020_asset_unique_key.up.sql @@ -3,14 +3,15 @@ -- contingent on this flag. CREATE TABLE tmp_duplicate_check AS SELECT CASE - WHEN EXISTS ( - SELECT 1 - FROM assets - GROUP BY genesis_id, script_key_id, amount, anchor_utxo_id - HAVING COUNT(*) > 1 + WHEN + EXISTS ( + SELECT 1 + FROM assets + GROUP BY genesis_id, script_key_id, amount, anchor_utxo_id + HAVING COUNT(*) > 1 ) THEN 1 - ELSE 0 + ELSE 0 END AS has_duplicates; -- Step 1: If the assets were spent, some of the duplicates might not have been @@ -18,42 +19,50 @@ END AS has_duplicates; -- below, we now update all assets that are spent. UPDATE assets SET spent = true -WHERE (SELECT has_duplicates - FROM tmp_duplicate_check) = 1 - AND asset_id IN (SELECT a.asset_id - FROM assets a - JOIN managed_utxos mu - ON a.anchor_utxo_id = mu.utxo_id - JOIN chain_txns ct - ON mu.txn_id = ct.txn_id - LEFT JOIN asset_transfer_inputs ati - ON ati.anchor_point = mu.outpoint - WHERE a.spent = false - AND ati.input_id IS NOT NULL); +WHERE ( + SELECT has_duplicates + FROM tmp_duplicate_check +) = 1 +AND asset_id IN ( + SELECT a.asset_id + FROM assets AS a + INNER JOIN managed_utxos AS mu + ON a.anchor_utxo_id = mu.utxo_id + INNER JOIN chain_txns AS ct + ON mu.txn_id = ct.txn_id + LEFT OUTER JOIN asset_transfer_inputs AS ati + ON mu.outpoint = ati.anchor_point + WHERE + a.spent = false + AND ati.input_id IS NOT null +); -- Step 2: Create a temporary table to store the minimum asset_id for each -- unique combination. CREATE TABLE tmp_min_assets AS -SELECT MIN(asset_id) AS min_asset_id, - genesis_id, - script_key_id, - amount, - anchor_utxo_id, - spent +SELECT + MIN(asset_id) AS min_asset_id, + genesis_id, + script_key_id, + amount, + anchor_utxo_id, + spent FROM assets GROUP BY genesis_id, script_key_id, amount, anchor_utxo_id, spent; -- Step 3: Create a mapping table to track old and new asset_ids. 
CREATE TABLE tmp_asset_id_mapping AS -SELECT a.asset_id AS old_asset_id, - tmp.min_asset_id AS new_asset_id -FROM assets a - JOIN tmp_min_assets tmp - ON a.genesis_id = tmp.genesis_id - AND a.script_key_id = tmp.script_key_id - AND a.amount = tmp.amount - AND a.anchor_utxo_id = tmp.anchor_utxo_id - AND a.spent = tmp.spent; +SELECT + a.asset_id AS old_asset_id, + tmp.min_asset_id AS new_asset_id +FROM assets AS a +INNER JOIN tmp_min_assets AS tmp + ON + a.genesis_id = tmp.genesis_id + AND a.script_key_id = tmp.script_key_id + AND a.amount = tmp.amount + AND a.anchor_utxo_id = tmp.anchor_utxo_id + AND a.spent = tmp.spent; -- Step 4: To make the next step possible, we need to disable a unique index on -- the asset_witnesses table. We'll re-create it later. @@ -64,9 +73,11 @@ DROP INDEX IF EXISTS asset_witnesses_asset_id_witness_index_unique; UPDATE asset_witnesses SET asset_id = tmp_asset_id_mapping.new_asset_id FROM tmp_asset_id_mapping -WHERE (SELECT has_duplicates - FROM tmp_duplicate_check) = 1 - AND asset_witnesses.asset_id = tmp_asset_id_mapping.old_asset_id; +WHERE ( + SELECT has_duplicates + FROM tmp_duplicate_check +) = 1 +AND asset_witnesses.asset_id = tmp_asset_id_mapping.old_asset_id; -- For the proofs we need skip re-assigning them to the asset that we're going -- to keep if it already has a proof. This is because the unique index on the @@ -74,47 +85,64 @@ WHERE (SELECT has_duplicates -- unique index, because it is an unnamed/inline index. UPDATE asset_proofs SET asset_id = filtered_mapping.new_asset_id -FROM ( - SELECT MIN(old_asset_id) AS old_asset_id, new_asset_id - FROM asset_proofs - JOIN tmp_asset_id_mapping - ON asset_proofs.asset_id = tmp_asset_id_mapping.old_asset_id - GROUP BY new_asset_id) AS filtered_mapping -WHERE (SELECT has_duplicates - FROM tmp_duplicate_check) = 1 - AND asset_proofs.asset_id = filtered_mapping.old_asset_id; +FROM ( + SELECT + tmp.new_asset_id, + MIN(tmp.old_asset_id) AS old_asset_id + FROM asset_proofs + INNER JOIN tmp_asset_id_mapping AS tmp + ON asset_proofs.asset_id = tmp.old_asset_id + GROUP BY tmp.new_asset_id +) AS filtered_mapping +WHERE ( + SELECT has_duplicates + FROM tmp_duplicate_check +) = 1 +AND asset_proofs.asset_id = filtered_mapping.old_asset_id; -- Step 6: Remove duplicates from the asset_witnesses table. DELETE FROM asset_witnesses -WHERE (SELECT has_duplicates - FROM tmp_duplicate_check) = 1 - AND witness_id NOT IN (SELECT min(witness_id) - FROM asset_witnesses - GROUP BY asset_id, witness_index); +WHERE ( + SELECT has_duplicates + FROM tmp_duplicate_check +) = 1 +AND witness_id NOT IN ( + SELECT MIN(witness_id) + FROM asset_witnesses + GROUP BY asset_id, witness_index +); -- Step 7: Re-enable the unique index on the asset_witnesses table. CREATE UNIQUE INDEX asset_witnesses_asset_id_witness_index_unique - ON asset_witnesses ( - asset_id, witness_index - ); +ON asset_witnesses ( + asset_id, witness_index +); -- Step 8: Delete any duplicate proofs. DELETE FROM asset_proofs -WHERE (SELECT has_duplicates - FROM tmp_duplicate_check) = 1 - AND asset_id NOT IN (SELECT min_asset_id - FROM tmp_min_assets); +WHERE ( + SELECT has_duplicates + FROM tmp_duplicate_check +) = 1 +AND asset_id NOT IN ( + SELECT min_asset_id + FROM tmp_min_assets +); -- Step 9: Delete the duplicates from the assets table. This will then also -- delete dangling asset_witnesses. 
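-- Illustrative check, not part of the migration: once the de-duplication
-- below has run and the unique index from Step 11 is in place, a query like
-- this should return no rows for assets with a non-NULL anchor_utxo_id, since
-- at most one asset row may remain per (genesis_id, script_key_id,
-- anchor_utxo_id) combination.
SELECT
    genesis_id,
    script_key_id,
    anchor_utxo_id,
    COUNT(*) AS num_rows
FROM assets
GROUP BY genesis_id, script_key_id, anchor_utxo_id
HAVING COUNT(*) > 1;
-- The delete statement below then removes the duplicate asset rows
-- themselves.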
DELETE FROM assets -WHERE (SELECT has_duplicates - FROM tmp_duplicate_check) = 1 - AND asset_id NOT IN (SELECT min_asset_id - FROM tmp_min_assets); +WHERE ( + SELECT has_duplicates + FROM tmp_duplicate_check +) = 1 +AND asset_id NOT IN ( + SELECT min_asset_id + FROM tmp_min_assets +); -- Step 10: Clean up temporary tables. DROP TABLE IF EXISTS tmp_min_assets; @@ -123,6 +151,6 @@ DROP TABLE IF EXISTS tmp_duplicate_check; -- Step 11: Create the unique index on the assets table. CREATE UNIQUE INDEX assets_genesis_id_script_key_id_anchor_utxo_id_unique - ON assets ( - genesis_id, script_key_id, anchor_utxo_id - ); +ON assets ( + genesis_id, script_key_id, anchor_utxo_id +); diff --git a/tapdb/sqlc/migrations/000022_transfer_outputs_proof_delivered.down.sql b/tapdb/sqlc/migrations/000022_transfer_outputs_proof_delivered.down.sql index 3c914cb15..983dacc80 100644 --- a/tapdb/sqlc/migrations/000022_transfer_outputs_proof_delivered.down.sql +++ b/tapdb/sqlc/migrations/000022_transfer_outputs_proof_delivered.down.sql @@ -7,4 +7,4 @@ DROP INDEX asset_transfer_outputs_transfer_id_position_unique; ALTER TABLE asset_transfer_outputs DROP COLUMN proof_delivery_complete; -- Remove the `position` column from the `asset_transfer_outputs` table. -ALTER TABLE asset_transfer_outputs DROP COLUMN position; \ No newline at end of file +ALTER TABLE asset_transfer_outputs DROP COLUMN position; diff --git a/tapdb/sqlc/migrations/000023_multiverse_tree_re_apply.up.sql b/tapdb/sqlc/migrations/000023_multiverse_tree_re_apply.up.sql index ee20e3dfb..2255cd524 100644 --- a/tapdb/sqlc/migrations/000023_multiverse_tree_re_apply.up.sql +++ b/tapdb/sqlc/migrations/000023_multiverse_tree_re_apply.up.sql @@ -11,52 +11,59 @@ CREATE TABLE IF NOT EXISTS multiverse_roots ( -- root of the SMT is deleted temporarily before inserting a new root, then -- this constraint is violated as there's no longer a root that this -- universe tree can point to. - namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots(namespace) DEFERRABLE INITIALLY DEFERRED, + namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots ( + namespace + ) DEFERRABLE INITIALLY DEFERRED, -- This field is an enum representing the proof type stored in the given -- universe. - proof_type TEXT NOT NULL CHECK(proof_type IN ('issuance', 'transfer')) + proof_type TEXT NOT NULL CHECK (proof_type IN ('issuance', 'transfer')) ); CREATE TABLE IF NOT EXISTS multiverse_leaves ( id INTEGER PRIMARY KEY, - multiverse_root_id BIGINT NOT NULL REFERENCES multiverse_roots(id), + multiverse_root_id BIGINT NOT NULL REFERENCES multiverse_roots (id), - asset_id BLOB CHECK(length(asset_id) = 32), + asset_id BLOB CHECK (length(asset_id) = 32), -- We use the 32 byte schnorr key here as this is what's used to derive the -- top-level Taproot Asset commitment key. - group_key BLOB CHECK(LENGTH(group_key) = 32), - + group_key BLOB CHECK (length(group_key) = 32), + leaf_node_key BLOB NOT NULL, leaf_node_namespace VARCHAR NOT NULL, -- Both the asset ID and group key cannot be null at the same time. 
CHECK ( - (asset_id IS NOT NULL AND group_key IS NULL) OR - (asset_id IS NULL AND group_key IS NOT NULL) + (asset_id IS NOT NULL AND group_key IS NULL) + OR (asset_id IS NULL AND group_key IS NOT NULL) ) ); -CREATE UNIQUE INDEX IF NOT EXISTS multiverse_leaves_unique ON multiverse_leaves ( - leaf_node_key, leaf_node_namespace -); +CREATE UNIQUE INDEX IF NOT EXISTS multiverse_leaves_unique +ON multiverse_leaves (leaf_node_key, leaf_node_namespace); -- If there already is a multiverse root entry in the mssmt_roots for the -- issuance or transfer multiverses, add them to the multiverse_roots table as -- well. Both statements are no-ops if the root doesn't exist yet. INSERT INTO multiverse_roots (namespace_root, proof_type) -SELECT 'multiverse-issuance', 'issuance' +SELECT + 'multiverse-issuance' AS namespace_root, + 'issuance' AS proof_type WHERE EXISTS ( - SELECT 1 FROM mssmt_roots WHERE namespace = 'multiverse-issuance' + SELECT 1 FROM mssmt_roots + WHERE namespace = 'multiverse-issuance' ) ON CONFLICT DO NOTHING; INSERT INTO multiverse_roots (namespace_root, proof_type) -SELECT 'multiverse-transfer', 'transfer' +SELECT + 'multiverse-transfer' AS namespace_root, + 'transfer' AS proof_type WHERE EXISTS ( - SELECT 1 FROM mssmt_roots WHERE namespace = 'multiverse-transfer' + SELECT 1 FROM mssmt_roots + WHERE namespace = 'multiverse-transfer' ) ON CONFLICT DO NOTHING; -- And now we create the multiverse_leaves entries for the multiverse roots. @@ -64,31 +71,37 @@ WHERE EXISTS ( INSERT INTO multiverse_leaves ( multiverse_root_id, asset_id, group_key, leaf_node_key, leaf_node_namespace ) SELECT - (SELECT id from multiverse_roots mr where mr.namespace_root = 'multiverse-issuance'), - CASE WHEN ur.group_key IS NULL THEN ur.asset_id ELSE NULL END, - ur.group_key, - -- UNHEX() only exists in SQLite and it doesn't take a second argument - -- (the 'hex' part). But it also doesn't complain about it, so we can - -- leave it in for the Postgres version which is replaced in-memory to - -- DECODE() which needs the 'hex' argument. - UNHEX(REPLACE(ur.namespace_root, 'issuance-', ''), 'hex'), - ur.namespace_root -FROM universe_roots ur + ( + SELECT mr.id FROM multiverse_roots AS mr + WHERE mr.namespace_root = 'multiverse-issuance' + ) AS multiverse_root_id, + CASE WHEN ur.group_key IS NULL THEN ur.asset_id END AS asset_id, + ur.group_key, + -- UNHEX() only exists in SQLite and it doesn't take a second argument + -- (the 'hex' part). But it also doesn't complain about it, so we can + -- leave it in for the Postgres version which is replaced in-memory to + -- DECODE() which needs the 'hex' argument. + unhex(replace(ur.namespace_root, 'issuance-', ''), 'hex') AS leaf_node_key, + ur.namespace_root AS leaf_node_namespace +FROM universe_roots AS ur WHERE ur.namespace_root LIKE 'issuance-%' ON CONFLICT DO NOTHING; INSERT INTO multiverse_leaves ( multiverse_root_id, asset_id, group_key, leaf_node_key, leaf_node_namespace ) SELECT - (SELECT id from multiverse_roots mr where mr.namespace_root = 'multiverse-transfer'), - CASE WHEN ur.group_key IS NULL THEN ur.asset_id ELSE NULL END, - ur.group_key, - -- UNHEX() only exists in SQLite and it doesn't take a second argument - -- (the 'hex' part). But it also doesn't complain about it, so we can - -- leave it in for the Postgres version which is replaced in-memory to - -- DECODE() which needs the 'hex' argument. 
- UNHEX(REPLACE(ur.namespace_root, 'transfer-', ''), 'hex'), - ur.namespace_root -FROM universe_roots ur + ( + SELECT mr.id FROM multiverse_roots AS mr + WHERE mr.namespace_root = 'multiverse-transfer' + ) AS multiverse_root_id, + CASE WHEN ur.group_key IS NULL THEN ur.asset_id END AS asset_id, + ur.group_key, + -- UNHEX() only exists in SQLite and it doesn't take a second argument + -- (the 'hex' part). But it also doesn't complain about it, so we can + -- leave it in for the Postgres version which is replaced in-memory to + -- DECODE() which needs the 'hex' argument. + unhex(replace(ur.namespace_root, 'transfer-', ''), 'hex') AS leaf_node_key, + ur.namespace_root AS leaf_node_namespace +FROM universe_roots AS ur WHERE ur.namespace_root LIKE 'transfer-%' ON CONFLICT DO NOTHING; diff --git a/tapdb/sqlc/migrations/000024_universe_optimization_indexes_queries.up.sql b/tapdb/sqlc/migrations/000024_universe_optimization_indexes_queries.up.sql index 21f92e967..391d25370 100644 --- a/tapdb/sqlc/migrations/000024_universe_optimization_indexes_queries.up.sql +++ b/tapdb/sqlc/migrations/000024_universe_optimization_indexes_queries.up.sql @@ -1,14 +1,14 @@ --- Most impactful for query_asset_stats which currently has highest latency +-- Most impactful for query_asset_stats which currently has the highest latency -- Supports the common join pattern and filters on proof_type. -CREATE INDEX IF NOT EXISTS idx_universe_leaves_asset -ON universe_leaves(asset_genesis_id, universe_root_id); +CREATE INDEX IF NOT EXISTS idx_universe_leaves_asset +ON universe_leaves (asset_genesis_id, universe_root_id); --- Helps with the join conditions we frequently see --- This is especially useful for query_universe_leaves and improves join efficiency. -CREATE INDEX IF NOT EXISTS idx_mssmt_nodes_composite -ON mssmt_nodes(namespace, key, hash_key, sum); +-- Helps with the join conditions we frequently see. This is especially useful +-- for query_universe_leaves and improves join efficiency. +CREATE INDEX IF NOT EXISTS idx_mssmt_nodes_composite +ON mssmt_nodes (namespace, key, hash_key, sum); -- Optimizes the common namespace_root lookups along with proof_type filtering -- This helps with fetch_universe_root and roots-related queries. CREATE INDEX IF NOT EXISTS idx_universe_roots_composite -ON universe_roots(namespace_root, proof_type, asset_id); \ No newline at end of file +ON universe_roots (namespace_root, proof_type, asset_id); diff --git a/tapdb/sqlc/migrations/000025_burns.up.sql b/tapdb/sqlc/migrations/000025_burns.up.sql index c775761fe..f1743a615 100644 --- a/tapdb/sqlc/migrations/000025_burns.up.sql +++ b/tapdb/sqlc/migrations/000025_burns.up.sql @@ -1,19 +1,19 @@ CREATE TABLE IF NOT EXISTS asset_burn_transfers ( -- The auto-incrementing integer that identifies this burn transfer. - burn_id INTEGER PRIMARY KEY, + burn_id INTEGER PRIMARY KEY, -- A reference to the primary key of the transfer that includes this burn. - transfer_id INTEGER NOT NULL REFERENCES asset_transfers(id), - + transfer_id INTEGER NOT NULL REFERENCES asset_transfers (id), -- noqa: LL01 + -- A note that may contain user defined metadata. note TEXT, -- The asset id of the burnt asset. - asset_id BLOB NOT NULL REFERENCES genesis_assets(asset_id), + asset_id BLOB NOT NULL REFERENCES genesis_assets (asset_id), -- The group key of the group the burnt asset belonged to. - group_key BLOB REFERENCES asset_groups(tweaked_group_key), + group_key BLOB REFERENCES asset_groups (tweaked_group_key), -- The amount of the asset that was burned. 
amount BIGINT NOT NULL -) \ No newline at end of file +) diff --git a/tapdb/sqlc/migrations/000026_asset_group_version_customsubtree.down.sql b/tapdb/sqlc/migrations/000026_asset_group_version_customsubtree.down.sql index 05f82e868..b50a1939f 100644 --- a/tapdb/sqlc/migrations/000026_asset_group_version_customsubtree.down.sql +++ b/tapdb/sqlc/migrations/000026_asset_group_version_customsubtree.down.sql @@ -12,12 +12,20 @@ ALTER TABLE asset_groups DROP COLUMN custom_subtree_root_id; -- Recreate the previous view. CREATE VIEW key_group_info_view AS SELECT - witness_id, gen_asset_id, witness_stack, tapscript_root, - tweaked_group_key, raw_key, key_index, key_family, - substr(tweaked_group_key, 2) AS x_only_group_key -FROM asset_group_witnesses wit - JOIN asset_groups groups - ON wit.group_key_id = groups.group_id - JOIN internal_keys keys - ON keys.key_id = groups.internal_key_id -WHERE wit.gen_asset_id IN (SELECT gen_asset_id FROM genesis_info_view); \ No newline at end of file + wit.witness_id, + wit.gen_asset_id, + wit.witness_stack, + grp.tapscript_root, + grp.tweaked_group_key, + keys.raw_key, + keys.key_index, + keys.key_family, + substr(grp.tweaked_group_key, 2) AS x_only_group_key +FROM asset_group_witnesses AS wit +JOIN asset_groups AS grp + ON wit.group_key_id = grp.group_id +JOIN internal_keys AS keys + ON grp.internal_key_id = keys.key_id +WHERE wit.gen_asset_id IN ( + SELECT giv.gen_asset_id FROM genesis_info_view AS giv +); diff --git a/tapdb/sqlc/migrations/000026_asset_group_version_customsubtree.up.sql b/tapdb/sqlc/migrations/000026_asset_group_version_customsubtree.up.sql index ad6d958fd..224d91047 100644 --- a/tapdb/sqlc/migrations/000026_asset_group_version_customsubtree.up.sql +++ b/tapdb/sqlc/migrations/000026_asset_group_version_customsubtree.up.sql @@ -6,8 +6,8 @@ ALTER TABLE asset_groups ADD COLUMN version INTEGER NOT NULL DEFAULT 0; -- custom tapscript subtree to the asset group. The subtree includes -- user-defined asset group key scripts. ALTER TABLE asset_groups -ADD COLUMN custom_subtree_root_id INTEGER -REFERENCES tapscript_roots(root_id); +ADD COLUMN custom_subtree_root_id INTEGER -- noqa: LL01 +REFERENCES tapscript_roots (root_id); -- TODO(guggero): Use BIGINT. -- We're going to recreate key_group_info_view to include the new columns. -- Therefore, we need to drop the existing view. @@ -18,19 +18,28 @@ DROP VIEW IF EXISTS key_group_info_view; -- This view is useful for including group key information via joins. CREATE VIEW key_group_info_view AS SELECT - groups.version, witness_id, gen_asset_id, witness_stack, tapscript_root, - tweaked_group_key, raw_key, key_index, key_family, - substr(tweaked_group_key, 2) AS x_only_group_key, + grp.version, + wit.witness_id, + wit.gen_asset_id, + wit.witness_stack, + grp.tapscript_root, + grp.tweaked_group_key, + keys.raw_key, + keys.key_index, + keys.key_family, + substr(grp.tweaked_group_key, 2) AS x_only_group_key, tapscript_roots.root_hash AS custom_subtree_root -FROM asset_group_witnesses wit - JOIN asset_groups groups - ON wit.group_key_id = groups.group_id - JOIN internal_keys keys - ON keys.key_id = groups.internal_key_id +FROM asset_group_witnesses AS wit +JOIN asset_groups AS grp + ON wit.group_key_id = grp.group_id +JOIN internal_keys AS keys + ON grp.internal_key_id = keys.key_id - -- Include the tapscript root hash for the custom subtree. Here we use - -- a LEFT JOIN to allow for the case where a group does not have a - -- custom subtree in which case the custom_subtree_root will be NULL. 
- LEFT JOIN tapscript_roots - ON groups.custom_subtree_root_id = tapscript_roots.root_id -WHERE wit.gen_asset_id IN (SELECT gen_asset_id FROM genesis_info_view); +-- Include the tapscript root hash for the custom subtree. Here we use +-- a LEFT JOIN to allow for the case where a group does not have a +-- custom subtree in which case the custom_subtree_root will be NULL. +LEFT JOIN tapscript_roots + ON grp.custom_subtree_root_id = tapscript_roots.root_id +WHERE wit.gen_asset_id IN ( + SELECT giv.gen_asset_id FROM genesis_info_view AS giv +); diff --git a/tapdb/sqlc/migrations/000027_better_universe_stats.down.sql b/tapdb/sqlc/migrations/000027_better_universe_stats.down.sql index 54acb6a74..eb2085e0f 100644 --- a/tapdb/sqlc/migrations/000027_better_universe_stats.down.sql +++ b/tapdb/sqlc/migrations/000027_better_universe_stats.down.sql @@ -5,12 +5,14 @@ DROP VIEW universe_stats; CREATE VIEW universe_stats AS SELECT - COUNT(CASE WHEN u.event_type = 'SYNC' THEN 1 ELSE NULL END) AS total_asset_syncs, - COUNT(CASE WHEN u.event_type = 'NEW_PROOF' THEN 1 ELSE NULL END) AS total_asset_proofs, roots.asset_id, roots.group_key, - roots.proof_type -FROM universe_events u -JOIN universe_roots roots - ON u.universe_root_id = roots.id -GROUP BY roots.asset_id, roots.group_key, roots.proof_type; \ No newline at end of file + roots.proof_type, + COUNT(CASE WHEN u.event_type = 'SYNC' THEN 1 END) + AS total_asset_syncs, + COUNT(CASE WHEN u.event_type = 'NEW_PROOF' THEN 1 END) + AS total_asset_proofs +FROM universe_events AS u +INNER JOIN universe_roots AS roots + ON u.universe_root_id = roots.id +GROUP BY roots.asset_id, roots.group_key, roots.proof_type; diff --git a/tapdb/sqlc/migrations/000027_better_universe_stats.up.sql b/tapdb/sqlc/migrations/000027_better_universe_stats.up.sql index 836887f94..6f96e23b2 100644 --- a/tapdb/sqlc/migrations/000027_better_universe_stats.up.sql +++ b/tapdb/sqlc/migrations/000027_better_universe_stats.up.sql @@ -2,36 +2,48 @@ DROP VIEW universe_stats; CREATE VIEW universe_stats AS WITH sync_counts AS ( - SELECT universe_root_id, COUNT(*) AS count + SELECT + universe_root_id, + COUNT(*) AS count FROM universe_events WHERE event_type = 'SYNC' GROUP BY universe_root_id -), proof_counts AS ( - SELECT universe_root_id, event_type, COUNT(*) AS count +), + +proof_counts AS ( + SELECT + universe_root_id, + event_type, + COUNT(*) AS count FROM universe_events WHERE event_type = 'NEW_PROOF' GROUP BY universe_root_id, event_type -), aggregated AS ( - SELECT COALESCE(SUM(count), 0) as total_asset_syncs, - 0 AS total_asset_proofs, - universe_root_id +), + +aggregated AS ( + SELECT + COALESCE(SUM(count), 0) AS total_asset_syncs, + 0 AS total_asset_proofs, + universe_root_id FROM sync_counts GROUP BY universe_root_id UNION ALL - SELECT 0 AS total_asset_syncs, - COALESCE(SUM(count), 0) as total_asset_proofs, - universe_root_id + SELECT + 0 AS total_asset_syncs, + COALESCE(SUM(count), 0) AS total_asset_proofs, + universe_root_id FROM proof_counts GROUP BY universe_root_id ) + SELECT - SUM(ag.total_asset_syncs) AS total_asset_syncs, - SUM(ag.total_asset_proofs) AS total_asset_proofs, roots.asset_id, roots.group_key, - roots.proof_type -FROM aggregated ag -JOIN universe_roots roots + roots.proof_type, + SUM(ag.total_asset_syncs) AS total_asset_syncs, + SUM(ag.total_asset_proofs) AS total_asset_proofs +FROM aggregated AS ag +INNER JOIN universe_roots AS roots ON ag.universe_root_id = roots.id GROUP BY roots.asset_id, roots.group_key, roots.proof_type ORDER BY roots.asset_id, roots.group_key, 
roots.proof_type; diff --git a/tapdb/sqlc/migrations/000028_asset_meta_tlv_fields.down.sql b/tapdb/sqlc/migrations/000028_asset_meta_tlv_fields.down.sql index 2046bf94b..33cefa7ca 100644 --- a/tapdb/sqlc/migrations/000028_asset_meta_tlv_fields.down.sql +++ b/tapdb/sqlc/migrations/000028_asset_meta_tlv_fields.down.sql @@ -1,4 +1,4 @@ ALTER TABLE assets_meta DROP COLUMN meta_decimal_display; ALTER TABLE assets_meta DROP COLUMN meta_universe_commitments; ALTER TABLE assets_meta DROP COLUMN meta_canonical_universes; -ALTER TABLE assets_meta DROP COLUMN meta_delegation_key; \ No newline at end of file +ALTER TABLE assets_meta DROP COLUMN meta_delegation_key; diff --git a/tapdb/sqlc/migrations/000028_asset_meta_tlv_fields.up.sql b/tapdb/sqlc/migrations/000028_asset_meta_tlv_fields.up.sql index 64662305a..7d2acda00 100644 --- a/tapdb/sqlc/migrations/000028_asset_meta_tlv_fields.up.sql +++ b/tapdb/sqlc/migrations/000028_asset_meta_tlv_fields.up.sql @@ -12,11 +12,11 @@ ALTER TABLE assets_meta ADD COLUMN meta_universe_commitments BOOL; -- need to allow for the control character in between, so we just assume 256 -- characters per URL. ALTER TABLE assets_meta ADD COLUMN meta_canonical_universes BLOB - CHECK(LENGTH(meta_canonical_universes) <= 4096); +CHECK (LENGTH(meta_canonical_universes) <= 4096); -- We don't want to decide on the SQL level if this key is a 33-byte compressed -- or 32-byte x-only one, so we just use the <= operator in case we ever need -- to change the semantics on this field (on the SQL level we just care about -- there being a size restriction in the first place). ALTER TABLE assets_meta ADD COLUMN meta_delegation_key BLOB - CHECK(LENGTH(meta_delegation_key) <= 33); +CHECK (LENGTH(meta_delegation_key) <= 33); diff --git a/tapdb/sqlc/migrations/000029_ignore_burn_universe.down.sql b/tapdb/sqlc/migrations/000029_ignore_burn_universe.down.sql index 957357899..a1542b00a 100644 --- a/tapdb/sqlc/migrations/000029_ignore_burn_universe.down.sql +++ b/tapdb/sqlc/migrations/000029_ignore_burn_universe.down.sql @@ -1,37 +1,53 @@ --- ****************************************************************************************** +-- ***************************************************************************** -- DOWN MIGRATION (Extended): Revert proof_type modifications. -- -- Changes: --- 1. For federation_global_sync_config, re-create the table using the original schema since --- the proof_type column is a primary key. This avoids dropping a primary key column. +-- 1. For federation_global_sync_config, re-create the table using the original +-- schema since the proof_type column is a primary key. This avoids dropping +-- a primary key column. -- 2. For other tables, revert changes via ALTER statements. -- 3. Drop the 'proof_types' enum table. --- 4. Recreate the 'universe_stats' view using the latest definition to match the reverted schema. --- ****************************************************************************************** +-- 4. Recreate the 'universe_stats' view using the latest definition to match +-- the reverted schema. 
+-- ***************************************************************************** -- For universe_roots -ALTER TABLE universe_roots ADD COLUMN proof_type TEXT NOT NULL CHECK(proof_type IN ('issuance', 'transfer')); +ALTER TABLE universe_roots ADD COLUMN proof_type TEXT NOT NULL CHECK ( + proof_type IN ('issuance', 'transfer') +); UPDATE universe_roots SET proof_type = proof_type_new; ALTER TABLE universe_roots DROP COLUMN proof_type_new; -- For federation_global_sync_config: Recreate the original table schema. -ALTER TABLE federation_global_sync_config RENAME TO federation_global_sync_config_new; +ALTER TABLE federation_global_sync_config +RENAME TO federation_global_sync_config_new; CREATE TABLE federation_global_sync_config ( - proof_type TEXT NOT NULL PRIMARY KEY CHECK(proof_type IN ('issuance', 'transfer')), + proof_type TEXT NOT NULL PRIMARY KEY CHECK ( + proof_type IN ('issuance', 'transfer') + ), allow_sync_insert BOOLEAN NOT NULL, allow_sync_export BOOLEAN NOT NULL ); -INSERT INTO federation_global_sync_config (proof_type, allow_sync_insert, allow_sync_export) -SELECT proof_type, allow_sync_insert, allow_sync_export +INSERT INTO federation_global_sync_config ( + proof_type, allow_sync_insert, allow_sync_export +) +SELECT + proof_type, + allow_sync_insert, + allow_sync_export FROM federation_global_sync_config_new; DROP TABLE federation_global_sync_config_new; -- For federation_uni_sync_config -ALTER TABLE federation_uni_sync_config RENAME COLUMN proof_type TO proof_type_new; -ALTER TABLE federation_uni_sync_config ADD COLUMN proof_type TEXT NOT NULL CHECK(proof_type IN ('issuance', 'transfer')); +ALTER TABLE federation_uni_sync_config +RENAME COLUMN proof_type TO proof_type_new; +ALTER TABLE federation_uni_sync_config +ADD COLUMN proof_type TEXT NOT NULL CHECK ( + proof_type IN ('issuance', 'transfer') +); UPDATE federation_uni_sync_config SET proof_type = proof_type_new; ALTER TABLE federation_uni_sync_config DROP COLUMN proof_type_new; diff --git a/tapdb/sqlc/migrations/000029_ignore_burn_universe.up.sql b/tapdb/sqlc/migrations/000029_ignore_burn_universe.up.sql index 0acfc8d12..77a674270 100644 --- a/tapdb/sqlc/migrations/000029_ignore_burn_universe.up.sql +++ b/tapdb/sqlc/migrations/000029_ignore_burn_universe.up.sql @@ -1,38 +1,41 @@ --- ****************************************************************************************** --- UP MIGRATION: Expanding Allowed Proof Types with an Enum-Style Reference Column +-- ***************************************************************************** +-- UP MIGRATION: Expanding Allowed Proof Types with an Enum-Style Reference +-- Column -- --- In this migration, we expand the allowed values for the "proof_type" column in our --- universe-related tables by introducing a new enum-like table ("proof_types"). +-- In this migration, we expand the allowed values for the "proof_type" column +-- in our universe-related tables by introducing a new enum-like table +-- ("proof_types"). -- -- Changes introduced: -- 1. Create a new table "proof_types" that stores valid proof type values: -- 'issuance', 'transfer', 'burn', 'ignore'. -- 2. For each affected table (universe_roots, federation_global_sync_config, -- federation_uni_sync_config): --- a) Add a new column "proof_type_ext" with a NOT NULL constraint and a foreign --- key reference to proof_types(proof_type). --- b) Copy the data from the existing "proof_type" column into "proof_type_ext". 
+-- a) Add a new column "proof_type_ext" with a NOT NULL constraint and a +-- foreign key reference to proof_types(proof_type). +-- b) Copy the data from the existing "proof_type" column into +-- "proof_type_ext". -- c) Rename the original "proof_type" to "proof_type_old". --- d) Rename "proof_type_ext" to "proof_type" so that the final column name remains --- unchanged. +-- d) Rename "proof_type_ext" to "proof_type" so that the final column +-- name remains unchanged. -- e) Drop the temporary "proof_type_old" column. -- --- This approach preserves existing data while allowing new rows to use the expanded set --- of proof types. --- ****************************************************************************************** +-- This approach preserves existing data while allowing new rows to use the +-- expanded set of proof types. +-- ***************************************************************************** --------------------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- -- Section 1: Drop Dependent Views -- Drop the universe_stats view so that subsequent schema modifications on -- columns referenced in the view do not cause errors. --------------------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- DROP VIEW IF EXISTS universe_stats; --------------------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- -- Section 2: Create Enum Table for Proof Types --- Create a new table "proof_types" to hold the allowed values for the proof type field. --- Insert the allowed values. --------------------------------------------------------------------------------------------- +-- Create a new table "proof_types" to hold the allowed values for the proof +-- type field. Insert the allowed values. +-------------------------------------------------------------------------------- CREATE TABLE IF NOT EXISTS proof_types ( proof_type TEXT PRIMARY KEY ); @@ -42,96 +45,120 @@ INSERT INTO proof_types (proof_type) VALUES ('transfer'); INSERT INTO proof_types (proof_type) VALUES ('burn'); INSERT INTO proof_types (proof_type) VALUES ('ignore'); --------------------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- -- Section 3: Update the universe_roots Table -- a) Add a new column "proof_type_ext" that references proof_types. -- b) Copy over existing data from proof_type. -- c) Drop any composite index that depends on the old proof_type column. -- d) Drop the old proof_type column, and rename proof_type_ext to proof_type. -- e) Recreate the composite index. 
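-- Illustrative check, not part of the migration: once Section 3 below
-- re-points universe_roots.proof_type at the proof_types table, any non-NULL
-- value outside the four rows seeded above would violate the new foreign key
-- (assuming foreign key enforcement is enabled, e.g. PRAGMA foreign_keys = ON
-- in SQLite). A query like the following can be used to spot such orphaned
-- values; it should return no rows.
SELECT
    ur.id,
    ur.proof_type
FROM universe_roots AS ur
LEFT JOIN proof_types AS pt
    ON ur.proof_type = pt.proof_type
WHERE
    pt.proof_type IS NULL
    AND ur.proof_type IS NOT NULL;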
--------------------------------------------------------------------------------------------- -ALTER TABLE universe_roots - ADD COLUMN proof_type_ext TEXT REFERENCES proof_types(proof_type); +-------------------------------------------------------------------------------- +ALTER TABLE universe_roots +ADD COLUMN proof_type_ext TEXT REFERENCES proof_types (proof_type); UPDATE universe_roots - SET proof_type_ext = proof_type; +SET proof_type_ext = proof_type; DROP INDEX IF EXISTS idx_universe_roots_composite; ALTER TABLE universe_roots DROP COLUMN proof_type; ALTER TABLE universe_roots RENAME COLUMN proof_type_ext TO proof_type; -CREATE INDEX idx_universe_roots_composite ON universe_roots(namespace_root, proof_type, asset_id); +CREATE INDEX idx_universe_roots_composite ON universe_roots ( + namespace_root, proof_type, asset_id +); --------------------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- -- Section 4: Update the federation_global_sync_config Table --- Since the proof_type column here is a primary key, we cannot drop it directly. --- Therefore, we rename the existing table, create a new table with the updated schema, --- copy the data over, and drop the old table. --------------------------------------------------------------------------------------------- -ALTER TABLE federation_global_sync_config RENAME TO federation_global_sync_config_old; +-- Since the proof_type column here is a primary key, we cannot drop it +-- directly. Therefore, we rename the existing table, create a new table with +-- the updated schema, copy the data over, and drop the old table. +-------------------------------------------------------------------------------- +ALTER TABLE federation_global_sync_config +RENAME TO federation_global_sync_config_old; CREATE TABLE federation_global_sync_config ( - proof_type TEXT NOT NULL PRIMARY KEY REFERENCES proof_types(proof_type), + proof_type TEXT NOT NULL PRIMARY KEY REFERENCES proof_types (proof_type), allow_sync_insert BOOLEAN NOT NULL, allow_sync_export BOOLEAN NOT NULL ); -INSERT INTO federation_global_sync_config (proof_type, allow_sync_insert, allow_sync_export) -SELECT proof_type, allow_sync_insert, allow_sync_export +INSERT INTO federation_global_sync_config ( + proof_type, allow_sync_insert, allow_sync_export +) +SELECT + proof_type, + allow_sync_insert, + allow_sync_export FROM federation_global_sync_config_old; DROP TABLE federation_global_sync_config_old; --------------------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- -- Section 5: Update the federation_uni_sync_config Table --- Add a new column ("proof_type_ext"), copy old data, then rename columns to finalize changes. --------------------------------------------------------------------------------------------- -ALTER TABLE federation_uni_sync_config - ADD COLUMN proof_type_ext TEXT REFERENCES proof_types(proof_type); +-- Add a new column ("proof_type_ext"), copy old data, then rename columns to +-- finalize changes. 
+-------------------------------------------------------------------------------- +ALTER TABLE federation_uni_sync_config +ADD COLUMN proof_type_ext TEXT REFERENCES proof_types (proof_type); UPDATE federation_uni_sync_config - SET proof_type_ext = proof_type; +SET proof_type_ext = proof_type; -ALTER TABLE federation_uni_sync_config RENAME COLUMN proof_type TO proof_type_old; -ALTER TABLE federation_uni_sync_config RENAME COLUMN proof_type_ext TO proof_type; +ALTER TABLE federation_uni_sync_config +RENAME COLUMN proof_type TO proof_type_old; +ALTER TABLE federation_uni_sync_config +RENAME COLUMN proof_type_ext TO proof_type; ALTER TABLE federation_uni_sync_config DROP COLUMN proof_type_old; --------------------------------------------------------------------------------------------- +-------------------------------------------------------------------------------- -- Section 6: Re-create the universe_stats View --- Rebuild the view using the latest definition (from 000027_better_universe_stats.up.sql) --- so that downstream queries see the updated schema for universe_roots. --------------------------------------------------------------------------------------------- +-- Rebuild the view using the latest definition (from +-- 000027_better_universe_stats.up.sql) so that downstream queries see the +-- updated schema for universe_roots. +-------------------------------------------------------------------------------- CREATE VIEW universe_stats AS WITH sync_counts AS ( - SELECT universe_root_id, COUNT(*) AS count + SELECT + universe_root_id, + COUNT(*) AS count FROM universe_events WHERE event_type = 'SYNC' GROUP BY universe_root_id -), proof_counts AS ( - SELECT universe_root_id, event_type, COUNT(*) AS count +), + +proof_counts AS ( + SELECT + universe_root_id, + event_type, + COUNT(*) AS count FROM universe_events WHERE event_type = 'NEW_PROOF' GROUP BY universe_root_id, event_type -), aggregated AS ( - SELECT COALESCE(SUM(count), 0) as total_asset_syncs, - 0 AS total_asset_proofs, - universe_root_id +), + +aggregated AS ( + SELECT + COALESCE(SUM(count), 0) AS total_asset_syncs, + 0 AS total_asset_proofs, + universe_root_id FROM sync_counts GROUP BY universe_root_id UNION ALL - SELECT 0 AS total_asset_syncs, - COALESCE(SUM(count), 0) as total_asset_proofs, - universe_root_id + SELECT + 0 AS total_asset_syncs, + COALESCE(SUM(count), 0) AS total_asset_proofs, + universe_root_id FROM proof_counts GROUP BY universe_root_id ) + SELECT - SUM(ag.total_asset_syncs) AS total_asset_syncs, - SUM(ag.total_asset_proofs) AS total_asset_proofs, roots.asset_id, roots.group_key, - roots.proof_type -FROM aggregated ag -JOIN universe_roots roots + roots.proof_type, + SUM(ag.total_asset_syncs) AS total_asset_syncs, + SUM(ag.total_asset_proofs) AS total_asset_proofs +FROM aggregated AS ag +INNER JOIN universe_roots AS roots ON ag.universe_root_id = roots.id GROUP BY roots.asset_id, roots.group_key, roots.proof_type ORDER BY roots.asset_id, roots.group_key, roots.proof_type; diff --git a/tapdb/sqlc/migrations/000030_mint_anchor_uni_commitments.down.sql b/tapdb/sqlc/migrations/000030_mint_anchor_uni_commitments.down.sql index 09d43dd60..ed741cdcc 100644 --- a/tapdb/sqlc/migrations/000030_mint_anchor_uni_commitments.down.sql +++ b/tapdb/sqlc/migrations/000030_mint_anchor_uni_commitments.down.sql @@ -8,4 +8,4 @@ DROP TABLE IF EXISTS mint_anchor_uni_commitments; ALTER TABLE asset_minting_batches DROP COLUMN universe_commitments; -- Drop the assets output index column from the asset_minting_batches table. 
-ALTER TABLE asset_minting_batches DROP COLUMN assets_output_index; \ No newline at end of file +ALTER TABLE asset_minting_batches DROP COLUMN assets_output_index; diff --git a/tapdb/sqlc/migrations/000030_mint_anchor_uni_commitments.up.sql b/tapdb/sqlc/migrations/000030_mint_anchor_uni_commitments.up.sql index 512d11056..9f1e92221 100644 --- a/tapdb/sqlc/migrations/000030_mint_anchor_uni_commitments.up.sql +++ b/tapdb/sqlc/migrations/000030_mint_anchor_uni_commitments.up.sql @@ -9,16 +9,15 @@ UPDATE asset_minting_batches SET assets_output_index = CASE WHEN change_output_index = 1 THEN 0 WHEN change_output_index = 0 THEN 1 - -- If change_output_index is neither 0 nor 1, just set the asset anchor - -- output index to NULL. - ELSE NULL +-- If change_output_index is neither 0 nor 1, just set the asset anchor +-- output index to NULL. END; -- Add a flag column which indicates if the universe commitments are enabled for -- this minting batch. This should default to false for all existing minting -- batches. ALTER TABLE asset_minting_batches - ADD COLUMN universe_commitments BOOLEAN NOT NULL DEFAULT FALSE; +ADD COLUMN universe_commitments BOOLEAN NOT NULL DEFAULT FALSE; -- Create a table to relate a mint batch anchor transaction to its universe -- commitments. @@ -26,7 +25,8 @@ CREATE TABLE IF NOT EXISTS mint_anchor_uni_commitments ( id INTEGER PRIMARY KEY, -- The ID of the minting batch this universe commitment relates to. - batch_id INTEGER NOT NULL REFERENCES asset_minting_batches(batch_id), + batch_id INTEGER NOT NULL -- noqa: LL01 + REFERENCES asset_minting_batches (batch_id), -- TODO(guggero): Use BIGINT. -- The index of the mint batch anchor transaction pre-commitment output. tx_output_index INTEGER NOT NULL, @@ -41,4 +41,4 @@ CREATE TABLE IF NOT EXISTS mint_anchor_uni_commitments ( -- Create a unique index on the mint_anchor_uni_commitments table to enforce the -- uniqueness of (batch_id, tx_output_index) pairs. CREATE UNIQUE INDEX mint_anchor_uni_commitments_unique - ON mint_anchor_uni_commitments (batch_id, tx_output_index); +ON mint_anchor_uni_commitments (batch_id, tx_output_index); diff --git a/tapdb/sqlc/migrations/000031_ignore_tree.down.sql b/tapdb/sqlc/migrations/000031_ignore_tree.down.sql index 7b1eb5623..45a8cd876 100644 --- a/tapdb/sqlc/migrations/000031_ignore_tree.down.sql +++ b/tapdb/sqlc/migrations/000031_ignore_tree.down.sql @@ -4,19 +4,22 @@ -- OVERVIEW: -- -- This downgrade script reverts the changes made in migration 30, rolling back --- from a three-column UNIQUE constraint (minting_point, script_key_bytes, leaf_node_namespace) --- to the original two-column constraint (minting_point, script_key_bytes). +-- from a three-column UNIQUE constraint (minting_point, script_key_bytes, +-- leaf_node_namespace) to the original two-column constraint (minting_point, +-- script_key_bytes). -- -- DOWNGRADE STRATEGY: -- --- Since the enhanced constraint allowed multiple entries with the same minting_point --- and script_key_bytes (differing only by leaf_node_namespace), we need to be --- selective about which rows to keep when reverting to the more restrictive constraint. +-- Since the enhanced constraint allowed multiple entries with the same +-- minting_point and script_key_bytes (differing only by leaf_node_namespace), +-- we need to be selective about which rows to keep when reverting to the more +-- restrictive constraint. -- -- This script: -- 1. Handles foreign key dependencies (backing up federation_proof_sync_log). -- 2. 
Creates a new table with the original two-column constraint. --- 3. Selectively migrates data (keeping only one row per minting_point/script_key_bytes pair). +-- 3. Selectively migrates data (keeping only one row per +-- minting_point/script_key_bytes pair). -- 4. Replaces the current table with the downgraded version. -- 5. Restores dependent tables with proper references. -- @@ -31,13 +34,14 @@ -- Create a temporary backup table for federation_proof_sync_log. CREATE TABLE new_federation_proof_sync_log ( id INTEGER PRIMARY KEY, - status TEXT NOT NULL CHECK(status IN ('pending', 'complete')), + status TEXT NOT NULL CHECK (status IN ('pending', 'complete')), timestamp TIMESTAMP NOT NULL, attempt_counter BIGINT NOT NULL DEFAULT 0, - sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')), - proof_leaf_id BIGINT NOT NULL, -- FK constraint intentionally omitted for now. - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), - servers_id BIGINT NOT NULL REFERENCES universe_servers(id) + sync_direction TEXT NOT NULL CHECK (sync_direction IN ('push', 'pull')), + -- FK constraint intentionally omitted for now. + proof_leaf_id BIGINT NOT NULL, + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), + servers_id BIGINT NOT NULL REFERENCES universe_servers (id) ); -- Backup all existing federation_proof_sync_log data. @@ -52,23 +56,25 @@ DROP TABLE federation_proof_sync_log; -- Create a new table with the original two-column unique constraint. CREATE TABLE old_universe_leaves ( id INTEGER PRIMARY KEY, - asset_genesis_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id), - minting_point BLOB NOT NULL, - script_key_bytes BLOB NOT NULL CHECK(LENGTH(script_key_bytes) = 32), - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), + asset_genesis_id BIGINT NOT NULL REFERENCES genesis_assets (gen_asset_id), + minting_point BLOB NOT NULL, + script_key_bytes BLOB NOT NULL CHECK (LENGTH(script_key_bytes) = 32), + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), leaf_node_key BLOB, leaf_node_namespace VARCHAR NOT NULL - -- The original, more restrictive unique constraint was defined here. +-- The original, more restrictive unique constraint was defined here. ); -- Create the named unique index separately. -CREATE UNIQUE INDEX universe_leaves_unique_minting_script ON old_universe_leaves(minting_point, script_key_bytes); +CREATE UNIQUE INDEX universe_leaves_unique_minting_script +ON old_universe_leaves (minting_point, script_key_bytes); -- ==== PHASE 3: SELECTIVE DATA MIGRATION ==== -- Copy data from the current table to the new one, but we must be selective -- to avoid violating the more restrictive unique constraint. --- For each (minting_point, script_key_bytes) group, we keep only the row with the lowest ID. +-- For each (minting_point, script_key_bytes) group, we keep only the row with +-- the lowest ID. INSERT INTO old_universe_leaves ( id, asset_genesis_id, @@ -78,21 +84,27 @@ INSERT INTO old_universe_leaves ( leaf_node_key, leaf_node_namespace ) -SELECT ul.id, - ul.asset_genesis_id, - ul.minting_point, - ul.script_key_bytes, - ul.universe_root_id, - ul.leaf_node_key, - ul.leaf_node_namespace -FROM universe_leaves ul -JOIN ( +WITH sub AS ( -- This subquery identifies the lowest ID for each unique combination -- of minting_point and script_key_bytes. 
- SELECT minting_point, script_key_bytes, MIN(id) AS min_id + SELECT + minting_point, + script_key_bytes, + MIN(id) AS min_id FROM universe_leaves GROUP BY minting_point, script_key_bytes -) sub ON ul.id = sub.min_id; +) + +SELECT + ul.id, + ul.asset_genesis_id, + ul.minting_point, + ul.script_key_bytes, + ul.universe_root_id, + ul.leaf_node_key, + ul.leaf_node_namespace +FROM universe_leaves AS ul +INNER JOIN sub ON ul.id = sub.min_id; -- ==== PHASE 4: TABLE REPLACEMENT ==== -- Remove the current table with the three-column constraint. @@ -102,28 +114,33 @@ DROP TABLE universe_leaves; ALTER TABLE old_universe_leaves RENAME TO universe_leaves; -- Recreate the indexes that existed on the original table. -CREATE INDEX IF NOT EXISTS universe_leaves_key_idx ON universe_leaves(leaf_node_key); -CREATE INDEX IF NOT EXISTS universe_leaves_namespace ON universe_leaves(leaf_node_namespace); +CREATE INDEX IF NOT EXISTS universe_leaves_key_idx ON universe_leaves ( + leaf_node_key +); +CREATE INDEX IF NOT EXISTS universe_leaves_namespace ON universe_leaves ( + leaf_node_namespace +); -- ==== PHASE 5: RESTORE DEPENDENT TABLES ==== --- Recreate the federation_proof_sync_log table with proper foreign key references. +-- Recreate the federation_proof_sync_log table with proper foreign key +-- references. CREATE TABLE federation_proof_sync_log ( id INTEGER PRIMARY KEY, - status TEXT NOT NULL CHECK(status IN ('pending', 'complete')), + status TEXT NOT NULL CHECK (status IN ('pending', 'complete')), timestamp TIMESTAMP NOT NULL, attempt_counter BIGINT NOT NULL DEFAULT 0, - sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')), + sync_direction TEXT NOT NULL CHECK (sync_direction IN ('push', 'pull')), -- Now we can safely reference the new universe_leaves table. - proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves(id), - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), - servers_id BIGINT NOT NULL REFERENCES universe_servers(id) + proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves (id), + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), + servers_id BIGINT NOT NULL REFERENCES universe_servers (id) ); -- Restore federation_proof_sync_log data, but only for leaves that still exist. -- Some leaves may have been dropped during the selective migration process. INSERT INTO federation_proof_sync_log SELECT * FROM new_federation_proof_sync_log -WHERE proof_leaf_id IN (SELECT id FROM universe_leaves); +WHERE proof_leaf_id IN (SELECT ul.id FROM universe_leaves AS ul); -- Clean up by dropping the temporary table. DROP TABLE new_federation_proof_sync_log; diff --git a/tapdb/sqlc/migrations/000031_ignore_tree.up.sql b/tapdb/sqlc/migrations/000031_ignore_tree.up.sql index 88beaaa03..93a976e6a 100644 --- a/tapdb/sqlc/migrations/000031_ignore_tree.up.sql +++ b/tapdb/sqlc/migrations/000031_ignore_tree.up.sql @@ -5,52 +5,59 @@ -- -- This migration modifies the UNIQUE constraint on the universe_leaves table -- to allow assets to exist in multiple universe trees simultaneously. We change --- from a two-column constraint (minting_point, script_key_bytes) to a three-column --- constraint (minting_point, script_key_bytes, leaf_node_namespace). +-- from a two-column constraint (minting_point, script_key_bytes) to a +-- three-column constraint (minting_point, script_key_bytes, +-- leaf_node_namespace). -- -- PROBLEM STATEMENT: -- -- In the current schema, the universe_leaves table enforces uniqueness based on --- minting_point and script_key_bytes. 
This design assumes an asset belongs to only --- one type of universe tree (transfer or issuance). However, with the introduction --- of "ignore" and "burn" universe trees, the same asset might need to exist in --- multiple trees simultaneously. +-- minting_point and script_key_bytes. This design assumes an asset belongs to +-- only one type of universe tree (transfer or issuance). However, with the +-- introduction of "ignore" and "burn" universe trees, the same asset might need +-- to exist in multiple trees simultaneously. -- -- SOLUTION: -- --- We expand the unique constraint to include the leaf_node_namespace column. This --- additional dimension allows distinguishing between assets based on which universe --- tree they belong to, while still preventing duplicates within the same tree. +-- We expand the unique constraint to include the leaf_node_namespace column. +-- This additional dimension allows distinguishing between assets based on which +-- universe tree they belong to, while still preventing duplicates within the +-- same tree. -- -- MIGRATION STRATEGY: -- -- Since SQLite has limited ALTER TABLE capabilities and we need to maintain --- compatibility with both SQLite and PostgreSQL, we use a table recreation approach: +-- compatibility with both SQLite and PostgreSQL, we use a table recreation +-- approach: -- --- 1. Handle foreign key dependencies (back up and remove federation_proof_sync_log). +-- 1. Handle foreign key dependencies (back up and remove +-- federation_proof_sync_log). -- 2. Create a new table with the updated constraint. -- 3. Copy existing data. -- 4. Replace the old table with the new one. -- 5. Restore dependent tables with proper references. -- --- This approach works with both database engines while preserving data integrity. +-- This approach works with both database engines while preserving data +-- integrity. -- -- ==== PHASE 1: HANDLE FOREIGN KEY DEPENDENCIES ==== -- Before we can drop the universe_leaves table, we need to temporarily remove --- any foreign key references pointing to it. The federation_proof_sync_log table --- has a foreign key to universe_leaves.id that would prevent dropping the table. +-- any foreign key references pointing to it. The federation_proof_sync_log +-- table has a foreign key to universe_leaves.id that would prevent dropping the +-- table. -- Create a temporary backup table for federation_proof_sync_log. CREATE TABLE new_federation_proof_sync_log ( id INTEGER PRIMARY KEY, - status TEXT NOT NULL CHECK(status IN ('pending', 'complete')), + status TEXT NOT NULL CHECK (status IN ('pending', 'complete')), timestamp TIMESTAMP NOT NULL, attempt_counter BIGINT NOT NULL DEFAULT 0, - sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')), - proof_leaf_id BIGINT NOT NULL, -- FK constraint intentionally omitted for now. - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), - servers_id BIGINT NOT NULL REFERENCES universe_servers(id) + sync_direction TEXT NOT NULL CHECK (sync_direction IN ('push', 'pull')), + -- FK constraint intentionally omitted for now. + proof_leaf_id BIGINT NOT NULL, + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), + servers_id BIGINT NOT NULL REFERENCES universe_servers (id) ); @@ -61,23 +68,26 @@ SELECT * FROM federation_proof_sync_log; -- Remove the table with the foreign key constraint to universe_leaves. -- This allows us to safely drop universe_leaves later. 
DROP TABLE federation_proof_sync_log; -DROP INDEX IF EXISTS federation_proof_sync_log_unique_index_proof_leaf_id_servers_id; +DROP INDEX +IF EXISTS federation_proof_sync_log_unique_index_proof_leaf_id_servers_id; -- ==== PHASE 2: CREATE NEW TABLE WITH UPDATED CONSTRAINT ==== --- Create a new universe_leaves table with the enhanced 3-column unique constraint. +-- Create a new universe_leaves table with the enhanced 3-column unique +-- constraint. CREATE TABLE new_universe_leaves ( id INTEGER PRIMARY KEY, - asset_genesis_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id), + asset_genesis_id BIGINT NOT NULL REFERENCES genesis_assets (gen_asset_id), minting_point BLOB NOT NULL, - script_key_bytes BLOB NOT NULL CHECK(LENGTH(script_key_bytes) = 32), - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), + script_key_bytes BLOB NOT NULL CHECK (LENGTH(script_key_bytes) = 32), + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), leaf_node_key BLOB, leaf_node_namespace VARCHAR NOT NULL ); -- Create the named unique index separately. This way it can be dropped later -- and we won't need as involved migrations in the future. -CREATE UNIQUE INDEX universe_leaves_unique_minting_script_namespace ON new_universe_leaves(minting_point, script_key_bytes, leaf_node_namespace); +CREATE UNIQUE INDEX universe_leaves_unique_minting_script_namespace +ON new_universe_leaves (minting_point, script_key_bytes, leaf_node_namespace); -- ==== PHASE 3: MIGRATE DATA ==== -- Copy all existing data from the original table to the new one. @@ -108,16 +118,21 @@ DROP TABLE universe_leaves; ALTER TABLE new_universe_leaves RENAME TO universe_leaves; -- Recreate indexes that existed on the original table. -CREATE INDEX IF NOT EXISTS universe_leaves_key_idx ON universe_leaves(leaf_node_key); -CREATE INDEX IF NOT EXISTS universe_leaves_namespace ON universe_leaves(leaf_node_namespace); +CREATE INDEX IF NOT EXISTS universe_leaves_key_idx ON universe_leaves ( + leaf_node_key +); +CREATE INDEX IF NOT EXISTS universe_leaves_namespace ON universe_leaves ( + leaf_node_namespace +); -- ==== PHASE 5: RESTORE DEPENDENT TABLES ==== --- Recreate the federation_proof_sync_log table with proper foreign key references. +-- Recreate the federation_proof_sync_log table with proper foreign key +-- references. CREATE TABLE federation_proof_sync_log ( id INTEGER PRIMARY KEY, -- The status of the proof sync attempt. - status TEXT NOT NULL CHECK(status IN ('pending', 'complete')), + status TEXT NOT NULL CHECK (status IN ('pending', 'complete')), -- The timestamp of when the log entry for the associated proof was last -- updated. @@ -127,30 +142,32 @@ CREATE TABLE federation_proof_sync_log ( attempt_counter BIGINT NOT NULL DEFAULT 0, -- The direction of the proof sync attempt. - sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')), + sync_direction TEXT NOT NULL CHECK (sync_direction IN ('push', 'pull')), -- The ID of the subject proof leaf. - proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves(id), + proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves (id), -- The ID of the universe that the proof leaf belongs to. - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), -- The ID of the server that the proof will be/was synced to. - servers_id BIGINT NOT NULL REFERENCES universe_servers(id) + servers_id BIGINT NOT NULL REFERENCES universe_servers (id) ); -- Restore valid federation_proof_sync_log data. 
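-- Illustrative check, not part of the migration: with the three-column unique
-- index above in place, the same (minting_point, script_key_bytes) pair may
-- now appear once per universe namespace (e.g. in both a transfer tree and a
-- burn tree), but never twice within one namespace, so this query should
-- return no rows.
SELECT
    minting_point,
    script_key_bytes,
    leaf_node_namespace,
    COUNT(*) AS num_rows
FROM universe_leaves
GROUP BY minting_point, script_key_bytes, leaf_node_namespace
HAVING COUNT(*) > 1;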
--- Only reinsert records that reference existing leaves in the universe_leaves table. +-- Only reinsert records that reference existing leaves in the universe_leaves +-- table. INSERT INTO federation_proof_sync_log SELECT * FROM new_federation_proof_sync_log -WHERE proof_leaf_id IN (SELECT id FROM universe_leaves); +WHERE proof_leaf_id IN (SELECT ul.id FROM universe_leaves AS ul); -- Clean up by dropping the temporary table. DROP TABLE new_federation_proof_sync_log; -- Re-create the unique index on table new_federation_proof_sync_log. -CREATE UNIQUE INDEX federation_proof_sync_log_unique_index_proof_leaf_id_servers_id -ON federation_proof_sync_log ( +CREATE UNIQUE INDEX +federation_proof_sync_log_unique_index_proof_leaf_id_servers_id ON +federation_proof_sync_log ( sync_direction, proof_leaf_id, universe_root_id, diff --git a/tapdb/sqlc/migrations/000032_asset_transfer_label.down.sql b/tapdb/sqlc/migrations/000032_asset_transfer_label.down.sql index 887407cc8..6c3d410f1 100644 --- a/tapdb/sqlc/migrations/000032_asset_transfer_label.down.sql +++ b/tapdb/sqlc/migrations/000032_asset_transfer_label.down.sql @@ -1,2 +1,2 @@ -- Remove `label` column from table `asset_transfer`. -ALTER TABLE asset_transfers DROP COLUMN label; \ No newline at end of file +ALTER TABLE asset_transfers DROP COLUMN label; diff --git a/tapdb/sqlc/migrations/000032_asset_transfer_label.up.sql b/tapdb/sqlc/migrations/000032_asset_transfer_label.up.sql index 6677fcac7..2ffbce5d2 100644 --- a/tapdb/sqlc/migrations/000032_asset_transfer_label.up.sql +++ b/tapdb/sqlc/migrations/000032_asset_transfer_label.up.sql @@ -1,2 +1,2 @@ -- Add a column `label` to table `asset_transfer`. -ALTER TABLE asset_transfers ADD COLUMN label VARCHAR DEFAULT NULL; \ No newline at end of file +ALTER TABLE asset_transfers ADD COLUMN label VARCHAR DEFAULT NULL; diff --git a/tapdb/sqlc/migrations/000034_script_key_drop_declared_known.down.sql b/tapdb/sqlc/migrations/000034_script_key_drop_declared_known.down.sql index e4ac3c55d..083f2ee53 100644 --- a/tapdb/sqlc/migrations/000034_script_key_drop_declared_known.down.sql +++ b/tapdb/sqlc/migrations/000034_script_key_drop_declared_known.down.sql @@ -1,2 +1 @@ ALTER TABLE script_keys ADD COLUMN declared_known BOOLEAN; - diff --git a/tapdb/sqlc/migrations/000035_asset_transfers_skip_anchor_broadcast.down.sql b/tapdb/sqlc/migrations/000035_asset_transfers_skip_anchor_broadcast.down.sql index e607d93e6..4145e8fba 100644 --- a/tapdb/sqlc/migrations/000035_asset_transfers_skip_anchor_broadcast.down.sql +++ b/tapdb/sqlc/migrations/000035_asset_transfers_skip_anchor_broadcast.down.sql @@ -1,2 +1,2 @@ -- Remove the skip_anchor_tx_broadcast flag from asset_transfers table. -ALTER TABLE asset_transfers DROP COLUMN skip_anchor_tx_broadcast; \ No newline at end of file +ALTER TABLE asset_transfers DROP COLUMN skip_anchor_tx_broadcast; diff --git a/tapdb/sqlc/migrations/000035_asset_transfers_skip_anchor_broadcast.up.sql b/tapdb/sqlc/migrations/000035_asset_transfers_skip_anchor_broadcast.up.sql index 58f47de97..50d495994 100644 --- a/tapdb/sqlc/migrations/000035_asset_transfers_skip_anchor_broadcast.up.sql +++ b/tapdb/sqlc/migrations/000035_asset_transfers_skip_anchor_broadcast.up.sql @@ -1,3 +1,4 @@ -- Add a flag to optionally skip anchor transaction broadcast for asset -- transfers. 
-ALTER TABLE asset_transfers ADD COLUMN skip_anchor_tx_broadcast BOOLEAN NOT NULL DEFAULT FALSE; \ No newline at end of file +ALTER TABLE asset_transfers +ADD COLUMN skip_anchor_tx_broadcast BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/tapdb/sqlc/migrations/000036_mint_supply_commit_key_refs.down.sql b/tapdb/sqlc/migrations/000036_mint_supply_commit_key_refs.down.sql index d71041d48..0bbbe4370 100644 --- a/tapdb/sqlc/migrations/000036_mint_supply_commit_key_refs.down.sql +++ b/tapdb/sqlc/migrations/000036_mint_supply_commit_key_refs.down.sql @@ -8,4 +8,4 @@ ALTER TABLE mint_anchor_uni_commitments DROP COLUMN taproot_internal_key_id; ALTER TABLE mint_anchor_uni_commitments -ADD COLUMN taproot_internal_key BLOB; \ No newline at end of file +ADD COLUMN taproot_internal_key BLOB; diff --git a/tapdb/sqlc/migrations/000036_mint_supply_commit_key_refs.up.sql b/tapdb/sqlc/migrations/000036_mint_supply_commit_key_refs.up.sql index b06570c39..cfc16c040 100644 --- a/tapdb/sqlc/migrations/000036_mint_supply_commit_key_refs.up.sql +++ b/tapdb/sqlc/migrations/000036_mint_supply_commit_key_refs.up.sql @@ -2,7 +2,7 @@ -- to internal_keys table ALTER TABLE asset_seedlings ADD COLUMN delegation_key_id -BIGINT REFERENCES internal_keys(key_id); +BIGINT REFERENCES internal_keys (key_id); -- Replace taproot_internal_key column with taproot_internal_key_id -- as a foreign key to internal_keys table in mint_anchor_uni_commitments. @@ -13,5 +13,5 @@ DROP COLUMN taproot_internal_key; ALTER TABLE mint_anchor_uni_commitments ADD COLUMN taproot_internal_key_id -BIGINT REFERENCES internal_keys(key_id) -NOT NULL; \ No newline at end of file +BIGINT REFERENCES internal_keys (key_id) +NOT NULL; diff --git a/tapdb/sqlc/migrations/000037_insert_asset_burns_migration.down.sql b/tapdb/sqlc/migrations/000037_insert_asset_burns_migration.down.sql index 591692abd..8594c1902 100644 --- a/tapdb/sqlc/migrations/000037_insert_asset_burns_migration.down.sql +++ b/tapdb/sqlc/migrations/000037_insert_asset_burns_migration.down.sql @@ -5,16 +5,16 @@ CREATE TABLE IF NOT EXISTS asset_burn_transfers_corrected ( burn_id INTEGER PRIMARY KEY, -- A reference to the primary key of the transfer that includes this burn. - transfer_id INTEGER NOT NULL REFERENCES asset_transfers(id), + transfer_id INTEGER NOT NULL REFERENCES asset_transfers (id), -- noqa: LL01 -- A note that may contain user defined metadata. note TEXT, -- The asset id of the burnt asset. - asset_id BLOB NOT NULL REFERENCES genesis_assets(asset_id), + asset_id BLOB NOT NULL REFERENCES genesis_assets (asset_id), -- The group key of the group the burnt asset belonged to. - group_key BLOB REFERENCES asset_groups(tweaked_group_key), + group_key BLOB REFERENCES asset_groups (tweaked_group_key), -- The amount of the asset that was burned. 
amount BIGINT NOT NULL @@ -23,7 +23,13 @@ CREATE TABLE IF NOT EXISTS asset_burn_transfers_corrected ( INSERT INTO asset_burn_transfers_corrected ( burn_id, transfer_id, note, asset_id, group_key, amount ) -SELECT burn_id, transfer_id, note, asset_id, group_key, amount +SELECT + burn_id, + transfer_id, + note, + asset_id, + group_key, + amount FROM asset_burn_transfers; DROP TABLE asset_burn_transfers; diff --git a/tapdb/sqlc/migrations/000037_insert_asset_burns_migration.up.sql b/tapdb/sqlc/migrations/000037_insert_asset_burns_migration.up.sql index f0e578fb5..ede65593a 100644 --- a/tapdb/sqlc/migrations/000037_insert_asset_burns_migration.up.sql +++ b/tapdb/sqlc/migrations/000037_insert_asset_burns_migration.up.sql @@ -6,16 +6,16 @@ CREATE TABLE IF NOT EXISTS asset_burn_transfers_corrected ( burn_id INTEGER PRIMARY KEY, -- A reference to the primary key of the transfer that includes this burn. - transfer_id BIGINT NOT NULL REFERENCES asset_transfers(id), + transfer_id BIGINT NOT NULL REFERENCES asset_transfers (id), -- A note that may contain user defined metadata. note TEXT, -- The asset id of the burnt asset. - asset_id BLOB NOT NULL REFERENCES genesis_assets(asset_id), + asset_id BLOB NOT NULL REFERENCES genesis_assets (asset_id), -- The group key of the group the burnt asset belonged to. - group_key BLOB REFERENCES asset_groups(tweaked_group_key), + group_key BLOB REFERENCES asset_groups (tweaked_group_key), -- The amount of the asset that was burned. amount BIGINT NOT NULL @@ -24,7 +24,13 @@ CREATE TABLE IF NOT EXISTS asset_burn_transfers_corrected ( INSERT INTO asset_burn_transfers_corrected ( burn_id, transfer_id, note, asset_id, group_key, amount ) -SELECT burn_id, transfer_id, note, asset_id, group_key, amount +SELECT + burn_id, + transfer_id, + note, + asset_id, + group_key, + amount FROM asset_burn_transfers; DROP TABLE asset_burn_transfers; diff --git a/tapdb/sqlc/models.go b/tapdb/sqlc/models.go index 10749dfe0..e530043ed 100644 --- a/tapdb/sqlc/models.go +++ b/tapdb/sqlc/models.go @@ -399,9 +399,9 @@ type UniverseServer struct { } type UniverseStat struct { - TotalAssetSyncs int64 - TotalAssetProofs int64 AssetID []byte GroupKey []byte ProofType sql.NullString + TotalAssetSyncs int64 + TotalAssetProofs int64 } diff --git a/tapdb/sqlc/schemas/generated_schema.sql b/tapdb/sqlc/schemas/generated_schema.sql index 686104740..9c954f65e 100644 --- a/tapdb/sqlc/schemas/generated_schema.sql +++ b/tapdb/sqlc/schemas/generated_schema.sql @@ -9,14 +9,14 @@ CREATE TABLE addr_events ( creation_time TIMESTAMP NOT NULL, -- addr_id is the reference to the address this event was emitted for. - addr_id BIGINT NOT NULL REFERENCES addrs(id), + addr_id BIGINT NOT NULL REFERENCES addrs (id), -- status is the status of the inbound asset. status SMALLINT NOT NULL CHECK (status IN (0, 1, 2, 3)), -- chain_txn_id is a reference to the chain transaction that has the Taproot -- output for this event. - chain_txn_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), + chain_txn_id BIGINT NOT NULL REFERENCES chain_txns (txn_id), -- chain_txn_output_index is the index of the on-chain output (of the -- transaction referenced by chain_txn_id) that houses the Taproot Asset @@ -25,18 +25,18 @@ CREATE TABLE addr_events ( -- managed_utxo_id is a reference to the managed UTXO the internal wallet -- tracks with on-chain funds that belong to us. 
- managed_utxo_id BIGINT NOT NULL REFERENCES managed_utxos(utxo_id), + managed_utxo_id BIGINT NOT NULL REFERENCES managed_utxos (utxo_id), -- asset_proof_id is a reference to the proof associated with this asset -- event. - asset_proof_id BIGINT REFERENCES asset_proofs(proof_id), - + asset_proof_id BIGINT REFERENCES asset_proofs (proof_id), + -- asset_id is a reference to the asset once we have taken custody of it. -- This will only be set once the proofs were imported successfully and the -- event is in the status complete. - asset_id BIGINT REFERENCES assets(asset_id), - - UNIQUE(addr_id, chain_txn_id, chain_txn_output_index) + asset_id BIGINT REFERENCES assets (asset_id), + + UNIQUE (addr_id, chain_txn_id, chain_txn_output_index) ); CREATE INDEX addr_group_keys ON addrs (group_key); @@ -54,7 +54,7 @@ CREATE TABLE addrs ( -- genesis_asset_id points to the asset genesis of the asset we want to -- send/recv. - genesis_asset_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id), + genesis_asset_id BIGINT NOT NULL REFERENCES genesis_assets (gen_asset_id), -- group_key is the raw blob of the group key. For assets w/o a group key, -- this field will be NULL. @@ -62,11 +62,11 @@ CREATE TABLE addrs ( -- script_key_id points to the internal key that we created to serve as the -- script key to be able to receive this asset. - script_key_id BIGINT NOT NULL REFERENCES script_keys(script_key_id), + script_key_id BIGINT NOT NULL REFERENCES script_keys (script_key_id), -- taproot_key_id points to the internal key that we'll use to serve as the -- taproot internal key to receive this asset. - taproot_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + taproot_key_id BIGINT NOT NULL REFERENCES internal_keys (key_id), -- tapscript_sibling is the serialized tapscript sibling preimage that -- should be committed to in the taproot output alongside the Taproot Asset @@ -76,10 +76,12 @@ CREATE TABLE addrs ( -- taproot_output_key is the tweaked taproot output key that assets must -- be sent to on chain to be received, represented as a 32-byte x-only -- public key. - taproot_output_key BLOB NOT NULL UNIQUE CHECK(length(taproot_output_key) = 32), + taproot_output_key BLOB NOT NULL UNIQUE CHECK ( + length(taproot_output_key) = 32 + ), -- amount is the amount of asset we want to receive. - amount BIGINT NOT NULL, + amount BIGINT NOT NULL, -- asset_type is the type of asset we want to receive. asset_type SMALLINT NOT NULL, @@ -101,16 +103,16 @@ CREATE TABLE "asset_burn_transfers" ( burn_id INTEGER PRIMARY KEY, -- A reference to the primary key of the transfer that includes this burn. - transfer_id BIGINT NOT NULL REFERENCES asset_transfers(id), + transfer_id BIGINT NOT NULL REFERENCES asset_transfers (id), -- A note that may contain user defined metadata. note TEXT, -- The asset id of the burnt asset. - asset_id BLOB NOT NULL REFERENCES genesis_assets(asset_id), + asset_id BLOB NOT NULL REFERENCES genesis_assets (asset_id), -- The group key of the group the burnt asset belonged to. - group_key BLOB REFERENCES asset_groups(tweaked_group_key), + group_key BLOB REFERENCES asset_groups (tweaked_group_key), -- The amount of the asset that was burned. amount BIGINT NOT NULL @@ -124,32 +126,35 @@ CREATE TABLE asset_group_witnesses ( witness_stack BLOB NOT NULL, -- TODO(roasbeef): not needed since already in assets row? 
- gen_asset_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id) UNIQUE, + gen_asset_id BIGINT NOT NULL + REFERENCES genesis_assets (gen_asset_id) UNIQUE, - group_key_id BIGINT NOT NULL REFERENCES asset_groups(group_id) + group_key_id BIGINT NOT NULL REFERENCES asset_groups (group_id) ); CREATE TABLE asset_groups ( group_id INTEGER PRIMARY KEY, - tweaked_group_key BLOB UNIQUE NOT NULL CHECK(length(tweaked_group_key) = 33), + tweaked_group_key BLOB UNIQUE NOT NULL + CHECK (length(tweaked_group_key) = 33), tapscript_root BLOB, -- TODO(roasbeef): also need to mix in output index here? to derive the -- genesis key? - internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + internal_key_id BIGINT NOT NULL REFERENCES internal_keys (key_id), - genesis_point_id BIGINT NOT NULL REFERENCES genesis_points(genesis_id) -, version INTEGER NOT NULL DEFAULT 0, custom_subtree_root_id INTEGER -REFERENCES tapscript_roots(root_id)); + genesis_point_id BIGINT NOT NULL REFERENCES genesis_points (genesis_id) +, version INTEGER NOT NULL DEFAULT 0, custom_subtree_root_id INTEGER -- noqa: LL01 +REFERENCES tapscript_roots (root_id)); -CREATE INDEX asset_id_idx ON addr_events(asset_id); +CREATE INDEX asset_id_idx ON addr_events (asset_id); -CREATE INDEX asset_ids on genesis_assets(asset_id); +CREATE INDEX asset_ids ON genesis_assets (asset_id); CREATE TABLE asset_minting_batches ( - batch_id INTEGER PRIMARY KEY REFERENCES internal_keys(key_id), + batch_id INTEGER PRIMARY KEY -- noqa: LL01 + REFERENCES internal_keys (key_id), -- TODO(guggero): Use BIGINT. -- TODO(roasbeef): make into proper enum table or use check to ensure -- proper values @@ -159,21 +164,21 @@ CREATE TABLE asset_minting_batches ( change_output_index INTEGER, - genesis_id BIGINT REFERENCES genesis_points(genesis_id), + genesis_id BIGINT REFERENCES genesis_points (genesis_id), height_hint INTEGER NOT NULL, creation_time_unix TIMESTAMP NOT NULL , tapscript_sibling BLOB, assets_output_index INTEGER, universe_commitments BOOLEAN NOT NULL DEFAULT FALSE); -CREATE INDEX asset_proof_id_idx ON addr_events(asset_proof_id); +CREATE INDEX asset_proof_id_idx ON addr_events (asset_proof_id); CREATE TABLE asset_proofs ( proof_id INTEGER PRIMARY KEY, -- We enforce that this value is unique so we can use an UPSERT to update a -- proof file that already exists. - asset_id BIGINT NOT NULL REFERENCES assets(asset_id) UNIQUE, + asset_id BIGINT NOT NULL REFERENCES assets (asset_id) UNIQUE, -- TODO(roasbef): store the merkle root separately? 
then can refer back to -- for all other files @@ -194,53 +199,53 @@ CREATE TABLE asset_seedlings ( asset_supply BIGINT NOT NULL, - asset_meta_id BIGINT NOT NULL REFERENCES assets_meta(meta_id), + asset_meta_id BIGINT NOT NULL REFERENCES assets_meta (meta_id), emission_enabled BOOLEAN NOT NULL, - batch_id BIGINT NOT NULL REFERENCES asset_minting_batches(batch_id), + batch_id BIGINT NOT NULL REFERENCES asset_minting_batches (batch_id), - group_genesis_id BIGINT REFERENCES genesis_assets(gen_asset_id), + group_genesis_id BIGINT REFERENCES genesis_assets (gen_asset_id), - group_anchor_id BIGINT REFERENCES asset_seedlings(seedling_id) -, script_key_id BIGINT REFERENCES script_keys(script_key_id), group_internal_key_id BIGINT REFERENCES internal_keys(key_id), group_tapscript_root BLOB, delegation_key_id -BIGINT REFERENCES internal_keys(key_id)); + group_anchor_id BIGINT REFERENCES asset_seedlings (seedling_id) +, script_key_id BIGINT REFERENCES script_keys (script_key_id), group_internal_key_id BIGINT REFERENCES internal_keys (key_id), group_tapscript_root BLOB, delegation_key_id +BIGINT REFERENCES internal_keys (key_id)); CREATE TABLE asset_transfer_inputs ( input_id INTEGER PRIMARY KEY, - - transfer_id BIGINT NOT NULL REFERENCES asset_transfers(id), - + + transfer_id BIGINT NOT NULL REFERENCES asset_transfers (id), + anchor_point BLOB NOT NULL, - + asset_id BLOB NOT NULL, - + script_key BLOB NOT NULL, - + amount BIGINT NOT NULL ); CREATE TABLE asset_transfer_outputs ( output_id INTEGER PRIMARY KEY, - - transfer_id BIGINT NOT NULL REFERENCES asset_transfers(id), - - anchor_utxo BIGINT NOT NULL REFERENCES managed_utxos(utxo_id), - - script_key BIGINT NOT NULL REFERENCES script_keys(script_key_id), - + + transfer_id BIGINT NOT NULL REFERENCES asset_transfers (id), + + anchor_utxo BIGINT NOT NULL REFERENCES managed_utxos (utxo_id), + + script_key BIGINT NOT NULL REFERENCES script_keys (script_key_id), + script_key_local BOOL NOT NULL, - + amount BIGINT NOT NULL, asset_version INTEGER NOT NULL, - + serialized_witnesses BLOB, - + split_commitment_root_hash BLOB, - + split_commitment_root_value BIGINT, - + proof_suffix BLOB, num_passive_assets INTEGER NOT NULL, @@ -259,11 +264,11 @@ ON asset_transfer_outputs ( ); CREATE TABLE asset_transfers ( - id INTEGER PRIMARY KEY, + id INTEGER PRIMARY KEY, height_hint INTEGER NOT NULL, - - anchor_txn_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), + + anchor_txn_id BIGINT NOT NULL REFERENCES chain_txns (txn_id), transfer_time_unix TIMESTAMP NOT NULL , label VARCHAR DEFAULT NULL, skip_anchor_tx_broadcast BOOLEAN NOT NULL DEFAULT FALSE); @@ -271,7 +276,7 @@ CREATE TABLE asset_transfers ( CREATE TABLE asset_witnesses ( witness_id INTEGER PRIMARY KEY, - asset_id BIGINT NOT NULL REFERENCES assets(asset_id) ON DELETE CASCADE, + asset_id BIGINT NOT NULL REFERENCES assets (asset_id) ON DELETE CASCADE, prev_out_point BLOB NOT NULL, @@ -287,21 +292,21 @@ CREATE TABLE asset_witnesses ( , witness_index INTEGER NOT NULL DEFAULT -1); CREATE UNIQUE INDEX asset_witnesses_asset_id_witness_index_unique - ON asset_witnesses ( - asset_id, witness_index - ); +ON asset_witnesses ( + asset_id, witness_index +); CREATE TABLE assets ( asset_id INTEGER PRIMARY KEY, - - genesis_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id), + + genesis_id BIGINT NOT NULL REFERENCES genesis_assets (gen_asset_id), version INTEGER NOT NULL, - script_key_id BIGINT NOT NULL REFERENCES script_keys(script_key_id), + script_key_id BIGINT NOT NULL REFERENCES script_keys (script_key_id), -- 
TODO(roasbeef): don't need this after all? - asset_group_witness_id BIGINT REFERENCES asset_group_witnesses(witness_id), + asset_group_witness_id BIGINT REFERENCES asset_group_witnesses (witness_id), -- TODO(roasbeef): make into enum? script_version INTEGER NOT NULL, @@ -318,36 +323,37 @@ CREATE TABLE assets ( split_commitment_root_value BIGINT, - anchor_utxo_id BIGINT REFERENCES managed_utxos(utxo_id), - + anchor_utxo_id BIGINT REFERENCES managed_utxos (utxo_id), + -- A boolean that indicates that the asset was spent. This is only -- set for assets that were transferred in an active manner (as part of an -- user initiated transfer). Passive assets that are just re-anchored are -- updated in-place. spent BOOLEAN NOT NULL DEFAULT FALSE, - - UNIQUE(asset_id, genesis_id, script_key_id) + + UNIQUE (asset_id, genesis_id, script_key_id) ); CREATE UNIQUE INDEX assets_genesis_id_script_key_id_anchor_utxo_id_unique - ON assets ( - genesis_id, script_key_id, anchor_utxo_id - ); +ON assets ( + genesis_id, script_key_id, anchor_utxo_id +); CREATE TABLE assets_meta ( meta_id INTEGER PRIMARY KEY, - meta_data_hash BLOB UNIQUE CHECK(length(meta_data_hash) = 32), + meta_data_hash BLOB UNIQUE CHECK (length(meta_data_hash) = 32), -- TODO(roasbeef): also have other opque blob here for future fields? meta_data_blob BLOB, meta_data_type SMALLINT , meta_decimal_display INTEGER, meta_universe_commitments BOOL, meta_canonical_universes BLOB - CHECK(LENGTH(meta_canonical_universes) <= 4096), meta_delegation_key BLOB - CHECK(LENGTH(meta_delegation_key) <= 33)); +CHECK (LENGTH(meta_canonical_universes) <= 4096), meta_delegation_key BLOB +CHECK (LENGTH(meta_delegation_key) <= 33)); -CREATE INDEX batch_state_lookup on asset_minting_batches (batch_state); +CREATE INDEX batch_state_lookup +ON asset_minting_batches (batch_state); CREATE TABLE chain_txns ( txn_id INTEGER PRIMARY KEY, @@ -365,10 +371,10 @@ CREATE TABLE chain_txns ( tx_index INTEGER ); -CREATE INDEX creation_time_idx ON addr_events(creation_time); +CREATE INDEX creation_time_idx ON addr_events (creation_time); CREATE TABLE federation_global_sync_config ( - proof_type TEXT NOT NULL PRIMARY KEY REFERENCES proof_types(proof_type), + proof_type TEXT NOT NULL PRIMARY KEY REFERENCES proof_types (proof_type), allow_sync_insert BOOLEAN NOT NULL, allow_sync_export BOOLEAN NOT NULL ); @@ -377,7 +383,7 @@ CREATE TABLE federation_proof_sync_log ( id INTEGER PRIMARY KEY, -- The status of the proof sync attempt. - status TEXT NOT NULL CHECK(status IN ('pending', 'complete')), + status TEXT NOT NULL CHECK (status IN ('pending', 'complete')), -- The timestamp of when the log entry for the associated proof was last -- updated. @@ -387,20 +393,20 @@ CREATE TABLE federation_proof_sync_log ( attempt_counter BIGINT NOT NULL DEFAULT 0, -- The direction of the proof sync attempt. - sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')), + sync_direction TEXT NOT NULL CHECK (sync_direction IN ('push', 'pull')), -- The ID of the subject proof leaf. - proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves(id), + proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves (id), -- The ID of the universe that the proof leaf belongs to. - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), -- The ID of the server that the proof will be/was synced to. 
- servers_id BIGINT NOT NULL REFERENCES universe_servers(id) + servers_id BIGINT NOT NULL REFERENCES universe_servers (id) ); -CREATE UNIQUE INDEX federation_proof_sync_log_unique_index_proof_leaf_id_servers_id -ON federation_proof_sync_log ( +CREATE UNIQUE INDEX federation_proof_sync_log_unique_index_proof_leaf_id_servers_id ON +federation_proof_sync_log ( sync_direction, proof_leaf_id, universe_root_id, @@ -414,11 +420,11 @@ CREATE TABLE federation_uni_sync_config ( -- This field contains the byte serialized ID of the asset to which this -- configuration is applicable. - asset_id BLOB CHECK(length(asset_id) = 32) NULL, + asset_id BLOB CHECK (length(asset_id) = 32) NULL, -- This field contains the byte serialized compressed group key public key -- of the asset group to which this configuration is applicable. - group_key BLOB CHECK(LENGTH(group_key) = 33) NULL, + group_key BLOB CHECK (length(group_key) = 33) NULL, -- This field is an enum representing the proof type stored in the given -- universe. @@ -426,12 +432,12 @@ CREATE TABLE federation_uni_sync_config ( -- This field is a boolean that indicates whether or not the given universe -- should accept remote proof export via federation sync. - allow_sync_export BOOLEAN NOT NULL, proof_type TEXT REFERENCES proof_types(proof_type), + allow_sync_export BOOLEAN NOT NULL, proof_type TEXT REFERENCES proof_types (proof_type), -- Both the asset ID and group key cannot be null at the same time. CHECK ( - (asset_id IS NOT NULL AND group_key IS NULL) OR - (asset_id IS NULL AND group_key IS NOT NULL) + (asset_id IS NOT NULL AND group_key IS NULL) + OR (asset_id IS NULL AND group_key IS NOT NULL) ) ); @@ -442,7 +448,7 @@ CREATE TABLE genesis_assets ( asset_tag TEXT NOT NULL, - meta_data_id BIGINT REFERENCES assets_meta(meta_id), + meta_data_id BIGINT REFERENCES assets_meta (meta_id), output_index INTEGER NOT NULL, @@ -450,23 +456,29 @@ CREATE TABLE genesis_assets ( -- BIP PR asset_type SMALLINT NOT NULL, - genesis_point_id BIGINT NOT NULL REFERENCES genesis_points(genesis_id) + genesis_point_id BIGINT NOT NULL REFERENCES genesis_points (genesis_id) ); CREATE VIEW genesis_info_view AS - SELECT - gen_asset_id, asset_id, asset_tag, assets_meta.meta_data_hash meta_hash, - output_index, asset_type, genesis_points.prev_out prev_out, - chain_txns.txid anchor_txid, block_height - FROM genesis_assets - -- We do a LEFT JOIN here, as not every asset has a set of - -- metadata that matches the asset. - LEFT JOIN assets_meta - ON genesis_assets.meta_data_id = assets_meta.meta_id - JOIN genesis_points - ON genesis_assets.genesis_point_id = genesis_points.genesis_id - LEFT JOIN chain_txns - ON genesis_points.anchor_tx_id = chain_txns.txn_id; +SELECT + genesis_assets.gen_asset_id, + genesis_assets.asset_id, + genesis_assets.asset_tag, + assets_meta.meta_data_hash AS meta_hash, + genesis_assets.output_index, + genesis_assets.asset_type, + genesis_points.prev_out, + chain_txns.txid AS anchor_txid, + chain_txns.block_height +FROM genesis_assets +-- We do a LEFT JOIN here, as not every asset has a set of +-- metadata that matches the asset. +LEFT JOIN assets_meta + ON genesis_assets.meta_data_id = assets_meta.meta_id +JOIN genesis_points + ON genesis_assets.genesis_point_id = genesis_points.genesis_id +LEFT JOIN chain_txns + ON genesis_points.anchor_tx_id = chain_txns.txn_id; CREATE TABLE genesis_points ( genesis_id INTEGER PRIMARY KEY, @@ -474,20 +486,22 @@ CREATE TABLE genesis_points ( -- TODO(roasbeef): just need the input index here instead? 
prev_out BLOB UNIQUE NOT NULL, - anchor_tx_id BIGINT REFERENCES chain_txns(txn_id) + anchor_tx_id BIGINT REFERENCES chain_txns (txn_id) ); -CREATE INDEX idx_mssmt_nodes_composite -ON mssmt_nodes(namespace, key, hash_key, sum); +CREATE INDEX idx_mssmt_nodes_composite +ON mssmt_nodes (namespace, key, hash_key, sum); -CREATE INDEX idx_universe_roots_composite ON universe_roots(namespace_root, proof_type, asset_id); +CREATE INDEX idx_universe_roots_composite ON universe_roots ( + namespace_root, proof_type, asset_id +); CREATE TABLE internal_keys ( key_id INTEGER PRIMARY KEY, -- We'll always store the full 33-byte key on disk, to make sure we're -- retaining full information. - raw_key BLOB NOT NULL UNIQUE CHECK(length(raw_key) = 33), + raw_key BLOB NOT NULL UNIQUE CHECK (length(raw_key) = 33), key_family INTEGER NOT NULL, @@ -496,26 +510,35 @@ CREATE TABLE internal_keys ( CREATE VIEW key_group_info_view AS SELECT - groups.version, witness_id, gen_asset_id, witness_stack, tapscript_root, - tweaked_group_key, raw_key, key_index, key_family, - substr(tweaked_group_key, 2) AS x_only_group_key, + grp.version, + wit.witness_id, + wit.gen_asset_id, + wit.witness_stack, + grp.tapscript_root, + grp.tweaked_group_key, + keys.raw_key, + keys.key_index, + keys.key_family, + substr(grp.tweaked_group_key, 2) AS x_only_group_key, tapscript_roots.root_hash AS custom_subtree_root -FROM asset_group_witnesses wit - JOIN asset_groups groups - ON wit.group_key_id = groups.group_id - JOIN internal_keys keys - ON keys.key_id = groups.internal_key_id - - -- Include the tapscript root hash for the custom subtree. Here we use - -- a LEFT JOIN to allow for the case where a group does not have a - -- custom subtree in which case the custom_subtree_root will be NULL. - LEFT JOIN tapscript_roots - ON groups.custom_subtree_root_id = tapscript_roots.root_id -WHERE wit.gen_asset_id IN (SELECT gen_asset_id FROM genesis_info_view); +FROM asset_group_witnesses AS wit +JOIN asset_groups AS grp + ON wit.group_key_id = grp.group_id +JOIN internal_keys AS keys + ON grp.internal_key_id = keys.key_id + +-- Include the tapscript root hash for the custom subtree. Here we use +-- a LEFT JOIN to allow for the case where a group does not have a +-- custom subtree in which case the custom_subtree_root will be NULL. +LEFT JOIN tapscript_roots + ON grp.custom_subtree_root_id = tapscript_roots.root_id +WHERE wit.gen_asset_id IN ( + SELECT giv.gen_asset_id FROM genesis_info_view AS giv +); CREATE TABLE macaroons ( id BLOB PRIMARY KEY, - root_key BLOB NOT NULL + root_key BLOB NOT NULL ); CREATE TABLE managed_utxos ( @@ -527,10 +550,10 @@ CREATE TABLE managed_utxos ( -- 64 bit issues? amt_sats BIGINT NOT NULL, - internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + internal_key_id BIGINT NOT NULL REFERENCES internal_keys (key_id), -- The Taproot Asset root commitment hash. - taproot_asset_root BLOB NOT NULL CHECK(length(taproot_asset_root) = 32), + taproot_asset_root BLOB NOT NULL CHECK (length(taproot_asset_root) = 32), -- The serialized tapscript sibling preimage. If this is empty then the -- Taproot Asset root commitment is equal to the merkle_root below. @@ -540,16 +563,16 @@ CREATE TABLE managed_utxos ( -- corresponds to the Taproot Asset root commitment hash. 
-- -- TODO(roasbeef): can then reconstruct on start up to ensure matches up - merkle_root BLOB NOT NULL CHECK(length(merkle_root) = 32), + merkle_root BLOB NOT NULL CHECK (length(merkle_root) = 32), - txn_id BIGINT NOT NULL REFERENCES chain_txns(txn_id), + txn_id BIGINT NOT NULL REFERENCES chain_txns (txn_id), -- The identity of the application that currently has a lease on this UTXO. -- If NULL, then the UTXO is not currently leased. A lease means that the -- UTXO is being reserved/locked to be spent in an upcoming transaction and -- that it should not be available for coin selection through any of the -- wallet RPCs. - lease_owner BLOB CHECK(length(lease_owner) = 32), + lease_owner BLOB CHECK (length(lease_owner) = 32), -- The absolute expiry of the lease in seconds as a Unix timestamp. If the -- expiry is NULL or the timestamp is in the past, then the lease is not @@ -561,7 +584,8 @@ CREATE TABLE mint_anchor_uni_commitments ( id INTEGER PRIMARY KEY, -- The ID of the minting batch this universe commitment relates to. - batch_id INTEGER NOT NULL REFERENCES asset_minting_batches(batch_id), + batch_id INTEGER NOT NULL -- noqa: LL01 + REFERENCES asset_minting_batches (batch_id), -- TODO(guggero): Use BIGINT. -- The index of the mint batch anchor transaction pre-commitment output. tx_output_index INTEGER NOT NULL, @@ -569,27 +593,27 @@ CREATE TABLE mint_anchor_uni_commitments ( -- The Taproot output internal key for the pre-commitment output. group_key BLOB , taproot_internal_key_id -BIGINT REFERENCES internal_keys(key_id) +BIGINT REFERENCES internal_keys (key_id) NOT NULL); CREATE UNIQUE INDEX mint_anchor_uni_commitments_unique - ON mint_anchor_uni_commitments (batch_id, tx_output_index); +ON mint_anchor_uni_commitments (batch_id, tx_output_index); CREATE TABLE mssmt_nodes ( -- hash_key is the hash key by which we reference all nodes. hash_key BLOB NOT NULL, - + -- l_hash_key is the hash key of the left child or NULL. If this is a -- branch then either l_hash_key or r_hash_key is not NULL. l_hash_key BLOB, - + -- r_hash_key is the hash key of the right child or NULL. If this is a -- branch then either l_hash_key or r_hash_key is not NULL. r_hash_key BLOB, - + -- key is the leaf key if this is a compacted leaf node. key BLOB, - + -- value is the leaf value if this is a leaf node. value BLOB, @@ -606,9 +630,13 @@ CREATE TABLE mssmt_nodes ( PRIMARY KEY (hash_key, namespace) ); -CREATE INDEX mssmt_nodes_l_hash_key_idx ON mssmt_nodes (l_hash_key); +CREATE INDEX mssmt_nodes_l_hash_key_idx ON mssmt_nodes ( + l_hash_key +); -CREATE INDEX mssmt_nodes_r_hash_key_idx ON mssmt_nodes (r_hash_key); +CREATE INDEX mssmt_nodes_r_hash_key_idx ON mssmt_nodes ( + r_hash_key +); CREATE TABLE mssmt_roots ( -- namespace allows us to store several root hash pointers for distinct @@ -618,28 +646,30 @@ CREATE TABLE mssmt_roots ( -- root_hash points to the root hash node of the MS-SMT tree. 
root_hash BLOB NOT NULL, - FOREIGN KEY (namespace, root_hash) REFERENCES mssmt_nodes (namespace, hash_key) ON DELETE CASCADE + FOREIGN KEY (namespace, root_hash) REFERENCES mssmt_nodes ( + namespace, hash_key + ) ON DELETE CASCADE ); CREATE TABLE multiverse_leaves ( id INTEGER PRIMARY KEY, - multiverse_root_id BIGINT NOT NULL REFERENCES multiverse_roots(id), + multiverse_root_id BIGINT NOT NULL REFERENCES multiverse_roots (id), - asset_id BLOB CHECK(length(asset_id) = 32), + asset_id BLOB CHECK (length(asset_id) = 32), -- We use the 32 byte schnorr key here as this is what's used to derive the -- top-level Taproot Asset commitment key. - group_key BLOB CHECK(LENGTH(group_key) = 32), - + group_key BLOB CHECK (length(group_key) = 32), + leaf_node_key BLOB NOT NULL, leaf_node_namespace VARCHAR NOT NULL, -- Both the asset ID and group key cannot be null at the same time. CHECK ( - (asset_id IS NOT NULL AND group_key IS NULL) OR - (asset_id IS NULL AND group_key IS NOT NULL) + (asset_id IS NOT NULL AND group_key IS NULL) + OR (asset_id IS NULL AND group_key IS NOT NULL) ) ); @@ -655,21 +685,23 @@ CREATE TABLE multiverse_roots ( -- root of the SMT is deleted temporarily before inserting a new root, then -- this constraint is violated as there's no longer a root that this -- universe tree can point to. - namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots(namespace) DEFERRABLE INITIALLY DEFERRED, + namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots ( + namespace + ) DEFERRABLE INITIALLY DEFERRED, -- This field is an enum representing the proof type stored in the given -- universe. - proof_type TEXT NOT NULL CHECK(proof_type IN ('issuance', 'transfer')) + proof_type TEXT NOT NULL CHECK (proof_type IN ('issuance', 'transfer')) ); CREATE TABLE passive_assets ( passive_id INTEGER PRIMARY KEY, - transfer_id BIGINT NOT NULL REFERENCES asset_transfers(id), + transfer_id BIGINT NOT NULL REFERENCES asset_transfers (id), + + asset_id BIGINT NOT NULL REFERENCES assets (asset_id), - asset_id BIGINT NOT NULL REFERENCES assets(asset_id), - - new_anchor_utxo BIGINT NOT NULL REFERENCES managed_utxos(utxo_id), + new_anchor_utxo BIGINT NOT NULL REFERENCES managed_utxos (utxo_id), script_key BLOB NOT NULL, @@ -681,7 +713,7 @@ CREATE TABLE passive_assets ( ); CREATE INDEX passive_assets_idx - ON passive_assets (transfer_id); +ON passive_assets (transfer_id); CREATE INDEX proof_locator_hash_index ON proof_transfer_log (proof_locator_hash); @@ -691,7 +723,7 @@ CREATE TABLE proof_transfer_log ( -- delivery to the transfer counterparty or receiving a proof from the -- transfer counterparty. Note that the transfer counterparty is usually -- the proof courier service. - transfer_type TEXT NOT NULL CHECK(transfer_type IN ('send', 'receive')), + transfer_type TEXT NOT NULL CHECK (transfer_type IN ('send', 'receive')), proof_locator_hash BLOB NOT NULL, @@ -707,98 +739,110 @@ CREATE TABLE script_keys ( -- The actual internal key here that we hold the private key for. Applying -- the tweak to this gives us the tweaked_script_key. - internal_key_id BIGINT NOT NULL REFERENCES internal_keys(key_id), + internal_key_id BIGINT NOT NULL REFERENCES internal_keys (key_id), -- The script key after applying the tweak. This is what goes directly in -- the asset TLV. - tweaked_script_key BLOB NOT NULL UNIQUE CHECK(length(tweaked_script_key) = 33), + tweaked_script_key BLOB NOT NULL UNIQUE + CHECK (length(tweaked_script_key) = 33), -- An optional tweak for the script_key. 
If NULL, the raw_key may be -- tweaked BIP-0086 style. tweak BLOB , key_type SMALLINT); -CREATE INDEX status_idx ON addr_events(status); +CREATE INDEX status_idx ON addr_events (status); CREATE TABLE tapscript_edges ( - edge_id INTEGER PRIMARY KEY, + edge_id INTEGER PRIMARY KEY, - -- The root hash of a tree that includes the referenced tapscript node. - root_hash_id BIGINT NOT NULL REFERENCES tapscript_roots(root_id), + -- The root hash of a tree that includes the referenced tapscript node. + root_hash_id BIGINT NOT NULL REFERENCES tapscript_roots (root_id), - -- The index of the referenced node in the tapscript tree, which is - -- needed to correctly reconstruct the tapscript tree. - node_index BIGINT NOT NULL, + -- The index of the referenced node in the tapscript tree, which is + -- needed to correctly reconstruct the tapscript tree. + node_index BIGINT NOT NULL, - -- The tapscript node referenced by this edge. - raw_node_id BIGINT NOT NULL REFERENCES tapscript_nodes(node_id) + -- The tapscript node referenced by this edge. + raw_node_id BIGINT NOT NULL REFERENCES tapscript_nodes (node_id) ); CREATE UNIQUE INDEX tapscript_edges_unique ON tapscript_edges ( - root_hash_id, node_index, raw_node_id + root_hash_id, node_index, raw_node_id ); CREATE TABLE tapscript_nodes ( - node_id INTEGER PRIMARY KEY, + node_id INTEGER PRIMARY KEY, - -- The serialized tapscript node, which may be a tapHash or tapLeaf. - raw_node BLOB NOT NULL UNIQUE + -- The serialized tapscript node, which may be a tapHash or tapLeaf. + raw_node BLOB NOT NULL UNIQUE ); CREATE TABLE tapscript_roots ( - root_id INTEGER PRIMARY KEY, + root_id INTEGER PRIMARY KEY, - -- The root hash of a tapscript tree. - root_hash BLOB NOT NULL UNIQUE CHECK(length(root_hash) = 32), + -- The root hash of a tapscript tree. + root_hash BLOB NOT NULL UNIQUE CHECK (length(root_hash) = 32), - -- A flag to record if a tapscript tree was stored as two tapHashes, or - -- a set of tapLeafs. - branch_only BOOLEAN NOT NULL DEFAULT FALSE + -- A flag to record if a tapscript tree was stored as two tapHashes, or + -- a set of tapLeafs. + branch_only BOOLEAN NOT NULL DEFAULT FALSE ); CREATE INDEX transfer_inputs_idx - ON asset_transfer_inputs (transfer_id); +ON asset_transfer_inputs (transfer_id); CREATE INDEX transfer_outputs_idx - ON asset_transfer_outputs (transfer_id); +ON asset_transfer_outputs (transfer_id); CREATE INDEX transfer_time_idx - ON asset_transfers (transfer_time_unix); +ON asset_transfers (transfer_time_unix); CREATE INDEX transfer_txn_idx - ON asset_transfers (anchor_txn_id); +ON asset_transfers (anchor_txn_id); CREATE TABLE universe_events ( event_id INTEGER PRIMARY KEY, - event_type VARCHAR NOT NULL CHECK (event_type IN ('SYNC', 'NEW_PROOF', 'NEW_ROOT')), + event_type VARCHAR NOT NULL CHECK ( + event_type IN ('SYNC', 'NEW_PROOF', 'NEW_ROOT') + ), - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), -- TODO(roasbeef): also add which leaf was synced? 
event_time TIMESTAMP NOT NULL , event_timestamp BIGINT NOT NULL DEFAULT 0); -CREATE INDEX universe_events_event_time_idx ON universe_events(event_time); +CREATE INDEX universe_events_event_time_idx ON universe_events ( + event_time +); -CREATE INDEX universe_events_type_idx ON universe_events(event_type); +CREATE INDEX universe_events_type_idx ON universe_events ( + event_type +); CREATE TABLE "universe_leaves" ( id INTEGER PRIMARY KEY, - asset_genesis_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id), + asset_genesis_id BIGINT NOT NULL REFERENCES genesis_assets (gen_asset_id), minting_point BLOB NOT NULL, - script_key_bytes BLOB NOT NULL CHECK(LENGTH(script_key_bytes) = 32), - universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id), + script_key_bytes BLOB NOT NULL CHECK (LENGTH(script_key_bytes) = 32), + universe_root_id BIGINT NOT NULL REFERENCES universe_roots (id), leaf_node_key BLOB, leaf_node_namespace VARCHAR NOT NULL ); -CREATE INDEX universe_leaves_key_idx ON universe_leaves(leaf_node_key); +CREATE INDEX universe_leaves_key_idx ON universe_leaves ( + leaf_node_key +); -CREATE INDEX universe_leaves_namespace ON universe_leaves(leaf_node_namespace); +CREATE INDEX universe_leaves_namespace ON universe_leaves ( + leaf_node_namespace +); -CREATE UNIQUE INDEX universe_leaves_unique_minting_script_namespace ON "universe_leaves"(minting_point, script_key_bytes, leaf_node_namespace); +CREATE UNIQUE INDEX universe_leaves_unique_minting_script_namespace +ON "universe_leaves" (minting_point, script_key_bytes, leaf_node_namespace); CREATE TABLE universe_roots ( id INTEGER PRIMARY KEY, @@ -808,21 +852,27 @@ CREATE TABLE universe_roots ( -- root of the SMT is deleted temporarily before inserting a new root, then -- this constraint is violated as there's no longer a root that this -- universe tree can point to. - namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots(namespace) DEFERRABLE INITIALLY DEFERRED, + namespace_root VARCHAR UNIQUE NOT NULL REFERENCES mssmt_roots ( + namespace + ) DEFERRABLE INITIALLY DEFERRED, asset_id BLOB, -- We use the 32 byte schnorr key here as this is what's used to derive the -- top-level Taproot Asset commitment key. - group_key BLOB CHECK(LENGTH(group_key) = 32), + group_key BLOB CHECK (LENGTH(group_key) = 32), -- This field is an enum representing the proof type stored in the given -- universe. - proof_type TEXT REFERENCES proof_types(proof_type)); + proof_type TEXT REFERENCES proof_types (proof_type)); -CREATE INDEX universe_roots_asset_id_idx ON universe_roots(asset_id); +CREATE INDEX universe_roots_asset_id_idx ON universe_roots ( + asset_id +); -CREATE INDEX universe_roots_group_key_idx ON universe_roots(group_key); +CREATE INDEX universe_roots_group_key_idx ON universe_roots ( + group_key +); CREATE TABLE universe_servers ( id INTEGER PRIMARY KEY, @@ -833,44 +883,58 @@ CREATE TABLE universe_servers ( last_sync_time TIMESTAMP NOT NULL - -- TODO(roasbeef): can also add stuff like filters re which items to sync, - -- etc? also sync mode, ones that should get everything pushed, etc +-- TODO(roasbeef): can also add stuff like filters re which items to sync, +-- etc? 
also sync mode, ones that should get everything pushed, etc ); -CREATE INDEX universe_servers_host ON universe_servers(server_host); +CREATE INDEX universe_servers_host ON universe_servers ( + server_host +); CREATE VIEW universe_stats AS WITH sync_counts AS ( - SELECT universe_root_id, COUNT(*) AS count + SELECT + universe_root_id, + COUNT(*) AS count FROM universe_events WHERE event_type = 'SYNC' GROUP BY universe_root_id -), proof_counts AS ( - SELECT universe_root_id, event_type, COUNT(*) AS count +), + +proof_counts AS ( + SELECT + universe_root_id, + event_type, + COUNT(*) AS count FROM universe_events WHERE event_type = 'NEW_PROOF' GROUP BY universe_root_id, event_type -), aggregated AS ( - SELECT COALESCE(SUM(count), 0) as total_asset_syncs, - 0 AS total_asset_proofs, - universe_root_id +), + +aggregated AS ( + SELECT + COALESCE(SUM(count), 0) AS total_asset_syncs, + 0 AS total_asset_proofs, + universe_root_id FROM sync_counts GROUP BY universe_root_id UNION ALL - SELECT 0 AS total_asset_syncs, - COALESCE(SUM(count), 0) as total_asset_proofs, - universe_root_id + SELECT + 0 AS total_asset_syncs, + COALESCE(SUM(count), 0) AS total_asset_proofs, + universe_root_id FROM proof_counts GROUP BY universe_root_id ) + SELECT - SUM(ag.total_asset_syncs) AS total_asset_syncs, - SUM(ag.total_asset_proofs) AS total_asset_proofs, roots.asset_id, roots.group_key, - roots.proof_type -FROM aggregated ag -JOIN universe_roots roots + roots.proof_type, + SUM(ag.total_asset_syncs) AS total_asset_syncs, + SUM(ag.total_asset_proofs) AS total_asset_proofs +FROM aggregated AS ag +INNER JOIN universe_roots AS roots ON ag.universe_root_id = roots.id GROUP BY roots.asset_id, roots.group_key, roots.proof_type ORDER BY roots.asset_id, roots.group_key, roots.proof_type; From 05fb8a59bdd3753f91ced95eef05163d3f653cb6 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Fri, 20 Jun 2025 15:46:04 +0200 Subject: [PATCH 2/5] sqldb: fix sqlfluff complaints for queries --- tapdb/sqlc/addrs.sql.go | 213 +++-- tapdb/sqlc/assets.sql.go | 1026 ++++++++++++++--------- tapdb/sqlc/macaroons.sql.go | 2 +- tapdb/sqlc/metadata.sql.go | 4 +- tapdb/sqlc/mssmt.sql.go | 99 ++- tapdb/sqlc/querier.go | 44 + tapdb/sqlc/queries/addrs.sql | 213 +++-- tapdb/sqlc/queries/assets.sql | 1026 ++++++++++++++--------- tapdb/sqlc/queries/macaroons.sql | 2 +- tapdb/sqlc/queries/metadata.sql | 4 +- tapdb/sqlc/queries/mssmt.sql | 99 ++- tapdb/sqlc/queries/transfers.sql | 179 ++-- tapdb/sqlc/queries/universe.sql | 669 ++++++++++----- tapdb/sqlc/schemas/generated_schema.sql | 6 +- tapdb/sqlc/transfers.sql.go | 179 ++-- tapdb/sqlc/universe.sql.go | 669 ++++++++++----- 16 files changed, 2785 insertions(+), 1649 deletions(-) diff --git a/tapdb/sqlc/addrs.sql.go b/tapdb/sqlc/addrs.sql.go index 75b395afd..c5ce18f8e 100644 --- a/tapdb/sqlc/addrs.sql.go +++ b/tapdb/sqlc/addrs.sql.go @@ -13,22 +13,30 @@ import ( const FetchAddrByTaprootOutputKey = `-- name: FetchAddrByTaprootOutputKey :one SELECT - version, asset_version, genesis_asset_id, group_key, tapscript_sibling, - taproot_output_key, amount, asset_type, creation_time, managed_from, - proof_courier_addr, - script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, - raw_script_keys.key_id, raw_script_keys.raw_key, raw_script_keys.key_family, raw_script_keys.key_index, + addrs.version, + addrs.asset_version, + addrs.genesis_asset_id, + addrs.group_key, + addrs.tapscript_sibling, + addrs.taproot_output_key, + addrs.amount, + 
addrs.asset_type, + addrs.creation_time, + addrs.managed_from, + addrs.proof_courier_addr, + script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, -- noqa: RF02,AL03 + raw_script_keys.key_id, raw_script_keys.raw_key, raw_script_keys.key_family, raw_script_keys.key_index, -- noqa: RF02,AL03 taproot_keys.raw_key AS raw_taproot_key, taproot_keys.key_family AS taproot_key_family, taproot_keys.key_index AS taproot_key_index FROM addrs JOIN script_keys - ON addrs.script_key_id = script_keys.script_key_id -JOIN internal_keys raw_script_keys - ON script_keys.internal_key_id = raw_script_keys.key_id -JOIN internal_keys taproot_keys - ON addrs.taproot_key_id = taproot_keys.key_id -WHERE taproot_output_key = $1 + ON addrs.script_key_id = script_keys.script_key_id +JOIN internal_keys AS raw_script_keys + ON script_keys.internal_key_id = raw_script_keys.key_id +JOIN internal_keys AS taproot_keys + ON addrs.taproot_key_id = taproot_keys.key_id +WHERE addrs.taproot_output_key = $1 ` type FetchAddrByTaprootOutputKeyRow struct { @@ -83,21 +91,24 @@ func (q *Queries) FetchAddrByTaprootOutputKey(ctx context.Context, taprootOutput const FetchAddrEvent = `-- name: FetchAddrEvent :one SELECT - creation_time, status, asset_proof_id, asset_id, - chain_txns.txid as txid, - chain_txns.block_height as confirmation_height, - chain_txn_output_index as output_index, - managed_utxos.amt_sats as amt_sats, - managed_utxos.tapscript_sibling as tapscript_sibling, - internal_keys.raw_key as internal_key + addr_events.creation_time, + addr_events.status, + addr_events.asset_proof_id, + addr_events.asset_id, + chain_txns.txid, + chain_txns.block_height AS confirmation_height, + addr_events.chain_txn_output_index AS output_index, + managed_utxos.amt_sats, + managed_utxos.tapscript_sibling, + internal_keys.raw_key AS internal_key FROM addr_events LEFT JOIN chain_txns - ON addr_events.chain_txn_id = chain_txns.txn_id + ON addr_events.chain_txn_id = chain_txns.txn_id LEFT JOIN managed_utxos - ON addr_events.managed_utxo_id = managed_utxos.utxo_id + ON addr_events.managed_utxo_id = managed_utxos.utxo_id LEFT JOIN internal_keys - ON managed_utxos.internal_key_id = internal_keys.key_id -WHERE id = $1 + ON managed_utxos.internal_key_id = internal_keys.key_id +WHERE addr_events.id = $1 ` type FetchAddrEventRow struct { @@ -132,30 +143,36 @@ func (q *Queries) FetchAddrEvent(ctx context.Context, id int64) (FetchAddrEventR } const FetchAddrEventByAddrKeyAndOutpoint = `-- name: FetchAddrEventByAddrKeyAndOutpoint :one -WITH target_addr(addr_id) AS ( - SELECT id +WITH target_addr (addr_id) AS ( + SELECT addrs.id FROM addrs WHERE addrs.taproot_output_key = $1 ) + SELECT - addr_events.id, creation_time, status, asset_proof_id, asset_id, - chain_txns.txid as txid, - chain_txns.block_height as confirmation_height, - chain_txn_output_index as output_index, - managed_utxos.amt_sats as amt_sats, - managed_utxos.tapscript_sibling as tapscript_sibling, - internal_keys.raw_key as internal_key + addr_events.id, + addr_events.creation_time, + addr_events.status, + addr_events.asset_proof_id, + addr_events.asset_id, + chain_txns.txid, + chain_txns.block_height AS confirmation_height, + addr_events.chain_txn_output_index AS output_index, + managed_utxos.amt_sats, + managed_utxos.tapscript_sibling, + internal_keys.raw_key AS internal_key FROM addr_events JOIN target_addr - ON addr_events.addr_id = target_addr.addr_id + ON addr_events.addr_id = target_addr.addr_id LEFT JOIN 
chain_txns - ON addr_events.chain_txn_id = chain_txns.txn_id + ON addr_events.chain_txn_id = chain_txns.txn_id LEFT JOIN managed_utxos - ON addr_events.managed_utxo_id = managed_utxos.utxo_id + ON addr_events.managed_utxo_id = managed_utxos.utxo_id LEFT JOIN internal_keys - ON managed_utxos.internal_key_id = internal_keys.key_id -WHERE chain_txns.txid = $2 - AND chain_txn_output_index = $3 + ON managed_utxos.internal_key_id = internal_keys.key_id +WHERE + chain_txns.txid = $2 + AND addr_events.chain_txn_output_index = $3 ` type FetchAddrEventByAddrKeyAndOutpointParams struct { @@ -198,26 +215,37 @@ func (q *Queries) FetchAddrEventByAddrKeyAndOutpoint(ctx context.Context, arg Fe } const FetchAddrs = `-- name: FetchAddrs :many -SELECT - version, asset_version, genesis_asset_id, group_key, tapscript_sibling, - taproot_output_key, amount, asset_type, creation_time, managed_from, - proof_courier_addr, - script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, - raw_script_keys.key_id, raw_script_keys.raw_key, raw_script_keys.key_family, raw_script_keys.key_index, - taproot_keys.raw_key AS raw_taproot_key, +SELECT + addrs.version, + addrs.asset_version, + addrs.genesis_asset_id, + addrs.group_key, + addrs.tapscript_sibling, + addrs.taproot_output_key, + addrs.amount, + addrs.asset_type, + addrs.creation_time, + addrs.managed_from, + addrs.proof_courier_addr, + script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, -- noqa: RF02,AL03 + raw_script_keys.key_id, raw_script_keys.raw_key, raw_script_keys.key_family, raw_script_keys.key_index, -- noqa: RF02,AL03 + taproot_keys.raw_key AS raw_taproot_key, taproot_keys.key_family AS taproot_key_family, taproot_keys.key_index AS taproot_key_index FROM addrs JOIN script_keys ON addrs.script_key_id = script_keys.script_key_id -JOIN internal_keys raw_script_keys +JOIN internal_keys AS raw_script_keys ON script_keys.internal_key_id = raw_script_keys.key_id -JOIN internal_keys taproot_keys +JOIN internal_keys AS taproot_keys ON addrs.taproot_key_id = taproot_keys.key_id -WHERE creation_time >= $1 - AND creation_time <= $2 - AND ($3 = false OR - (CASE WHEN managed_from IS NULL THEN true ELSE false END) = $3) +WHERE + addrs.creation_time >= $1 + AND addrs.creation_time <= $2 + AND ( + $3 = FALSE + OR (coalesce(addrs.managed_from IS NULL, FALSE)) = $3 + ) ORDER BY addrs.creation_time LIMIT $5 OFFSET $4 ` @@ -304,15 +332,18 @@ func (q *Queries) FetchAddrs(ctx context.Context, arg FetchAddrsParams) ([]Fetch const QueryEventIDs = `-- name: QueryEventIDs :many SELECT - addr_events.id as event_id, addrs.taproot_output_key as taproot_output_key + addr_events.id AS event_id, + addrs.taproot_output_key FROM addr_events JOIN addrs - ON addr_events.addr_id = addrs.id -WHERE addr_events.status >= $1 - AND addr_events.status <= $2 - AND COALESCE($3, addrs.taproot_output_key) = addrs.taproot_output_key - AND addr_events.creation_time >= $4 -ORDER by addr_events.creation_time + ON addr_events.addr_id = addrs.id +WHERE + addr_events.status >= $1 + AND addr_events.status <= $2 + AND coalesce($3, addrs.taproot_output_key) + = addrs.taproot_output_key + AND addr_events.creation_time >= $4 +ORDER BY addr_events.creation_time ` type QueryEventIDsParams struct { @@ -356,11 +387,12 @@ func (q *Queries) QueryEventIDs(ctx context.Context, arg QueryEventIDsParams) ([ } const SetAddrManaged = `-- name: SetAddrManaged :exec -WITH 
target_addr(addr_id) AS ( - SELECT id +WITH target_addr (addr_id) AS ( + SELECT addrs.id FROM addrs WHERE addrs.taproot_output_key = $1 ) + UPDATE addrs SET managed_from = $2 WHERE id = (SELECT addr_id FROM target_addr) @@ -392,31 +424,27 @@ INSERT INTO addrs ( proof_courier_addr ) VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12 -) +) ON CONFLICT (taproot_output_key) DO UPDATE SET - -- If the WHERE clause below is true (exact match on all other fields, - -- except for creation_time), we set taproot_output_key to its current - -- conflicting value. This is a no-op in terms of data change but allows - -- RETURNING id to work on the existing row. - taproot_output_key = excluded.taproot_output_key -WHERE - addrs.version = excluded.version - AND addrs.asset_version = excluded.asset_version - AND addrs.genesis_asset_id = excluded.genesis_asset_id - AND ( - (addrs.group_key IS NULL AND excluded.group_key IS NULL) - OR addrs.group_key = excluded.group_key - ) - AND addrs.script_key_id = excluded.script_key_id - AND addrs.taproot_key_id = excluded.taproot_key_id - AND ( - (addrs.tapscript_sibling IS NULL AND excluded.tapscript_sibling IS NULL) - OR addrs.tapscript_sibling = excluded.tapscript_sibling - ) - AND addrs.amount = excluded.amount - AND addrs.asset_type = excluded.asset_type - AND addrs.proof_courier_addr = excluded.proof_courier_addr +taproot_output_key = excluded.taproot_output_key +WHERE +addrs.version = excluded.version +AND addrs.asset_version = excluded.asset_version +AND addrs.genesis_asset_id = excluded.genesis_asset_id +AND ( + (addrs.group_key IS NULL AND excluded.group_key IS NULL) + OR addrs.group_key = excluded.group_key +) +AND addrs.script_key_id = excluded.script_key_id +AND addrs.taproot_key_id = excluded.taproot_key_id +AND ( + (addrs.tapscript_sibling IS NULL AND excluded.tapscript_sibling IS NULL) + OR addrs.tapscript_sibling = excluded.tapscript_sibling +) +AND addrs.amount = excluded.amount +AND addrs.asset_type = excluded.asset_type +AND addrs.proof_courier_addr = excluded.proof_courier_addr RETURNING id ` @@ -435,6 +463,10 @@ type UpsertAddrParams struct { ProofCourierAddr []byte } +// If the WHERE clause below is true (exact match on all other fields, +// except for creation_time), we set taproot_output_key to its current +// conflicting value. This is a no-op in terms of data change but allows +// RETURNING id to work on the existing row. 
func (q *Queries) UpsertAddr(ctx context.Context, arg UpsertAddrParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertAddr, arg.Version, @@ -456,15 +488,18 @@ func (q *Queries) UpsertAddr(ctx context.Context, arg UpsertAddrParams) (int64, } const UpsertAddrEvent = `-- name: UpsertAddrEvent :one -WITH target_addr(addr_id) AS ( - SELECT id +WITH target_addr (addr_id) AS ( + SELECT addrs.id FROM addrs WHERE addrs.taproot_output_key = $1 -), target_chain_txn(txn_id) AS ( - SELECT txn_id +), + +target_chain_txn (txn_id) AS ( + SELECT chain_txns.txn_id FROM chain_txns WHERE chain_txns.txid = $2 ) + INSERT INTO addr_events ( creation_time, addr_id, status, chain_txn_id, chain_txn_output_index, managed_utxo_id, asset_proof_id, asset_id @@ -473,9 +508,9 @@ INSERT INTO addr_events ( (SELECT txn_id FROM target_chain_txn), $5, $6, $7, $8 ) ON CONFLICT (addr_id, chain_txn_id, chain_txn_output_index) - DO UPDATE SET status = EXCLUDED.status, - asset_proof_id = COALESCE(EXCLUDED.asset_proof_id, addr_events.asset_proof_id), - asset_id = COALESCE(EXCLUDED.asset_id, addr_events.asset_id) +DO UPDATE SET status = excluded.status, +asset_proof_id = coalesce(excluded.asset_proof_id, addr_events.asset_proof_id), +asset_id = coalesce(excluded.asset_id, addr_events.asset_id) RETURNING id ` diff --git a/tapdb/sqlc/assets.sql.go b/tapdb/sqlc/assets.sql.go index 23a5ea693..56072f395 100644 --- a/tapdb/sqlc/assets.sql.go +++ b/tapdb/sqlc/assets.sql.go @@ -12,7 +12,7 @@ import ( ) const AllAssets = `-- name: AllAssets :many -SELECT asset_id, genesis_id, version, script_key_id, asset_group_witness_id, script_version, amount, lock_time, relative_lock_time, split_commitment_root_hash, split_commitment_root_value, anchor_utxo_id, spent +SELECT asset_id, genesis_id, version, script_key_id, asset_group_witness_id, script_version, amount, lock_time, relative_lock_time, split_commitment_root_hash, split_commitment_root_value, anchor_utxo_id, spent FROM assets ` @@ -54,7 +54,7 @@ func (q *Queries) AllAssets(ctx context.Context) ([]Asset, error) { } const AllInternalKeys = `-- name: AllInternalKeys :many -SELECT key_id, raw_key, key_family, key_index +SELECT key_id, raw_key, key_family, key_index FROM internal_keys ` @@ -87,10 +87,12 @@ func (q *Queries) AllInternalKeys(ctx context.Context) ([]InternalKey, error) { } const AllMintingBatches = `-- name: AllMintingBatches :many -SELECT batch_id, batch_state, minting_tx_psbt, change_output_index, genesis_id, height_hint, creation_time_unix, tapscript_sibling, assets_output_index, universe_commitments, key_id, raw_key, key_family, key_index -FROM asset_minting_batches -JOIN internal_keys -ON asset_minting_batches.batch_id = internal_keys.key_id +SELECT + batches.batch_id, batches.batch_state, batches.minting_tx_psbt, batches.change_output_index, batches.genesis_id, batches.height_hint, batches.creation_time_unix, batches.tapscript_sibling, batches.assets_output_index, batches.universe_commitments, + keys.key_id, keys.raw_key, keys.key_family, keys.key_index +FROM asset_minting_batches AS batches +JOIN internal_keys AS keys + ON batches.batch_id = keys.key_id ` type AllMintingBatchesRow struct { @@ -149,14 +151,15 @@ func (q *Queries) AllMintingBatches(ctx context.Context) ([]AllMintingBatchesRow } const AnchorGenesisPoint = `-- name: AnchorGenesisPoint :exec -WITH target_point(genesis_id) AS ( - SELECT genesis_id +WITH target_point (genesis_id) AS ( + SELECT genesis_points.genesis_id FROM genesis_points WHERE genesis_points.prev_out = $1 ) + UPDATE genesis_points SET 
anchor_tx_id = $2 -WHERE genesis_id in (SELECT genesis_id FROM target_point) +WHERE genesis_id IN (SELECT genesis_id FROM target_point) ` type AnchorGenesisPointParams struct { @@ -171,17 +174,18 @@ func (q *Queries) AnchorGenesisPoint(ctx context.Context, arg AnchorGenesisPoint const AnchorPendingAssets = `-- name: AnchorPendingAssets :exec WITH assets_to_update AS ( - SELECT script_key_id - FROM assets - JOIN genesis_assets + SELECT assets.script_key_id + FROM assets + JOIN genesis_assets ON assets.genesis_id = genesis_assets.gen_asset_id JOIN genesis_points - ON genesis_points.genesis_id = genesis_assets.genesis_point_id - WHERE prev_out = $1 + ON genesis_assets.genesis_point_id = genesis_points.genesis_id + WHERE genesis_points.prev_out = $1 ) + UPDATE assets SET anchor_utxo_id = $2 -WHERE script_key_id in (SELECT script_key_id FROM assets_to_update) +WHERE script_key_id IN (SELECT u.script_key_id FROM assets_to_update AS u) ` type AnchorPendingAssetsParams struct { @@ -195,13 +199,16 @@ func (q *Queries) AnchorPendingAssets(ctx context.Context, arg AnchorPendingAsse } const AssetsByGenesisPoint = `-- name: AssetsByGenesisPoint :many -SELECT assets.asset_id, assets.genesis_id, version, script_key_id, asset_group_witness_id, script_version, amount, lock_time, relative_lock_time, split_commitment_root_hash, split_commitment_root_value, anchor_utxo_id, spent, gen_asset_id, genesis_assets.asset_id, asset_tag, meta_data_id, output_index, asset_type, genesis_point_id, genesis_points.genesis_id, prev_out, anchor_tx_id -FROM assets -JOIN genesis_assets +SELECT + assets.asset_id, assets.genesis_id, assets.version, assets.script_key_id, assets.asset_group_witness_id, assets.script_version, assets.amount, assets.lock_time, assets.relative_lock_time, assets.split_commitment_root_hash, assets.split_commitment_root_value, assets.anchor_utxo_id, assets.spent, + genesis_assets.gen_asset_id, genesis_assets.asset_id, genesis_assets.asset_tag, genesis_assets.meta_data_id, genesis_assets.output_index, genesis_assets.asset_type, genesis_assets.genesis_point_id, + genesis_points.genesis_id, genesis_points.prev_out, genesis_points.anchor_tx_id +FROM assets +JOIN genesis_assets ON assets.genesis_id = genesis_assets.gen_asset_id JOIN genesis_points - ON genesis_points.genesis_id = genesis_assets.genesis_point_id -WHERE prev_out = $1 + ON genesis_assets.genesis_point_id = genesis_points.genesis_id +WHERE genesis_points.prev_out = $1 ` type AssetsByGenesisPointRow struct { @@ -279,17 +286,22 @@ func (q *Queries) AssetsByGenesisPoint(ctx context.Context, prevOut []byte) ([]A const AssetsInBatch = `-- name: AssetsInBatch :many SELECT - gen_asset_id, asset_id, asset_tag, assets_meta.meta_data_hash, - output_index, asset_type, genesis_points.prev_out prev_out + genesis_assets.gen_asset_id, + genesis_assets.asset_id, + genesis_assets.asset_tag, + assets_meta.meta_data_hash, + genesis_assets.output_index, + genesis_assets.asset_type, + genesis_points.prev_out FROM genesis_assets LEFT JOIN assets_meta ON genesis_assets.meta_data_id = assets_meta.meta_id JOIN genesis_points ON genesis_assets.genesis_point_id = genesis_points.genesis_id -JOIN asset_minting_batches batches +JOIN asset_minting_batches AS batches ON genesis_points.genesis_id = batches.genesis_id -JOIN internal_keys keys - ON keys.key_id = batches.batch_id +JOIN internal_keys AS keys + ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ` @@ -336,15 +348,16 @@ func (q *Queries) AssetsInBatch(ctx context.Context, rawKey []byte) ([]AssetsInB const 
BindMintingBatchWithTapSibling = `-- name: BindMintingBatchWithTapSibling :exec WITH target_batch AS ( - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys + SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) + UPDATE asset_minting_batches SET tapscript_sibling = $2 -WHERE batch_id IN (SELECT batch_id FROM target_batch) +WHERE batch_id IN (SELECT tb.batch_id FROM target_batch AS tb) ` type BindMintingBatchWithTapSiblingParams struct { @@ -359,16 +372,18 @@ func (q *Queries) BindMintingBatchWithTapSibling(ctx context.Context, arg BindMi const BindMintingBatchWithTx = `-- name: BindMintingBatchWithTx :one WITH target_batch AS ( - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys + SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) + UPDATE asset_minting_batches -SET minting_tx_psbt = $2, change_output_index = $3, assets_output_index = $4, +SET + minting_tx_psbt = $2, change_output_index = $3, assets_output_index = $4, genesis_id = $5, universe_commitments = $6 -WHERE batch_id IN (SELECT batch_id FROM target_batch) +WHERE batch_id IN (SELECT tb.batch_id FROM target_batch AS tb) RETURNING batch_id ` @@ -419,18 +434,19 @@ func (q *Queries) ConfirmChainAnchorTx(ctx context.Context, arg ConfirmChainAnch } const ConfirmChainTx = `-- name: ConfirmChainTx :exec -WITH target_txn(txn_id) AS ( - SELECT anchor_tx_id - FROM genesis_points points - JOIN asset_minting_batches batches - ON batches.genesis_id = points.genesis_id - JOIN internal_keys keys +WITH target_txn (txn_id) AS ( + SELECT points.anchor_tx_id + FROM genesis_points AS points + JOIN asset_minting_batches AS batches + ON points.genesis_id = batches.genesis_id + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) + UPDATE chain_txns SET block_height = $2, block_hash = $3, tx_index = $4 -WHERE txn_id in (SELECT txn_id FROM target_txn) +WHERE txn_id IN (SELECT txn_id FROM target_txn) ` type ConfirmChainTxParams struct { @@ -453,9 +469,10 @@ func (q *Queries) ConfirmChainTx(ctx context.Context, arg ConfirmChainTxParams) const DeleteExpiredUTXOLeases = `-- name: DeleteExpiredUTXOLeases :exec UPDATE managed_utxos SET lease_owner = NULL, lease_expiry = NULL -WHERE lease_owner IS NOT NULL AND - lease_expiry IS NOT NULL AND - lease_expiry < $1 +WHERE + lease_owner IS NOT NULL + AND lease_expiry IS NOT NULL + AND lease_expiry < $1 ` func (q *Queries) DeleteExpiredUTXOLeases(ctx context.Context, now sql.NullTime) error { @@ -483,6 +500,7 @@ WITH tree_info AS ( ON tapscript_edges.root_hash_id = tapscript_roots.root_id WHERE tapscript_roots.root_hash = $1 ) + DELETE FROM tapscript_edges WHERE edge_id IN (SELECT edge_id FROM tree_info) ` @@ -496,9 +514,9 @@ const DeleteTapscriptTreeNodes = `-- name: DeleteTapscriptTreeNodes :exec DELETE FROM tapscript_nodes WHERE NOT EXISTS ( SELECT 1 - FROM tapscript_edges - -- Delete any node that is not referenced by any edge. - WHERE tapscript_edges.raw_node_id = tapscript_nodes.node_id + FROM tapscript_edges + -- Delete any node that is not referenced by any edge. 
+ WHERE tapscript_edges.raw_node_id = tapscript_nodes.node_id ) ` @@ -529,17 +547,21 @@ func (q *Queries) DeleteUTXOLease(ctx context.Context, outpoint []byte) error { } const FetchAssetID = `-- name: FetchAssetID :many -SELECT asset_id - FROM assets - JOIN script_keys - ON assets.script_key_id = script_keys.script_key_id - JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id - WHERE - (script_keys.tweaked_script_key = $1 - OR $1 IS NULL) - AND (utxos.outpoint = $2 - OR $2 IS NULL) +SELECT assets.asset_id +FROM assets +JOIN script_keys + ON assets.script_key_id = script_keys.script_key_id +JOIN managed_utxos AS utxos + ON assets.anchor_utxo_id = utxos.utxo_id +WHERE + ( + script_keys.tweaked_script_key = $1 + OR $1 IS NULL + ) + AND ( + utxos.outpoint = $2 + OR $2 IS NULL + ) ` type FetchAssetIDParams struct { @@ -571,7 +593,7 @@ func (q *Queries) FetchAssetID(ctx context.Context, arg FetchAssetIDParams) ([]i } const FetchAssetMeta = `-- name: FetchAssetMeta :one -SELECT assets_meta.meta_id, assets_meta.meta_data_hash, assets_meta.meta_data_blob, assets_meta.meta_data_type, assets_meta.meta_decimal_display, assets_meta.meta_universe_commitments, assets_meta.meta_canonical_universes, assets_meta.meta_delegation_key +SELECT assets_meta.meta_id, assets_meta.meta_data_hash, assets_meta.meta_data_blob, assets_meta.meta_data_type, assets_meta.meta_decimal_display, assets_meta.meta_universe_commitments, assets_meta.meta_canonical_universes, assets_meta.meta_delegation_key -- noqa: RF02,AL03 FROM assets_meta WHERE meta_id = $1 ` @@ -597,7 +619,7 @@ func (q *Queries) FetchAssetMeta(ctx context.Context, metaID int64) (FetchAssetM } const FetchAssetMetaByHash = `-- name: FetchAssetMetaByHash :one -SELECT assets_meta.meta_id, assets_meta.meta_data_hash, assets_meta.meta_data_blob, assets_meta.meta_data_type, assets_meta.meta_decimal_display, assets_meta.meta_universe_commitments, assets_meta.meta_canonical_universes, assets_meta.meta_delegation_key +SELECT assets_meta.meta_id, assets_meta.meta_data_hash, assets_meta.meta_data_blob, assets_meta.meta_data_type, assets_meta.meta_decimal_display, assets_meta.meta_universe_commitments, assets_meta.meta_canonical_universes, assets_meta.meta_delegation_key -- noqa: RF02,AL03 FROM assets_meta WHERE meta_data_hash = $1 ` @@ -623,8 +645,8 @@ func (q *Queries) FetchAssetMetaByHash(ctx context.Context, metaDataHash []byte) } const FetchAssetMetaForAsset = `-- name: FetchAssetMetaForAsset :one -SELECT assets_meta.meta_id, assets_meta.meta_data_hash, assets_meta.meta_data_blob, assets_meta.meta_data_type, assets_meta.meta_decimal_display, assets_meta.meta_universe_commitments, assets_meta.meta_canonical_universes, assets_meta.meta_delegation_key -FROM genesis_assets assets +SELECT assets_meta.meta_id, assets_meta.meta_data_hash, assets_meta.meta_data_blob, assets_meta.meta_data_type, assets_meta.meta_decimal_display, assets_meta.meta_universe_commitments, assets_meta.meta_canonical_universes, assets_meta.meta_delegation_key -- noqa: RF02,AL03 +FROM genesis_assets AS assets JOIN assets_meta ON assets.meta_data_id = assets_meta.meta_id WHERE assets.asset_id = $1 @@ -652,24 +674,38 @@ func (q *Queries) FetchAssetMetaForAsset(ctx context.Context, assetID []byte) (F const FetchAssetProof = `-- name: FetchAssetProof :many WITH asset_info AS ( - SELECT assets.asset_id, script_keys.tweaked_script_key, utxos.outpoint + SELECT + assets.asset_id, + script_keys.tweaked_script_key, + utxos.outpoint FROM assets JOIN script_keys ON assets.script_key_id = 
script_keys.script_key_id - JOIN managed_utxos utxos + JOIN managed_utxos AS utxos ON assets.anchor_utxo_id = utxos.utxo_id JOIN genesis_assets - ON assets.genesis_id = genesis_assets.gen_asset_id - WHERE script_keys.tweaked_script_key = $1 - AND (utxos.outpoint = $2 OR $2 IS NULL) - AND (genesis_assets.asset_id = $3 OR $3 IS NULL) + ON assets.genesis_id = genesis_assets.gen_asset_id + WHERE + script_keys.tweaked_script_key = $1 + AND ( + utxos.outpoint = $2 + OR $2 IS NULL + ) + AND ( + genesis_assets.asset_id = $3 + OR $3 IS NULL + ) ) -SELECT asset_info.tweaked_script_key AS script_key, asset_proofs.proof_file, - asset_info.asset_id as asset_id, asset_proofs.proof_id as proof_id, - asset_info.outpoint as outpoint + +SELECT + asset_info.tweaked_script_key AS script_key, + asset_proofs.proof_file, + asset_info.asset_id, + asset_proofs.proof_id, + asset_info.outpoint FROM asset_proofs JOIN asset_info - ON asset_info.asset_id = asset_proofs.asset_id + ON asset_proofs.asset_id = asset_info.asset_id ` type FetchAssetProofParams struct { @@ -717,15 +753,20 @@ func (q *Queries) FetchAssetProof(ctx context.Context, arg FetchAssetProofParams const FetchAssetProofs = `-- name: FetchAssetProofs :many WITH asset_info AS ( - SELECT assets.asset_id, script_keys.tweaked_script_key + SELECT + assets.asset_id, + script_keys.tweaked_script_key FROM assets JOIN script_keys ON assets.script_key_id = script_keys.script_key_id ) -SELECT asset_info.tweaked_script_key AS script_key, asset_proofs.proof_file + +SELECT + asset_info.tweaked_script_key AS script_key, + asset_proofs.proof_file FROM asset_proofs JOIN asset_info - ON asset_info.asset_id = asset_proofs.asset_id + ON asset_proofs.asset_id = asset_info.asset_id ` type FetchAssetProofsRow struct { @@ -758,18 +799,23 @@ func (q *Queries) FetchAssetProofs(ctx context.Context) ([]FetchAssetProofsRow, const FetchAssetProofsByAssetID = `-- name: FetchAssetProofsByAssetID :many WITH asset_info AS ( - SELECT assets.asset_id, script_keys.tweaked_script_key + SELECT + assets.asset_id, + script_keys.tweaked_script_key FROM assets JOIN script_keys ON assets.script_key_id = script_keys.script_key_id - JOIN genesis_assets gen + JOIN genesis_assets AS gen ON assets.genesis_id = gen.gen_asset_id WHERE gen.asset_id = $1 ) -SELECT asset_info.tweaked_script_key AS script_key, asset_proofs.proof_file + +SELECT + asset_info.tweaked_script_key AS script_key, + asset_proofs.proof_file FROM asset_proofs JOIN asset_info - ON asset_info.asset_id = asset_proofs.asset_id + ON asset_proofs.asset_id = asset_info.asset_id ` type FetchAssetProofsByAssetIDRow struct { @@ -801,8 +847,9 @@ func (q *Queries) FetchAssetProofsByAssetID(ctx context.Context, assetID []byte) } const FetchAssetProofsSizes = `-- name: FetchAssetProofsSizes :many -SELECT script_keys.tweaked_script_key AS script_key, - LENGTH(asset_proofs.proof_file) AS proof_file_length +SELECT + script_keys.tweaked_script_key AS script_key, + LENGTH(asset_proofs.proof_file) AS proof_file_length FROM asset_proofs JOIN assets ON asset_proofs.asset_id = assets.asset_id @@ -839,16 +886,20 @@ func (q *Queries) FetchAssetProofsSizes(ctx context.Context) ([]FetchAssetProofs } const FetchAssetWitnesses = `-- name: FetchAssetWitnesses :many -SELECT - assets.asset_id, prev_out_point, prev_asset_id, prev_script_key, - witness_stack, split_commitment_proof +SELECT + assets.asset_id, + asset_witnesses.prev_out_point, + asset_witnesses.prev_asset_id, + asset_witnesses.prev_script_key, + asset_witnesses.witness_stack, + 
asset_witnesses.split_commitment_proof FROM asset_witnesses JOIN assets ON asset_witnesses.asset_id = assets.asset_id WHERE ( (assets.asset_id = $1) OR ($1 IS NULL) ) -ORDER BY witness_index +ORDER BY asset_witnesses.witness_index ` type FetchAssetWitnessesRow struct { @@ -942,49 +993,68 @@ WITH genesis_info AS ( -- points, to the internal key that reference the batch, then restricted -- for internal keys that match our main batch key. SELECT - gen_asset_id, asset_id, asset_tag, output_index, asset_type, - genesis_points.prev_out prev_out, + genesis_assets.gen_asset_id, + genesis_assets.asset_id, + genesis_assets.asset_tag, + genesis_assets.output_index, + genesis_assets.asset_type, + genesis_points.prev_out, assets_meta.meta_id FROM genesis_assets LEFT JOIN assets_meta ON genesis_assets.meta_data_id = assets_meta.meta_id JOIN genesis_points ON genesis_assets.genesis_point_id = genesis_points.genesis_id - JOIN asset_minting_batches batches + JOIN asset_minting_batches AS batches ON genesis_points.genesis_id = batches.genesis_id - JOIN internal_keys keys - ON keys.key_id = batches.batch_id + JOIN internal_keys AS keys + ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 -), key_group_info AS ( +), + +key_group_info AS ( -- This CTE is used to perform a series of joins that allow us to extract -- the group key information, as well as the group sigs for the series of -- assets we care about. We obtain only the assets found in the batch -- above, with the WHERE query at the bottom. - SELECT - witness_id, gen_asset_id, witness_stack, tapscript_root, - tweaked_group_key, raw_key, key_index, key_family - FROM asset_group_witnesses wit - JOIN asset_groups groups - ON wit.group_key_id = groups.group_id - JOIN internal_keys keys - ON keys.key_id = groups.internal_key_id + SELECT + wit.witness_id, + wit.gen_asset_id, + wit.witness_stack, + grp.tapscript_root, + grp.tweaked_group_key, + keys.raw_key, + keys.key_index, + keys.key_family + FROM asset_group_witnesses AS wit + JOIN asset_groups AS grp + ON wit.group_key_id = grp.group_id + JOIN internal_keys AS keys + ON grp.internal_key_id = keys.key_id -- TODO(roasbeef): or can join do this below? 
- WHERE wit.gen_asset_id IN (SELECT gen_asset_id FROM genesis_info) + WHERE wit.gen_asset_id IN (SELECT gi.gen_asset_id FROM genesis_info AS gi) ) -SELECT - version, - script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, - internal_keys.key_id, internal_keys.raw_key, internal_keys.key_family, internal_keys.key_index, - key_group_info.tapscript_root, - key_group_info.witness_stack, + +SELECT + assets.version, + script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, -- noqa: RF02,AL03 + internal_keys.key_id, internal_keys.raw_key, internal_keys.key_family, internal_keys.key_index, -- noqa: RF02,AL03 + key_group_info.tapscript_root, + key_group_info.witness_stack, key_group_info.tweaked_group_key, key_group_info.raw_key AS group_key_raw, key_group_info.key_family AS group_key_family, key_group_info.key_index AS group_key_index, - script_version, amount, lock_time, relative_lock_time, spent, - genesis_info.asset_id, genesis_info.asset_tag, - assets_meta.meta_id, assets_meta.meta_data_hash, assets_meta.meta_data_blob, assets_meta.meta_data_type, assets_meta.meta_decimal_display, assets_meta.meta_universe_commitments, assets_meta.meta_canonical_universes, assets_meta.meta_delegation_key, - genesis_info.output_index AS genesis_output_index, genesis_info.asset_type, + assets.script_version, + assets.amount, + assets.lock_time, + assets.relative_lock_time, + assets.spent, + genesis_info.asset_id, + genesis_info.asset_tag, + assets_meta.meta_id, assets_meta.meta_data_hash, assets_meta.meta_data_blob, assets_meta.meta_data_type, assets_meta.meta_decimal_display, assets_meta.meta_universe_commitments, assets_meta.meta_canonical_universes, assets_meta.meta_delegation_key, -- noqa: RF02,AL03 + genesis_info.output_index AS genesis_output_index, + genesis_info.asset_type, genesis_info.prev_out AS genesis_prev_out FROM assets JOIN genesis_info @@ -994,7 +1064,7 @@ LEFT JOIN assets_meta LEFT JOIN key_group_info ON assets.genesis_id = key_group_info.gen_asset_id JOIN script_keys - on assets.script_key_id = script_keys.script_key_id + ON assets.script_key_id = script_keys.script_key_id JOIN internal_keys ON script_keys.internal_key_id = internal_keys.key_id ` @@ -1107,7 +1177,7 @@ func (q *Queries) FetchChainTx(ctx context.Context, txid []byte) (ChainTxn, erro } const FetchGenesisByAssetID = `-- name: FetchGenesisByAssetID :one -SELECT gen_asset_id, asset_id, asset_tag, meta_hash, output_index, asset_type, prev_out, anchor_txid, block_height +SELECT gen_asset_id, asset_id, asset_tag, meta_hash, output_index, asset_type, prev_out, anchor_txid, block_height FROM genesis_info_view WHERE asset_id = $1 ` @@ -1131,14 +1201,18 @@ func (q *Queries) FetchGenesisByAssetID(ctx context.Context, assetID []byte) (Ge const FetchGenesisByID = `-- name: FetchGenesisByID :one SELECT - asset_id, asset_tag, assets_meta.meta_data_hash, output_index, asset_type, - genesis_points.prev_out prev_out + genesis_assets.asset_id, + genesis_assets.asset_tag, + assets_meta.meta_data_hash, + genesis_assets.output_index, + genesis_assets.asset_type, + genesis_points.prev_out FROM genesis_assets LEFT JOIN assets_meta ON genesis_assets.meta_data_id = assets_meta.meta_id JOIN genesis_points - ON genesis_assets.genesis_point_id = genesis_points.genesis_id -WHERE gen_asset_id = $1 + ON genesis_assets.genesis_point_id = genesis_points.genesis_id +WHERE genesis_assets.gen_asset_id = $1 ` type 
FetchGenesisByIDRow struct { @@ -1165,22 +1239,25 @@ func (q *Queries) FetchGenesisByID(ctx context.Context, genAssetID int64) (Fetch } const FetchGenesisID = `-- name: FetchGenesisID :one -WITH target_point(genesis_id) AS ( - SELECT genesis_id +WITH target_point (genesis_id) AS ( + SELECT genesis_points.genesis_id FROM genesis_points WHERE genesis_points.prev_out = $6 ) -SELECT gen_asset_id + +SELECT genesis_assets.gen_asset_id FROM genesis_assets -LEFT JOIN assets_meta +LEFT JOIN assets_meta ON genesis_assets.meta_data_id = assets_meta.meta_id WHERE ( - genesis_assets.genesis_point_id IN (SELECT genesis_id FROM target_point) AND - genesis_assets.asset_id = $1 AND - genesis_assets.asset_tag = $2 AND - assets_meta.meta_data_hash = $3 AND - genesis_assets.output_index = $4 AND - genesis_assets.asset_type = $5 + genesis_assets.genesis_point_id IN ( + SELECT tp.genesis_id FROM target_point AS tp + ) + AND genesis_assets.asset_id = $1 + AND genesis_assets.asset_tag = $2 + AND assets_meta.meta_data_hash = $3 + AND genesis_assets.output_index = $4 + AND genesis_assets.asset_type = $5 ) ` @@ -1221,7 +1298,7 @@ func (q *Queries) FetchGenesisIDByAssetID(ctx context.Context, assetID []byte) ( } const FetchGenesisPointByAnchorTx = `-- name: FetchGenesisPointByAnchorTx :one -SELECT genesis_id, prev_out, anchor_tx_id +SELECT genesis_id, prev_out, anchor_tx_id FROM genesis_points WHERE anchor_tx_id = $1 ` @@ -1235,14 +1312,14 @@ func (q *Queries) FetchGenesisPointByAnchorTx(ctx context.Context, anchorTxID sq const FetchGroupByGenesis = `-- name: FetchGroupByGenesis :one SELECT - key_group_info_view.version AS version, - key_group_info_view.tweaked_group_key AS tweaked_group_key, - key_group_info_view.raw_key AS raw_key, - key_group_info_view.key_index AS key_index, - key_group_info_view.key_family AS key_family, - key_group_info_view.tapscript_root AS tapscript_root, - key_group_info_view.witness_stack AS witness_stack, - key_group_info_view.custom_subtree_root AS custom_subtree_root + key_group_info_view.version, + key_group_info_view.tweaked_group_key, + key_group_info_view.raw_key, + key_group_info_view.key_index, + key_group_info_view.key_family, + key_group_info_view.tapscript_root, + key_group_info_view.witness_stack, + key_group_info_view.custom_subtree_root FROM key_group_info_view WHERE ( key_group_info_view.gen_asset_id = $1 @@ -1278,14 +1355,14 @@ func (q *Queries) FetchGroupByGenesis(ctx context.Context, genesisID int64) (Fet const FetchGroupByGroupKey = `-- name: FetchGroupByGroupKey :one SELECT - key_group_info_view.version AS version, - key_group_info_view.gen_asset_id AS gen_asset_id, - key_group_info_view.raw_key AS raw_key, - key_group_info_view.key_index AS key_index, - key_group_info_view.key_family AS key_family, - key_group_info_view.tapscript_root AS tapscript_root, - key_group_info_view.witness_stack AS witness_stack, - key_group_info_view.custom_subtree_root AS custom_subtree_root + key_group_info_view.version, + key_group_info_view.gen_asset_id, + key_group_info_view.raw_key, + key_group_info_view.key_index, + key_group_info_view.key_family, + key_group_info_view.tapscript_root, + key_group_info_view.witness_stack, + key_group_info_view.custom_subtree_root FROM key_group_info_view WHERE ( key_group_info_view.tweaked_group_key = $1 @@ -1325,10 +1402,13 @@ func (q *Queries) FetchGroupByGroupKey(ctx context.Context, groupKey []byte) (Fe const FetchGroupedAssets = `-- name: FetchGroupedAssets :many SELECT assets.asset_id AS asset_primary_key, - amount, lock_time, 
relative_lock_time, spent, - genesis_info_view.asset_id AS asset_id, + assets.amount, + assets.lock_time, + assets.relative_lock_time, + assets.spent, + genesis_info_view.asset_id, genesis_info_view.asset_tag, - genesis_info_view.meta_Hash, + genesis_info_view.meta_hash, genesis_info_view.asset_type, key_group_info_view.tweaked_group_key, assets.version AS asset_version @@ -1337,7 +1417,7 @@ JOIN genesis_info_view ON assets.genesis_id = genesis_info_view.gen_asset_id JOIN key_group_info_view ON assets.genesis_id = key_group_info_view.gen_asset_id -WHERE spent = false +WHERE assets.spent = FALSE ` type FetchGroupedAssetsRow struct { @@ -1390,7 +1470,9 @@ func (q *Queries) FetchGroupedAssets(ctx context.Context) ([]FetchGroupedAssetsR } const FetchInternalKeyLocator = `-- name: FetchInternalKeyLocator :one -SELECT key_family, key_index +SELECT + key_family, + key_index FROM internal_keys WHERE raw_key = $1 ` @@ -1408,13 +1490,17 @@ func (q *Queries) FetchInternalKeyLocator(ctx context.Context, rawKey []byte) (F } const FetchManagedUTXO = `-- name: FetchManagedUTXO :one -SELECT utxo_id, outpoint, amt_sats, internal_key_id, taproot_asset_root, tapscript_sibling, merkle_root, txn_id, lease_owner, lease_expiry, root_version, key_id, raw_key, key_family, key_index -FROM managed_utxos utxos -JOIN internal_keys keys +SELECT + utxos.utxo_id, utxos.outpoint, utxos.amt_sats, utxos.internal_key_id, utxos.taproot_asset_root, utxos.tapscript_sibling, utxos.merkle_root, utxos.txn_id, utxos.lease_owner, utxos.lease_expiry, utxos.root_version, + keys.key_id, keys.raw_key, keys.key_family, keys.key_index +FROM managed_utxos AS utxos +JOIN internal_keys AS keys ON utxos.internal_key_id = keys.key_id WHERE ( - (txn_id = $1 OR $1 IS NULL) AND - (utxos.outpoint = $2 OR $2 IS NULL) + (utxos.txn_id = $1 OR $1 IS NULL) + AND ( + utxos.outpoint = $2 OR $2 IS NULL + ) ) ` @@ -1465,9 +1551,11 @@ func (q *Queries) FetchManagedUTXO(ctx context.Context, arg FetchManagedUTXOPara } const FetchManagedUTXOs = `-- name: FetchManagedUTXOs :many -SELECT utxo_id, outpoint, amt_sats, internal_key_id, taproot_asset_root, tapscript_sibling, merkle_root, txn_id, lease_owner, lease_expiry, root_version, key_id, raw_key, key_family, key_index -FROM managed_utxos utxos -JOIN internal_keys keys +SELECT + utxos.utxo_id, utxos.outpoint, utxos.amt_sats, utxos.internal_key_id, utxos.taproot_asset_root, utxos.tapscript_sibling, utxos.merkle_root, utxos.txn_id, utxos.lease_owner, utxos.lease_expiry, utxos.root_version, + keys.key_id, keys.raw_key, keys.key_family, keys.key_index +FROM managed_utxos AS utxos +JOIN internal_keys AS keys ON utxos.internal_key_id = keys.key_id ` @@ -1530,24 +1618,33 @@ func (q *Queries) FetchManagedUTXOs(ctx context.Context) ([]FetchManagedUTXOsRow const FetchMintAnchorUniCommitment = `-- name: FetchMintAnchorUniCommitment :many SELECT - mint_anchor_uni_commitments.id, - mint_anchor_uni_commitments.batch_id, - mint_anchor_uni_commitments.tx_output_index, - mint_anchor_uni_commitments.group_key, + commitments.id, + commitments.batch_id, + commitments.tx_output_index, + commitments.group_key, batch_internal_keys.raw_key AS batch_key, - mint_anchor_uni_commitments.taproot_internal_key_id, - taproot_internal_keys.key_id, taproot_internal_keys.raw_key, taproot_internal_keys.key_family, taproot_internal_keys.key_index -FROM mint_anchor_uni_commitments - JOIN internal_keys taproot_internal_keys - ON mint_anchor_uni_commitments.taproot_internal_key_id = taproot_internal_keys.key_id - LEFT JOIN asset_minting_batches 
batches - ON mint_anchor_uni_commitments.batch_id = batches.batch_id - LEFT JOIN internal_keys batch_internal_keys - ON batches.batch_id = batch_internal_keys.key_id + commitments.taproot_internal_key_id, + taproot_internal_keys.key_id, taproot_internal_keys.raw_key, taproot_internal_keys.key_family, taproot_internal_keys.key_index -- noqa: RF02,AL03 +FROM mint_anchor_uni_commitments AS commitments +JOIN internal_keys AS taproot_internal_keys + ON commitments.taproot_internal_key_id = taproot_internal_keys.key_id +LEFT JOIN asset_minting_batches AS batches + ON commitments.batch_id = batches.batch_id +LEFT JOIN internal_keys AS batch_internal_keys + ON batches.batch_id = batch_internal_keys.key_id WHERE ( - (batch_internal_keys.raw_key = $1 OR $1 IS NULL) AND - (mint_anchor_uni_commitments.group_key = $2 OR $2 IS NULL) AND - (taproot_internal_keys.raw_key = $3 OR $3 IS NULL) + ( + batch_internal_keys.raw_key = $1 + OR $1 IS NULL + ) + AND ( + commitments.group_key = $2 + OR $2 IS NULL + ) + AND ( + taproot_internal_keys.raw_key = $3 + OR $3 IS NULL + ) ) ` @@ -1609,17 +1706,20 @@ WITH target_batch AS ( -- internal key associated with the batch. This internal key is used as the -- actual Taproot internal key to ultimately mint the batch. This pattern -- is used in several other queries. - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys + SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) -SELECT batch_id, batch_state, minting_tx_psbt, change_output_index, genesis_id, height_hint, creation_time_unix, tapscript_sibling, assets_output_index, universe_commitments, key_id, raw_key, key_family, key_index -FROM asset_minting_batches batches -JOIN internal_keys keys + +SELECT + batches.batch_id, batches.batch_state, batches.minting_tx_psbt, batches.change_output_index, batches.genesis_id, batches.height_hint, batches.creation_time_unix, batches.tapscript_sibling, batches.assets_output_index, batches.universe_commitments, + keys.key_id, keys.raw_key, keys.key_family, keys.key_index +FROM asset_minting_batches AS batches +JOIN internal_keys AS keys ON batches.batch_id = keys.key_id -WHERE batch_id in (SELECT batch_id FROM target_batch) +WHERE batches.batch_id IN (SELECT tb.batch_id FROM target_batch AS tb) ` type FetchMintingBatchRow struct { @@ -1662,9 +1762,11 @@ func (q *Queries) FetchMintingBatch(ctx context.Context, rawKey []byte) (FetchMi } const FetchMintingBatchesByInverseState = `-- name: FetchMintingBatchesByInverseState :many -SELECT batch_id, batch_state, minting_tx_psbt, change_output_index, genesis_id, height_hint, creation_time_unix, tapscript_sibling, assets_output_index, universe_commitments, key_id, raw_key, key_family, key_index -FROM asset_minting_batches batches -JOIN internal_keys keys +SELECT + batches.batch_id, batches.batch_state, batches.minting_tx_psbt, batches.change_output_index, batches.genesis_id, batches.height_hint, batches.creation_time_unix, batches.tapscript_sibling, batches.assets_output_index, batches.universe_commitments, + keys.key_id, keys.raw_key, keys.key_family, keys.key_index +FROM asset_minting_batches AS batches +JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE batches.batch_state != $1 ` @@ -1725,10 +1827,12 @@ func (q *Queries) FetchMintingBatchesByInverseState(ctx context.Context, batchSt } const FetchScriptKeyByTweakedKey = `-- name: FetchScriptKeyByTweakedKey :one -SELECT script_keys.script_key_id, 
script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, internal_keys.key_id, internal_keys.raw_key, internal_keys.key_family, internal_keys.key_index +SELECT + script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, -- noqa: RF02,AL03 + internal_keys.key_id, internal_keys.raw_key, internal_keys.key_family, internal_keys.key_index -- noqa: RF02,AL03 FROM script_keys JOIN internal_keys - ON script_keys.internal_key_id = internal_keys.key_id + ON script_keys.internal_key_id = internal_keys.key_id WHERE script_keys.tweaked_script_key = $1 ` @@ -1801,15 +1905,16 @@ WITH target_key_id AS ( -- associated with a given batch. This can only return one value in -- practice since raw_key is a unique field. We then use this value below -- to select only from seedlings in the specified batch. - SELECT key_id - FROM internal_keys keys + SELECT keys.key_id + FROM internal_keys AS keys WHERE keys.raw_key = $2 ) -SELECT seedling_id + +SELECT asset_seedlings.seedling_id FROM asset_seedlings WHERE ( - asset_seedlings.batch_id in (SELECT key_id FROM target_key_id) AND - asset_seedlings.asset_name = $1 + asset_seedlings.batch_id IN (SELECT tki.key_id FROM target_key_id AS tki) + AND asset_seedlings.asset_name = $1 ) ` @@ -1826,17 +1931,26 @@ func (q *Queries) FetchSeedlingID(ctx context.Context, arg FetchSeedlingIDParams } const FetchSeedlingsForBatch = `-- name: FetchSeedlingsForBatch :many -WITH target_batch(batch_id) AS ( - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys +WITH target_batch (batch_id) AS ( + SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) -SELECT seedling_id, asset_name, asset_type, asset_version, asset_supply, - assets_meta.meta_id, assets_meta.meta_data_hash, assets_meta.meta_data_blob, assets_meta.meta_data_type, assets_meta.meta_decimal_display, assets_meta.meta_universe_commitments, assets_meta.meta_canonical_universes, assets_meta.meta_delegation_key, - emission_enabled, batch_id, - group_genesis_id, group_anchor_id, group_tapscript_root, + +SELECT + asset_seedlings.seedling_id, + asset_seedlings.asset_name, + asset_seedlings.asset_type, + asset_seedlings.asset_version, + asset_seedlings.asset_supply, + assets_meta.meta_id, assets_meta.meta_data_hash, assets_meta.meta_data_blob, assets_meta.meta_data_type, assets_meta.meta_decimal_display, assets_meta.meta_universe_commitments, assets_meta.meta_canonical_universes, assets_meta.meta_delegation_key, -- noqa: RF02,AL03 + asset_seedlings.emission_enabled, + asset_seedlings.batch_id, + asset_seedlings.group_genesis_id, + asset_seedlings.group_anchor_id, + asset_seedlings.group_tapscript_root, -- TODO(guggero): We should use sqlc.embed() for the script key and internal -- key fields, but we can't because it's a LEFT JOIN. We should check if the -- LEFT JOIN is actually necessary or if we always have keys for seedlings. 
@@ -1852,18 +1966,18 @@ SELECT seedling_id, asset_name, asset_type, asset_version, asset_supply, delegation_internal_keys.raw_key AS delegation_key_raw, delegation_internal_keys.key_family AS delegation_key_fam, delegation_internal_keys.key_index AS delegation_key_index -FROM asset_seedlings +FROM asset_seedlings LEFT JOIN assets_meta ON asset_seedlings.asset_meta_id = assets_meta.meta_id LEFT JOIN script_keys ON asset_seedlings.script_key_id = script_keys.script_key_id LEFT JOIN internal_keys ON script_keys.internal_key_id = internal_keys.key_id -LEFT JOIN internal_keys group_internal_keys +LEFT JOIN internal_keys AS group_internal_keys ON asset_seedlings.group_internal_key_id = group_internal_keys.key_id -LEFT JOIN internal_keys delegation_internal_keys +LEFT JOIN internal_keys AS delegation_internal_keys ON asset_seedlings.delegation_key_id = delegation_internal_keys.key_id -WHERE asset_seedlings.batch_id in (SELECT batch_id FROM target_batch) +WHERE asset_seedlings.batch_id IN (SELECT tb.batch_id FROM target_batch AS tb) ` type FetchSeedlingsForBatchRow struct { @@ -1951,17 +2065,22 @@ WITH tree_info AS ( -- This CTE is used to fetch all edges that link the given tapscript tree -- root hash to child nodes. Each edge also contains the index of the child -- node in the tapscript tree. - SELECT tapscript_roots.branch_only, tapscript_edges.raw_node_id, + SELECT + tapscript_roots.branch_only, + tapscript_edges.raw_node_id, tapscript_edges.node_index FROM tapscript_roots JOIN tapscript_edges ON tapscript_roots.root_id = tapscript_edges.root_hash_id WHERE tapscript_roots.root_hash = $1 ) -SELECT tree_info.branch_only, tapscript_nodes.raw_node + +SELECT + tree_info.branch_only, + tapscript_nodes.raw_node FROM tapscript_nodes JOIN tree_info - ON tree_info.raw_node_id = tapscript_nodes.node_id + ON tapscript_nodes.node_id = tree_info.raw_node_id ORDER BY tree_info.node_index ASC ` @@ -1995,10 +2114,12 @@ func (q *Queries) FetchTapscriptTree(ctx context.Context, rootHash []byte) ([]Fe } const FetchUnknownTypeScriptKeys = `-- name: FetchUnknownTypeScriptKeys :many -SELECT script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, internal_keys.key_id, internal_keys.raw_key, internal_keys.key_family, internal_keys.key_index +SELECT + script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, -- noqa: RF02,AL03 + internal_keys.key_id, internal_keys.raw_key, internal_keys.key_family, internal_keys.key_index -- noqa: RF02,AL03 FROM script_keys JOIN internal_keys - ON script_keys.internal_key_id = internal_keys.key_id + ON script_keys.internal_key_id = internal_keys.key_id WHERE script_keys.key_type IS NULL ` @@ -2041,7 +2162,7 @@ func (q *Queries) FetchUnknownTypeScriptKeys(ctx context.Context) ([]FetchUnknow } const GenesisAssets = `-- name: GenesisAssets :many -SELECT gen_asset_id, asset_id, asset_tag, meta_data_id, output_index, asset_type, genesis_point_id +SELECT gen_asset_id, asset_id, asset_tag, meta_data_id, output_index, asset_type, genesis_point_id FROM genesis_assets ` @@ -2112,10 +2233,11 @@ WITH asset_info AS ( ON assets.script_key_id = script_keys.script_key_id WHERE script_keys.tweaked_script_key = $1 ) -SELECT COUNT(asset_info.asset_id) > 0 as has_proof + +SELECT COUNT(asset_info.asset_id) > 0 AS has_proof FROM asset_proofs JOIN asset_info - ON asset_info.asset_id = asset_proofs.asset_id + ON asset_proofs.asset_id = asset_info.asset_id ` func (q 
*Queries) HasAssetProof(ctx context.Context, tweakedScriptKey []byte) (bool, error) { @@ -2129,13 +2251,14 @@ const InsertAssetSeedling = `-- name: InsertAssetSeedling :exec INSERT INTO asset_seedlings ( asset_name, asset_type, asset_version, asset_supply, asset_meta_id, emission_enabled, batch_id, group_genesis_id, group_anchor_id, - script_key_id, group_internal_key_id, group_tapscript_root, delegation_key_id + script_key_id, group_internal_key_id, group_tapscript_root, + delegation_key_id ) VALUES ( - $1, $2, $3, $4, - $5, $6, $7, - $8, $9, - $10, $11, - $12, $13 + $1, $2, $3, $4, + $5, $6, $7, + $8, $9, + $10, $11, + $12, $13 ) ` @@ -2181,11 +2304,12 @@ WITH target_key_id AS ( -- practice since raw_key is a unique field. We then use this value below -- to insert the seedling and point to the proper batch_id, which is a -- foreign key that references the key_id of the internal key. - SELECT key_id - FROM internal_keys keys + SELECT keys.key_id + FROM internal_keys AS keys WHERE keys.raw_key = $1 ) -INSERT INTO asset_seedlings( + +INSERT INTO asset_seedlings ( asset_name, asset_type, asset_version, asset_supply, asset_meta_id, emission_enabled, batch_id, group_genesis_id, group_anchor_id, script_key_id, group_internal_key_id, group_tapscript_root, @@ -2254,39 +2378,59 @@ func (q *Queries) NewMintingBatch(ctx context.Context, arg NewMintingBatchParams const QueryAssetBalancesByAsset = `-- name: QueryAssetBalancesByAsset :many SELECT - genesis_info_view.asset_id, SUM(amount) balance, - genesis_info_view.asset_tag, genesis_info_view.meta_hash, - genesis_info_view.asset_type, genesis_info_view.output_index, + genesis_info_view.asset_id, + SUM(assets.amount) AS balance, + genesis_info_view.asset_tag, + genesis_info_view.meta_hash, + genesis_info_view.asset_type, + genesis_info_view.output_index, genesis_info_view.prev_out AS genesis_point FROM assets JOIN genesis_info_view - ON assets.genesis_id = genesis_info_view.gen_asset_id AND - (genesis_info_view.asset_id = $1 OR - $1 IS NULL) -LEFT JOIN key_group_info_view + ON + assets.genesis_id = genesis_info_view.gen_asset_id + AND ( + genesis_info_view.asset_id = $1 + OR $1 IS NULL + ) +LEFT JOIN key_group_info_view -- noqa: ST11 ON assets.genesis_id = key_group_info_view.gen_asset_id -JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id AND - CASE - WHEN $2 = true THEN - (utxos.lease_owner IS NOT NULL AND utxos.lease_expiry > $3) - WHEN $2 = false THEN - (utxos.lease_owner IS NULL OR - utxos.lease_expiry IS NULL OR - utxos.lease_expiry <= $3) - ELSE TRUE - END +JOIN managed_utxos AS utxos + ON + assets.anchor_utxo_id = utxos.utxo_id + AND CASE + WHEN $2 = TRUE + THEN + ( + utxos.lease_owner IS NOT NULL + AND utxos.lease_expiry > $3 + ) + WHEN $2 = FALSE + THEN + ( + utxos.lease_owner IS NULL + OR utxos.lease_expiry IS NULL + OR utxos.lease_expiry <= $3 + ) + ELSE TRUE + END JOIN script_keys ON assets.script_key_id = script_keys.script_key_id -WHERE spent = FALSE AND - (script_keys.key_type != $4 OR - $4 IS NULL) AND - ($5 = script_keys.key_type OR - $5 IS NULL) -GROUP BY assets.genesis_id, genesis_info_view.asset_id, - genesis_info_view.asset_tag, genesis_info_view.meta_hash, - genesis_info_view.asset_type, genesis_info_view.output_index, - genesis_info_view.prev_out +WHERE + assets.spent = FALSE + AND ( + script_keys.key_type != $4 + OR $4 IS NULL + ) + AND ( + $5 = script_keys.key_type + OR $5 IS NULL + ) +GROUP BY + assets.genesis_id, genesis_info_view.asset_id, + genesis_info_view.asset_tag, genesis_info_view.meta_hash, + 
genesis_info_view.asset_type, genesis_info_view.output_index, + genesis_info_view.prev_out ` type QueryAssetBalancesByAssetParams struct { @@ -2350,30 +2494,48 @@ func (q *Queries) QueryAssetBalancesByAsset(ctx context.Context, arg QueryAssetB const QueryAssetBalancesByGroup = `-- name: QueryAssetBalancesByGroup :many SELECT - key_group_info_view.tweaked_group_key, SUM(amount) balance + key_group_info_view.tweaked_group_key, + SUM(assets.amount) AS balance FROM assets JOIN key_group_info_view - ON assets.genesis_id = key_group_info_view.gen_asset_id AND - (key_group_info_view.tweaked_group_key = $1 OR - $1 IS NULL) -JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id AND - CASE - WHEN $2 = true THEN - (utxos.lease_owner IS NOT NULL AND utxos.lease_expiry > $3) - WHEN $2 = false THEN - (utxos.lease_owner IS NULL OR - utxos.lease_expiry IS NULL OR - utxos.lease_expiry <= $3) - ELSE TRUE - END + ON + assets.genesis_id = key_group_info_view.gen_asset_id + AND ( + key_group_info_view.tweaked_group_key + = $1 + OR $1 IS NULL + ) +JOIN managed_utxos AS utxos + ON + assets.anchor_utxo_id = utxos.utxo_id + AND CASE + WHEN $2 = TRUE + THEN + ( + utxos.lease_owner IS NOT NULL + AND utxos.lease_expiry > $3 + ) + WHEN $2 = FALSE + THEN + ( + utxos.lease_owner IS NULL + OR utxos.lease_expiry IS NULL + OR utxos.lease_expiry <= $3 + ) + ELSE TRUE + END JOIN script_keys ON assets.script_key_id = script_keys.script_key_id -WHERE spent = FALSE AND - (script_keys.key_type != $4 OR - $4 IS NULL) AND - ($5 = script_keys.key_type OR - $5 IS NULL) +WHERE + assets.spent = FALSE + AND ( + script_keys.key_type != $4 + OR $4 IS NULL + ) + AND ( + $5 = script_keys.key_type + OR $5 IS NULL + ) GROUP BY key_group_info_view.tweaked_group_key ` @@ -2422,19 +2584,24 @@ func (q *Queries) QueryAssetBalancesByGroup(ctx context.Context, arg QueryAssetB const QueryAssets = `-- name: QueryAssets :many SELECT assets.asset_id AS asset_primary_key, - assets.genesis_id, assets.version, spent, - script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, - internal_keys.key_id, internal_keys.raw_key, internal_keys.key_family, internal_keys.key_index, - key_group_info_view.tapscript_root, - key_group_info_view.witness_stack, + assets.genesis_id, + assets.version, + assets.spent, + script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, -- noqa: RF02,AL03 + internal_keys.key_id, internal_keys.raw_key, internal_keys.key_family, internal_keys.key_index, -- noqa: RF02,AL03 + key_group_info_view.tapscript_root, + key_group_info_view.witness_stack, key_group_info_view.tweaked_group_key, key_group_info_view.raw_key AS group_key_raw, key_group_info_view.key_family AS group_key_family, key_group_info_view.key_index AS group_key_index, - script_version, amount, lock_time, relative_lock_time, - genesis_info_view.asset_id AS asset_id, + assets.script_version, + assets.amount, + assets.lock_time, + assets.relative_lock_time, + genesis_info_view.asset_id, genesis_info_view.asset_tag, - genesis_info_view.meta_hash, + genesis_info_view.meta_hash, genesis_info_view.output_index AS genesis_output_index, genesis_info_view.asset_type, genesis_info_view.prev_out AS genesis_prev_out, @@ -2450,49 +2617,74 @@ SELECT utxos.lease_owner AS anchor_lease_owner, utxos.lease_expiry AS anchor_lease_expiry, utxo_internal_keys.raw_key AS anchor_internal_key, - split_commitment_root_hash, 
split_commitment_root_value + assets.split_commitment_root_hash, + assets.split_commitment_root_value FROM assets JOIN genesis_info_view - ON assets.genesis_id = genesis_info_view.gen_asset_id AND - (genesis_info_view.asset_id = $1 OR - $1 IS NULL) + ON + assets.genesis_id = genesis_info_view.gen_asset_id + AND ( + genesis_info_view.asset_id = $1 + OR $1 IS NULL + ) LEFT JOIN key_group_info_view ON assets.genesis_id = key_group_info_view.gen_asset_id JOIN script_keys - ON assets.script_key_id = script_keys.script_key_id AND - (script_keys.tweaked_script_key = $2 OR - $2 IS NULL) + ON + assets.script_key_id = script_keys.script_key_id + AND ( + script_keys.tweaked_script_key = $2 + OR $2 IS NULL + ) JOIN internal_keys ON script_keys.internal_key_id = internal_keys.key_id -JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id AND - (utxos.outpoint = $3 OR - $3 IS NULL) AND - CASE - WHEN $4 = true THEN - (utxos.lease_owner IS NOT NULL AND utxos.lease_expiry > $5) - WHEN $4 = false THEN - (utxos.lease_owner IS NULL OR - utxos.lease_expiry IS NULL OR - utxos.lease_expiry <= $5) - ELSE TRUE - END -JOIN internal_keys utxo_internal_keys +JOIN managed_utxos AS utxos + ON + assets.anchor_utxo_id = utxos.utxo_id + AND ( + utxos.outpoint = $3 + OR $3 IS NULL + ) + AND CASE + WHEN $4 = TRUE + THEN + ( + utxos.lease_owner IS NOT NULL + AND utxos.lease_expiry > $5 + ) + WHEN $4 = FALSE + THEN + ( + utxos.lease_owner IS NULL + OR utxos.lease_expiry IS NULL + OR utxos.lease_expiry <= $5 + ) + ELSE TRUE + END +JOIN internal_keys AS utxo_internal_keys ON utxos.internal_key_id = utxo_internal_keys.key_id -JOIN chain_txns txns - ON utxos.txn_id = txns.txn_id AND - COALESCE(txns.block_height, 0) >= COALESCE($6, txns.block_height, 0) +JOIN chain_txns AS txns + ON + utxos.txn_id = txns.txn_id + AND COALESCE(txns.block_height, 0) + >= COALESCE($6, txns.block_height, 0) WHERE ( - assets.amount >= COALESCE($7, assets.amount) AND - assets.amount <= COALESCE($8, assets.amount) AND - assets.spent = COALESCE($9, assets.spent) AND - (key_group_info_view.tweaked_group_key = $10 OR - $10 IS NULL) AND - assets.anchor_utxo_id = COALESCE($11, assets.anchor_utxo_id) AND - assets.genesis_id = COALESCE($12, assets.genesis_id) AND - assets.script_key_id = COALESCE($13, assets.script_key_id) AND - ($14 = script_keys.key_type OR - $14 IS NULL) + assets.amount >= COALESCE($7, assets.amount) + AND assets.amount <= COALESCE($8, assets.amount) + AND assets.spent = COALESCE($9, assets.spent) + AND ( + key_group_info_view.tweaked_group_key = $10 + OR $10 IS NULL + ) + AND assets.anchor_utxo_id + = COALESCE($11, assets.anchor_utxo_id) + AND assets.genesis_id = COALESCE($12, assets.genesis_id) + AND assets.script_key_id + = COALESCE($13, assets.script_key_id) + AND ( + $14 = script_keys.key_type + OR $14 IS NULL + ) ) ` @@ -2643,20 +2835,25 @@ func (q *Queries) QueryAssets(ctx context.Context, arg QueryAssetsParams) ([]Que } const SetAssetSpent = `-- name: SetAssetSpent :one -WITH target_asset(asset_id) AS ( +WITH target_asset (asset_id) AS ( SELECT assets.asset_id FROM assets JOIN script_keys - ON assets.script_key_id = script_keys.script_key_id + ON assets.script_key_id = script_keys.script_key_id JOIN genesis_assets - ON assets.genesis_id = genesis_assets.gen_asset_id - JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id AND - (utxos.outpoint = $1 OR - $1 IS NULL) - WHERE script_keys.tweaked_script_key = $2 - AND genesis_assets.asset_id = $3 + ON assets.genesis_id = genesis_assets.gen_asset_id + JOIN 
managed_utxos AS utxos + ON + assets.anchor_utxo_id = utxos.utxo_id + AND ( + utxos.outpoint = $1 + OR $1 IS NULL + ) + WHERE + script_keys.tweaked_script_key = $2 + AND genesis_assets.asset_id = $3 ) + UPDATE assets SET spent = TRUE WHERE asset_id = (SELECT asset_id FROM target_asset) @@ -2678,15 +2875,16 @@ func (q *Queries) SetAssetSpent(ctx context.Context, arg SetAssetSpentParams) (i const UpdateBatchGenesisTx = `-- name: UpdateBatchGenesisTx :exec WITH target_batch AS ( - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys + SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) + UPDATE asset_minting_batches SET minting_tx_psbt = $2 -WHERE batch_id in (SELECT batch_id FROM target_batch) +WHERE batch_id IN (SELECT tb.batch_id FROM target_batch AS tb) ` type UpdateBatchGenesisTxParams struct { @@ -2705,15 +2903,16 @@ WITH target_batch AS ( -- internal key associated with the batch. This internal key is used as the -- actual Taproot internal key to ultimately mint the batch. This pattern -- is used in several other queries. - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys + SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) -UPDATE asset_minting_batches + +UPDATE asset_minting_batches SET batch_state = $2 -WHERE batch_id in (SELECT batch_id FROM target_batch) +WHERE batch_id IN (SELECT tb.batch_id FROM target_batch AS tb) ` type UpdateMintingBatchStateParams struct { @@ -2745,15 +2944,13 @@ func (q *Queries) UpdateUTXOLease(ctx context.Context, arg UpdateUTXOLeaseParams const UpsertAsset = `-- name: UpsertAsset :one INSERT INTO assets ( - genesis_id, version, script_key_id, asset_group_witness_id, script_version, + genesis_id, version, script_key_id, asset_group_witness_id, script_version, amount, lock_time, relative_lock_time, anchor_utxo_id, spent ) VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 ) ON CONFLICT (genesis_id, script_key_id, anchor_utxo_id) - -- This is a NOP, anchor_utxo_id is one of the unique fields that caused the - -- conflict. - DO UPDATE SET anchor_utxo_id = EXCLUDED.anchor_utxo_id +DO UPDATE SET anchor_utxo_id = excluded.anchor_utxo_id RETURNING asset_id ` @@ -2770,6 +2967,8 @@ type UpsertAssetParams struct { Spent bool } +// This is a NOP, anchor_utxo_id is one of the unique fields that caused the +// conflict. func (q *Queries) UpsertAsset(ctx context.Context, arg UpsertAssetParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertAsset, arg.GenesisID, @@ -2795,9 +2994,7 @@ INSERT INTO asset_groups ( ) VALUES ( $1, $2, $3, $4, $5, $6 ) ON CONFLICT (tweaked_group_key) - -- This is not a NOP, update the genesis point ID in case it wasn't set - -- before. - DO UPDATE SET genesis_point_id = EXCLUDED.genesis_point_id +DO UPDATE SET genesis_point_id = excluded.genesis_point_id RETURNING group_id ` @@ -2810,6 +3007,8 @@ type UpsertAssetGroupKeyParams struct { CustomSubtreeRootID sql.NullInt32 } +// This is not a NOP, update the genesis point ID in case it wasn't set +// before. 
func (q *Queries) UpsertAssetGroupKey(ctx context.Context, arg UpsertAssetGroupKeyParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertAssetGroupKey, arg.Version, @@ -2830,8 +3029,7 @@ INSERT INTO asset_group_witnesses ( ) VALUES ( $1, $2, $3 ) ON CONFLICT (gen_asset_id) - -- This is a NOP, gen_asset_id is the unique field that caused the conflict. - DO UPDATE SET gen_asset_id = EXCLUDED.gen_asset_id +DO UPDATE SET gen_asset_id = excluded.gen_asset_id RETURNING witness_id ` @@ -2841,6 +3039,7 @@ type UpsertAssetGroupWitnessParams struct { GroupKeyID int64 } +// This is a NOP, gen_asset_id is the unique field that caused the conflict. func (q *Queries) UpsertAssetGroupWitness(ctx context.Context, arg UpsertAssetGroupWitnessParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertAssetGroupWitness, arg.WitnessStack, arg.GenAssetID, arg.GroupKeyID) var witness_id int64 @@ -2855,16 +3054,22 @@ INSERT INTO assets_meta ( ) VALUES ( $1, $2, $3, $4, $5, $6, $7 ) ON CONFLICT (meta_data_hash) - -- In this case, we may be inserting the data+type for an existing blob. So - -- we'll set all of those values. At this layer we assume the meta hash - -- has been validated elsewhere. - DO UPDATE SET meta_data_blob = COALESCE(EXCLUDED.meta_data_blob, assets_meta.meta_data_blob), - meta_data_type = COALESCE(EXCLUDED.meta_data_type, assets_meta.meta_data_type), - meta_decimal_display = COALESCE(EXCLUDED.meta_decimal_display, assets_meta.meta_decimal_display), - meta_universe_commitments = COALESCE(EXCLUDED.meta_universe_commitments, assets_meta.meta_universe_commitments), - meta_canonical_universes = COALESCE(EXCLUDED.meta_canonical_universes, assets_meta.meta_canonical_universes), - meta_delegation_key = COALESCE(EXCLUDED.meta_delegation_key, assets_meta.meta_delegation_key) - +DO UPDATE SET meta_data_blob += COALESCE(excluded.meta_data_blob, assets_meta.meta_data_blob), +meta_data_type = COALESCE(excluded.meta_data_type, assets_meta.meta_data_type), +meta_decimal_display += COALESCE(excluded.meta_decimal_display, assets_meta.meta_decimal_display), +meta_universe_commitments += COALESCE( + excluded.meta_universe_commitments, assets_meta.meta_universe_commitments +), +meta_canonical_universes += COALESCE( + excluded.meta_canonical_universes, assets_meta.meta_canonical_universes +), +meta_delegation_key += COALESCE(excluded.meta_delegation_key, assets_meta.meta_delegation_key) + RETURNING meta_id ` @@ -2878,6 +3083,9 @@ type UpsertAssetMetaParams struct { MetaDelegationKey []byte } +// In this case, we may be inserting the data+type for an existing blob. So +// we'll set all of those values. At this layer we assume the meta hash +// has been validated elsewhere. func (q *Queries) UpsertAssetMeta(ctx context.Context, arg UpsertAssetMetaParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertAssetMeta, arg.MetaDataHash, @@ -2899,8 +3107,7 @@ INSERT INTO asset_proofs ( ) VALUES ( $1, $2 ) ON CONFLICT (asset_id) - -- This is not a NOP, we always overwrite the proof with the new one. - DO UPDATE SET proof_file = EXCLUDED.proof_file +DO UPDATE SET proof_file = excluded.proof_file ` type UpsertAssetProofByIDParams struct { @@ -2908,6 +3115,7 @@ type UpsertAssetProofByIDParams struct { ProofFile []byte } +// This is not a NOP, we always overwrite the proof with the new one. 
func (q *Queries) UpsertAssetProofByID(ctx context.Context, arg UpsertAssetProofByIDParams) error { _, err := q.db.ExecContext(ctx, UpsertAssetProofByID, arg.AssetID, arg.ProofFile) return err @@ -2919,13 +3127,12 @@ INSERT INTO asset_witnesses ( split_commitment_proof, witness_index ) VALUES ( $1, $2, $3, $4, $5, $6, $7 -) ON CONFLICT (asset_id, witness_index) - -- We overwrite the witness with the new one. - DO UPDATE SET prev_out_point = EXCLUDED.prev_out_point, - prev_asset_id = EXCLUDED.prev_asset_id, - prev_script_key = EXCLUDED.prev_script_key, - witness_stack = EXCLUDED.witness_stack, - split_commitment_proof = EXCLUDED.split_commitment_proof +) ON CONFLICT (asset_id, witness_index) +DO UPDATE SET prev_out_point = excluded.prev_out_point, +prev_asset_id = excluded.prev_asset_id, +prev_script_key = excluded.prev_script_key, +witness_stack = excluded.witness_stack, +split_commitment_proof = excluded.split_commitment_proof ` type UpsertAssetWitnessParams struct { @@ -2938,6 +3145,7 @@ type UpsertAssetWitnessParams struct { WitnessIndex int32 } +// We overwrite the witness with the new one. func (q *Queries) UpsertAssetWitness(ctx context.Context, arg UpsertAssetWitnessParams) error { _, err := q.db.ExecContext(ctx, UpsertAssetWitness, arg.AssetID, @@ -2958,11 +3166,10 @@ INSERT INTO chain_txns ( $1, $2, $3, $4, $5, $6 ) ON CONFLICT (txid) - -- Not a NOP but instead update any nullable fields that aren't null in the - -- args. - DO UPDATE SET block_height = COALESCE(EXCLUDED.block_height, chain_txns.block_height), - block_hash = COALESCE(EXCLUDED.block_hash, chain_txns.block_hash), - tx_index = COALESCE(EXCLUDED.tx_index, chain_txns.tx_index) +DO UPDATE SET block_height += COALESCE(excluded.block_height, chain_txns.block_height), +block_hash = COALESCE(excluded.block_hash, chain_txns.block_hash), +tx_index = COALESCE(excluded.tx_index, chain_txns.tx_index) RETURNING txn_id ` @@ -2975,6 +3182,8 @@ type UpsertChainTxParams struct { TxIndex sql.NullInt32 } +// Not a NOP but instead update any nullable fields that aren't null in the +// args. func (q *Queries) UpsertChainTx(ctx context.Context, arg UpsertChainTxParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertChainTx, arg.Txid, @@ -2995,13 +3204,18 @@ WITH target_meta_id AS ( FROM assets_meta WHERE meta_data_hash = $1 ) + INSERT INTO genesis_assets ( - asset_id, asset_tag, meta_data_id, output_index, asset_type, genesis_point_id + asset_id, + asset_tag, + meta_data_id, + output_index, + asset_type, + genesis_point_id ) VALUES ( $2, $3, (SELECT meta_id FROM target_meta_id), $4, $5, $6 ) ON CONFLICT (asset_id) - -- This is a NOP, asset_id is the unique field that caused the conflict. - DO UPDATE SET asset_id = EXCLUDED.asset_id +DO UPDATE SET asset_id = excluded.asset_id RETURNING gen_asset_id ` @@ -3014,6 +3228,7 @@ type UpsertGenesisAssetParams struct { GenesisPointID int64 } +// This is a NOP, asset_id is the unique field that caused the conflict. func (q *Queries) UpsertGenesisAsset(ctx context.Context, arg UpsertGenesisAssetParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertGenesisAsset, arg.MetaDataHash, @@ -3029,16 +3244,16 @@ func (q *Queries) UpsertGenesisAsset(ctx context.Context, arg UpsertGenesisAsset } const UpsertGenesisPoint = `-- name: UpsertGenesisPoint :one -INSERT INTO genesis_points( +INSERT INTO genesis_points ( prev_out ) VALUES ( $1 ) ON CONFLICT (prev_out) - -- This is a NOP, prev_out is the unique field that caused the conflict. 
- DO UPDATE SET prev_out = EXCLUDED.prev_out +DO UPDATE SET prev_out = excluded.prev_out RETURNING genesis_id ` +// This is a NOP, prev_out is the unique field that caused the conflict. func (q *Queries) UpsertGenesisPoint(ctx context.Context, prevOut []byte) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertGenesisPoint, prevOut) var genesis_id int64 @@ -3048,12 +3263,11 @@ func (q *Queries) UpsertGenesisPoint(ctx context.Context, prevOut []byte) (int64 const UpsertInternalKey = `-- name: UpsertInternalKey :one INSERT INTO internal_keys ( - raw_key, key_family, key_index + raw_key, key_family, key_index ) VALUES ( $1, $2, $3 ) ON CONFLICT (raw_key) - -- This is a NOP, raw_key is the unique field that caused the conflict. - DO UPDATE SET raw_key = EXCLUDED.raw_key +DO UPDATE SET raw_key = excluded.raw_key RETURNING key_id ` @@ -3063,6 +3277,7 @@ type UpsertInternalKeyParams struct { KeyIndex int32 } +// This is a NOP, raw_key is the unique field that caused the conflict. func (q *Queries) UpsertInternalKey(ctx context.Context, arg UpsertInternalKeyParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertInternalKey, arg.RawKey, arg.KeyFamily, arg.KeyIndex) var key_id int64 @@ -3071,20 +3286,20 @@ func (q *Queries) UpsertInternalKey(ctx context.Context, arg UpsertInternalKeyPa } const UpsertManagedUTXO = `-- name: UpsertManagedUTXO :one -WITH target_key(key_id) AS ( +WITH target_key (key_id) AS ( SELECT key_id FROM internal_keys WHERE raw_key = $1 ) + INSERT INTO managed_utxos ( outpoint, amt_sats, internal_key_id, tapscript_sibling, merkle_root, txn_id, taproot_asset_root, root_version ) VALUES ( $2, $3, (SELECT key_id FROM target_key), $4, $5, $6, $7, $8 ) ON CONFLICT (outpoint) - -- Not a NOP but instead update any nullable fields that aren't null in the - -- args. - DO UPDATE SET tapscript_sibling = COALESCE(EXCLUDED.tapscript_sibling, managed_utxos.tapscript_sibling) +DO UPDATE SET tapscript_sibling += COALESCE(excluded.tapscript_sibling, managed_utxos.tapscript_sibling) RETURNING utxo_id ` @@ -3099,6 +3314,8 @@ type UpsertManagedUTXOParams struct { RootVersion sql.NullInt16 } +// Not a NOP but instead update any nullable fields that aren't null in the +// args. func (q *Queries) UpsertManagedUTXO(ctx context.Context, arg UpsertManagedUTXOParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertManagedUTXO, arg.RawKey, @@ -3120,9 +3337,10 @@ WITH target_batch AS ( -- This CTE is used to fetch the ID of a batch, based on the serialized -- internal key associated with the batch. SELECT keys.key_id AS batch_id - FROM internal_keys keys + FROM internal_keys AS keys WHERE keys.raw_key = $4 ) + INSERT INTO mint_anchor_uni_commitments ( batch_id, tx_output_index, taproot_internal_key_id, group_key ) @@ -3130,10 +3348,9 @@ VALUES ( (SELECT batch_id FROM target_batch), $1, $2, $3 ) -ON CONFLICT(batch_id, tx_output_index) DO UPDATE SET - -- The following fields are updated if a conflict occurs. - taproot_internal_key_id = EXCLUDED.taproot_internal_key_id, - group_key = EXCLUDED.group_key +ON CONFLICT (batch_id, tx_output_index) DO UPDATE SET +taproot_internal_key_id = excluded.taproot_internal_key_id, +group_key = excluded.group_key RETURNING id ` @@ -3147,6 +3364,7 @@ type UpsertMintAnchorUniCommitmentParams struct { // Upsert a record into the mint_anchor_uni_commitments table. // If a record with the same batch ID and tx output index already exists, update // the existing record. Otherwise, insert a new record. +// The following fields are updated if a conflict occurs. 
func (q *Queries) UpsertMintAnchorUniCommitment(ctx context.Context, arg UpsertMintAnchorUniCommitmentParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertMintAnchorUniCommitment, arg.TxOutputIndex, @@ -3164,26 +3382,17 @@ INSERT INTO script_keys ( internal_key_id, tweaked_script_key, tweak, key_type ) VALUES ( $1, $2, $3, $4 -) ON CONFLICT (tweaked_script_key) - -- Overwrite the declared_known, key_type and tweak fields if they were - -- previously unknown. - DO UPDATE SET - tweaked_script_key = EXCLUDED.tweaked_script_key, - -- If the tweak was previously unknown, we'll update to the new value. - tweak = - CASE - WHEN script_keys.tweak IS NULL - THEN COALESCE(EXCLUDED.tweak, script_keys.tweak) - ELSE script_keys.tweak - END, - -- We only overwrite the key type with a value that does not mean - -- "unknown" (0 or NULL). - key_type = - CASE - WHEN COALESCE(EXCLUDED.key_type, 0) != 0 - THEN EXCLUDED.key_type - ELSE script_keys.key_type - END +) ON CONFLICT (tweaked_script_key) +DO UPDATE SET +tweaked_script_key = excluded.tweaked_script_key, +tweak += COALESCE(script_keys.tweak, COALESCE(excluded.tweak, script_keys.tweak)), +key_type += CASE + WHEN COALESCE(excluded.key_type, 0) != 0 + THEN excluded.key_type + ELSE script_keys.key_type +END RETURNING script_key_id ` @@ -3194,6 +3403,11 @@ type UpsertScriptKeyParams struct { KeyType sql.NullInt16 } +// Overwrite the declared_known, key_type and tweak fields if they were +// previously unknown. +// If the tweak was previously unknown, we'll update to the new value. +// We only overwrite the key type with a value that does not mean +// "unknown" (0 or NULL). func (q *Queries) UpsertScriptKey(ctx context.Context, arg UpsertScriptKeyParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertScriptKey, arg.InternalKeyID, @@ -3212,10 +3426,8 @@ INSERT INTO tapscript_edges ( ) VALUES ( $1, $2, $3 ) ON CONFLICT (root_hash_id, node_index, raw_node_id) - -- This is a NOP, root_hash_id, node_index, and raw_node_id are the unique - -- fields that caused the conflict. - DO UPDATE SET root_hash_id = EXCLUDED.root_hash_id, - node_index = EXCLUDED.node_index, raw_node_id = EXCLUDED.raw_node_id +DO UPDATE SET root_hash_id = excluded.root_hash_id, +node_index = excluded.node_index, raw_node_id = excluded.raw_node_id RETURNING edge_id ` @@ -3225,6 +3437,8 @@ type UpsertTapscriptTreeEdgeParams struct { RawNodeID int64 } +// This is a NOP, root_hash_id, node_index, and raw_node_id are the unique +// fields that caused the conflict. func (q *Queries) UpsertTapscriptTreeEdge(ctx context.Context, arg UpsertTapscriptTreeEdgeParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertTapscriptTreeEdge, arg.RootHashID, arg.NodeIndex, arg.RawNodeID) var edge_id int64 @@ -3238,11 +3452,11 @@ INSERT INTO tapscript_nodes ( ) VALUES ( $1 ) ON CONFLICT (raw_node) - -- This is a NOP, raw_node is the unique field that caused the conflict. - DO UPDATE SET raw_node = EXCLUDED.raw_node +DO UPDATE SET raw_node = excluded.raw_node RETURNING node_id ` +// This is a NOP, raw_node is the unique field that caused the conflict. func (q *Queries) UpsertTapscriptTreeNode(ctx context.Context, rawNode []byte) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertTapscriptTreeNode, rawNode) var node_id int64 @@ -3256,10 +3470,7 @@ INSERT INTO tapscript_roots ( ) VALUES ( $1, $2 ) ON CONFLICT (root_hash) - -- This is a NOP, the root_hash is the unique field that caused the - -- conflict. 
The tree should be deleted before switching between branch and - -- leaf storage for the same root hash. - DO UPDATE SET root_hash = EXCLUDED.root_hash +DO UPDATE SET root_hash = excluded.root_hash RETURNING root_id ` @@ -3268,6 +3479,9 @@ type UpsertTapscriptTreeRootHashParams struct { BranchOnly bool } +// This is a NOP, the root_hash is the unique field that caused the +// conflict. The tree should be deleted before switching between branch and +// leaf storage for the same root hash. func (q *Queries) UpsertTapscriptTreeRootHash(ctx context.Context, arg UpsertTapscriptTreeRootHashParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertTapscriptTreeRootHash, arg.RootHash, arg.BranchOnly) var root_id int64 diff --git a/tapdb/sqlc/macaroons.sql.go b/tapdb/sqlc/macaroons.sql.go index 0c4930de1..f4d44762f 100644 --- a/tapdb/sqlc/macaroons.sql.go +++ b/tapdb/sqlc/macaroons.sql.go @@ -10,7 +10,7 @@ import ( ) const GetRootKey = `-- name: GetRootKey :one -SELECT id, root_key FROM macaroons +SELECT id, root_key FROM macaroons WHERE id = $1 ` diff --git a/tapdb/sqlc/metadata.sql.go b/tapdb/sqlc/metadata.sql.go index 32788764a..d5b4441a9 100644 --- a/tapdb/sqlc/metadata.sql.go +++ b/tapdb/sqlc/metadata.sql.go @@ -21,8 +21,8 @@ func (q *Queries) AssetsDBSizePostgres(ctx context.Context) (int64, error) { } const AssetsDBSizeSqlite = `-- name: AssetsDBSizeSqlite :one -SELECT page_count * page_size AS size_in_bytes -FROM pragma_page_count(), pragma_page_size() +SELECT pc.page_count * ps.page_size AS size_in_bytes +FROM pragma_page_count() AS pc, pragma_page_size() AS ps ` func (q *Queries) AssetsDBSizeSqlite(ctx context.Context) (int32, error) { diff --git a/tapdb/sqlc/mssmt.sql.go b/tapdb/sqlc/mssmt.sql.go index 3a793ba1c..e7f00943f 100644 --- a/tapdb/sqlc/mssmt.sql.go +++ b/tapdb/sqlc/mssmt.sql.go @@ -10,7 +10,8 @@ import ( ) const DeleteAllNodes = `-- name: DeleteAllNodes :execrows -DELETE FROM mssmt_nodes WHERE namespace = $1 +DELETE FROM mssmt_nodes +WHERE namespace = $1 ` func (q *Queries) DeleteAllNodes(ctx context.Context, namespace string) (int64, error) { @@ -22,7 +23,8 @@ func (q *Queries) DeleteAllNodes(ctx context.Context, namespace string) (int64, } const DeleteNode = `-- name: DeleteNode :execrows -DELETE FROM mssmt_nodes WHERE hash_key = $1 AND namespace = $2 +DELETE FROM mssmt_nodes +WHERE hash_key = $1 AND namespace = $2 ` type DeleteNodeParams struct { @@ -39,7 +41,8 @@ func (q *Queries) DeleteNode(ctx context.Context, arg DeleteNodeParams) (int64, } const DeleteRoot = `-- name: DeleteRoot :execrows -DELETE FROM mssmt_roots WHERE namespace = $1 +DELETE FROM mssmt_roots +WHERE namespace = $1 ` func (q *Queries) DeleteRoot(ctx context.Context, namespace string) (int64, error) { @@ -88,23 +91,45 @@ func (q *Queries) FetchAllNodes(ctx context.Context) ([]MssmtNode, error) { const FetchChildren = `-- name: FetchChildren :many WITH RECURSIVE mssmt_branches_cte ( hash_key, l_hash_key, r_hash_key, key, value, sum, namespace, depth -) -AS ( - SELECT r.hash_key, r.l_hash_key, r.r_hash_key, r.key, r.value, r.sum, r.namespace, 0 as depth - FROM mssmt_nodes r +) AS ( + SELECT + r.hash_key, + r.l_hash_key, + r.r_hash_key, + r.key, + r.value, + r.sum, + r.namespace, + 0 AS depth + FROM mssmt_nodes AS r WHERE r.hash_key = $1 AND r.namespace = $2 UNION ALL - SELECT n.hash_key, n.l_hash_key, n.r_hash_key, n.key, n.value, n.sum, n.namespace, depth+1 - FROM mssmt_nodes n, mssmt_branches_cte b - WHERE n.namespace=b.namespace AND (n.hash_key=b.l_hash_key OR n.hash_key=b.r_hash_key) - /* + SELECT 
+ n.hash_key, + n.l_hash_key, + n.r_hash_key, + n.key, + n.value, + n.sum, + n.namespace, + b.depth + 1 + FROM + mssmt_nodes AS n, + mssmt_branches_cte AS b + WHERE + n.namespace = b.namespace + AND (n.hash_key = b.l_hash_key OR n.hash_key = b.r_hash_key) +/* Limit the result set to 3 items. The first is always the root node, while the following 0, 1 or 2 nodes represent children of the root node. These children can either be the next level children, or one next level and one from the level after that. In the future we may use this limit to fetch entire subtrees too. */ -) SELECT hash_key, l_hash_key, r_hash_key, key, value, sum, namespace, depth FROM mssmt_branches_cte WHERE depth < 3 +) + +SELECT hash_key, l_hash_key, r_hash_key, key, value, sum, namespace, depth FROM mssmt_branches_cte +WHERE depth < 3 ` type FetchChildrenParams struct { @@ -159,14 +184,35 @@ const FetchChildrenSelfJoin = `-- name: FetchChildrenSelfJoin :many WITH subtree_cte ( hash_key, l_hash_key, r_hash_key, key, value, sum, namespace, depth ) AS ( - SELECT r.hash_key, r.l_hash_key, r.r_hash_key, r.key, r.value, r.sum, r.namespace, 0 as depth - FROM mssmt_nodes r - WHERE r.hash_key = $1 AND r.namespace = $2 - UNION ALL - SELECT c.hash_key, c.l_hash_key, c.r_hash_key, c.key, c.value, c.sum, c.namespace, depth+1 - FROM mssmt_nodes c - INNER JOIN subtree_cte r ON r.l_hash_key=c.hash_key OR r.r_hash_key=c.hash_key -) SELECT hash_key, l_hash_key, r_hash_key, key, value, sum, namespace, depth from subtree_cte WHERE depth < 3 + SELECT + r.hash_key, + r.l_hash_key, + r.r_hash_key, + r.key, + r.value, + r.sum, + r.namespace, + 0 AS depth + FROM mssmt_nodes AS r + WHERE r.hash_key = $1 AND r.namespace = $2 + UNION ALL + SELECT + c.hash_key, + c.l_hash_key, + c.r_hash_key, + c.key, + c.value, + c.sum, + c.namespace, + r.depth + 1 + FROM mssmt_nodes AS c + INNER JOIN + subtree_cte AS r + ON c.hash_key = r.l_hash_key OR c.hash_key = r.r_hash_key +) + +SELECT hash_key, l_hash_key, r_hash_key, key, value, sum, namespace, depth FROM subtree_cte +WHERE depth < 3 ` type FetchChildrenSelfJoinParams struct { @@ -219,10 +265,11 @@ func (q *Queries) FetchChildrenSelfJoin(ctx context.Context, arg FetchChildrenSe const FetchRootNode = `-- name: FetchRootNode :one SELECT nodes.hash_key, nodes.l_hash_key, nodes.r_hash_key, nodes.key, nodes.value, nodes.sum, nodes.namespace -FROM mssmt_nodes nodes -JOIN mssmt_roots roots - ON roots.root_hash = nodes.hash_key AND - roots.namespace = $1 +FROM mssmt_nodes AS nodes +JOIN mssmt_roots AS roots + ON + nodes.hash_key = roots.root_hash + AND roots.namespace = $1 ` func (q *Queries) FetchRootNode(ctx context.Context, namespace string) (MssmtNode, error) { @@ -319,8 +366,7 @@ INSERT INTO mssmt_roots ( ) VALUES ( $1, $2 ) ON CONFLICT (namespace) - -- Not a NOP, we always overwrite the root hash. - DO UPDATE SET root_hash = EXCLUDED.root_hash +DO UPDATE SET root_hash = excluded.root_hash ` type UpsertRootNodeParams struct { @@ -328,6 +374,7 @@ type UpsertRootNodeParams struct { Namespace string } +// Not a NOP, we always overwrite the root hash. 
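-- Illustrative sketch (hypothetical adjacency table) of the depth-limited
-- recursive CTE used by FetchChildren and FetchChildrenSelfJoin above: the
-- anchor member selects the requested node at depth 0, the recursive member
-- walks to its children while incrementing depth, and the outer WHERE caps
-- how far the traversal descends.
CREATE TABLE IF NOT EXISTS example_nodes (
    node_id INTEGER PRIMARY KEY,
    parent_id INTEGER REFERENCES example_nodes (node_id)
);

WITH RECURSIVE subtree_cte (node_id, depth) AS (
    SELECT
        n.node_id,
        0 AS depth
    FROM example_nodes AS n
    WHERE n.node_id = 1
    UNION ALL
    SELECT
        c.node_id,
        p.depth + 1
    FROM example_nodes AS c
    INNER JOIN subtree_cte AS p
        ON c.parent_id = p.node_id
)

SELECT node_id, depth FROM subtree_cte
WHERE depth < 3;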
func (q *Queries) UpsertRootNode(ctx context.Context, arg UpsertRootNodeParams) error { _, err := q.db.ExecContext(ctx, UpsertRootNode, arg.RootHash, arg.Namespace) return err diff --git a/tapdb/sqlc/querier.go b/tapdb/sqlc/querier.go index f5fcc50b2..8daa97659 100644 --- a/tapdb/sqlc/querier.go +++ b/tapdb/sqlc/querier.go @@ -16,6 +16,8 @@ type Querier interface { AllMintingBatches(ctx context.Context) ([]AllMintingBatchesRow, error) AnchorGenesisPoint(ctx context.Context, arg AnchorGenesisPointParams) error AnchorPendingAssets(ctx context.Context, arg AnchorPendingAssetsParams) error + // This is a NOP, anchor_utxo_id is one of the unique fields that caused the + // conflict. ApplyPendingOutput(ctx context.Context, arg ApplyPendingOutputParams) (int64, error) AssetsByGenesisPoint(ctx context.Context, prevOut []byte) ([]AssetsByGenesisPointRow, error) AssetsDBSizePostgres(ctx context.Context) (int64, error) @@ -141,6 +143,7 @@ type Querier interface { QueryFederationGlobalSyncConfigs(ctx context.Context) ([]FederationGlobalSyncConfig, error) // Join on mssmt_nodes to get leaf related fields. // Join on genesis_info_view to get leaf related fields. + // Universe leaves WHERE clauses. QueryFederationProofSyncLog(ctx context.Context, arg QueryFederationProofSyncLogParams) ([]QueryFederationProofSyncLogRow, error) QueryFederationUniSyncConfigs(ctx context.Context) ([]QueryFederationUniSyncConfigsRow, error) QueryMultiverseLeaves(ctx context.Context, arg QueryMultiverseLeavesParams) ([]QueryMultiverseLeavesRow, error) @@ -161,34 +164,75 @@ type Querier interface { UpdateBatchGenesisTx(ctx context.Context, arg UpdateBatchGenesisTxParams) error UpdateMintingBatchState(ctx context.Context, arg UpdateMintingBatchStateParams) error UpdateUTXOLease(ctx context.Context, arg UpdateUTXOLeaseParams) error + // If the WHERE clause below is true (exact match on all other fields, + // except for creation_time), we set taproot_output_key to its current + // conflicting value. This is a no-op in terms of data change but allows + // RETURNING id to work on the existing row. UpsertAddr(ctx context.Context, arg UpsertAddrParams) (int64, error) UpsertAddrEvent(ctx context.Context, arg UpsertAddrEventParams) (int64, error) + // This is a NOP, anchor_utxo_id is one of the unique fields that caused the + // conflict. UpsertAsset(ctx context.Context, arg UpsertAssetParams) (int64, error) + // This is not a NOP, update the genesis point ID in case it wasn't set + // before. UpsertAssetGroupKey(ctx context.Context, arg UpsertAssetGroupKeyParams) (int64, error) + // This is a NOP, gen_asset_id is the unique field that caused the conflict. UpsertAssetGroupWitness(ctx context.Context, arg UpsertAssetGroupWitnessParams) (int64, error) + // In this case, we may be inserting the data+type for an existing blob. So + // we'll set all of those values. At this layer we assume the meta hash + // has been validated elsewhere. UpsertAssetMeta(ctx context.Context, arg UpsertAssetMetaParams) (int64, error) + // This is not a NOP, we always overwrite the proof with the new one. UpsertAssetProofByID(ctx context.Context, arg UpsertAssetProofByIDParams) error + // We overwrite the witness with the new one. UpsertAssetWitness(ctx context.Context, arg UpsertAssetWitnessParams) error + // Not a NOP but instead update any nullable fields that aren't null in the + // args. 
UpsertChainTx(ctx context.Context, arg UpsertChainTxParams) (int64, error) UpsertFederationGlobalSyncConfig(ctx context.Context, arg UpsertFederationGlobalSyncConfigParams) error + // Increment the attempt counter. UpsertFederationProofSyncLog(ctx context.Context, arg UpsertFederationProofSyncLogParams) (int64, error) UpsertFederationUniSyncConfig(ctx context.Context, arg UpsertFederationUniSyncConfigParams) error + // This is a NOP, asset_id is the unique field that caused the conflict. UpsertGenesisAsset(ctx context.Context, arg UpsertGenesisAssetParams) (int64, error) + // This is a NOP, prev_out is the unique field that caused the conflict. UpsertGenesisPoint(ctx context.Context, prevOut []byte) (int64, error) + // This is a NOP, raw_key is the unique field that caused the conflict. UpsertInternalKey(ctx context.Context, arg UpsertInternalKeyParams) (int64, error) + // Not a NOP but instead update any nullable fields that aren't null in the + // args. UpsertManagedUTXO(ctx context.Context, arg UpsertManagedUTXOParams) (int64, error) // Upsert a record into the mint_anchor_uni_commitments table. // If a record with the same batch ID and tx output index already exists, update // the existing record. Otherwise, insert a new record. + // The following fields are updated if a conflict occurs. UpsertMintAnchorUniCommitment(ctx context.Context, arg UpsertMintAnchorUniCommitmentParams) (int64, error) + // This is a no-op to allow returning the ID. UpsertMultiverseLeaf(ctx context.Context, arg UpsertMultiverseLeafParams) (int64, error) + // This is a no-op to allow returning the ID. UpsertMultiverseRoot(ctx context.Context, arg UpsertMultiverseRootParams) (int64, error) + // Not a NOP, we always overwrite the root hash. UpsertRootNode(ctx context.Context, arg UpsertRootNodeParams) error + // Overwrite the declared_known, key_type and tweak fields if they were + // previously unknown. + // If the tweak was previously unknown, we'll update to the new value. + // We only overwrite the key type with a value that does not mean + // "unknown" (0 or NULL). UpsertScriptKey(ctx context.Context, arg UpsertScriptKeyParams) (int64, error) + // This is a NOP, root_hash_id, node_index, and raw_node_id are the unique + // fields that caused the conflict. UpsertTapscriptTreeEdge(ctx context.Context, arg UpsertTapscriptTreeEdgeParams) (int64, error) + // This is a NOP, raw_node is the unique field that caused the conflict. UpsertTapscriptTreeNode(ctx context.Context, rawNode []byte) (int64, error) + // This is a NOP, the root_hash is the unique field that caused the + // conflict. The tree should be deleted before switching between branch and + // leaf storage for the same root hash. UpsertTapscriptTreeRootHash(ctx context.Context, arg UpsertTapscriptTreeRootHashParams) (int64, error) + // This is a NOP, minting_point and script_key_bytes are the unique fields + // that caused the conflict. UpsertUniverseLeaf(ctx context.Context, arg UpsertUniverseLeafParams) error + // This is a NOP, namespace_root is the unique field that caused the + // conflict. 
UpsertUniverseRoot(ctx context.Context, arg UpsertUniverseRootParams) (int64, error) } diff --git a/tapdb/sqlc/queries/addrs.sql b/tapdb/sqlc/queries/addrs.sql index ec16ee7f7..aecf484a6 100644 --- a/tapdb/sqlc/queries/addrs.sql +++ b/tapdb/sqlc/queries/addrs.sql @@ -14,96 +14,119 @@ INSERT INTO addrs ( proof_courier_addr ) VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12 -) +) ON CONFLICT (taproot_output_key) DO UPDATE SET - -- If the WHERE clause below is true (exact match on all other fields, - -- except for creation_time), we set taproot_output_key to its current - -- conflicting value. This is a no-op in terms of data change but allows - -- RETURNING id to work on the existing row. - taproot_output_key = excluded.taproot_output_key -WHERE - addrs.version = excluded.version - AND addrs.asset_version = excluded.asset_version - AND addrs.genesis_asset_id = excluded.genesis_asset_id - AND ( - (addrs.group_key IS NULL AND excluded.group_key IS NULL) - OR addrs.group_key = excluded.group_key - ) - AND addrs.script_key_id = excluded.script_key_id - AND addrs.taproot_key_id = excluded.taproot_key_id - AND ( - (addrs.tapscript_sibling IS NULL AND excluded.tapscript_sibling IS NULL) - OR addrs.tapscript_sibling = excluded.tapscript_sibling - ) - AND addrs.amount = excluded.amount - AND addrs.asset_type = excluded.asset_type - AND addrs.proof_courier_addr = excluded.proof_courier_addr +-- If the WHERE clause below is true (exact match on all other fields, +-- except for creation_time), we set taproot_output_key to its current +-- conflicting value. This is a no-op in terms of data change but allows +-- RETURNING id to work on the existing row. +taproot_output_key = excluded.taproot_output_key +WHERE +addrs.version = excluded.version +AND addrs.asset_version = excluded.asset_version +AND addrs.genesis_asset_id = excluded.genesis_asset_id +AND ( + (addrs.group_key IS NULL AND excluded.group_key IS NULL) + OR addrs.group_key = excluded.group_key +) +AND addrs.script_key_id = excluded.script_key_id +AND addrs.taproot_key_id = excluded.taproot_key_id +AND ( + (addrs.tapscript_sibling IS NULL AND excluded.tapscript_sibling IS NULL) + OR addrs.tapscript_sibling = excluded.tapscript_sibling +) +AND addrs.amount = excluded.amount +AND addrs.asset_type = excluded.asset_type +AND addrs.proof_courier_addr = excluded.proof_courier_addr RETURNING id; -- name: FetchAddrs :many -SELECT - version, asset_version, genesis_asset_id, group_key, tapscript_sibling, - taproot_output_key, amount, asset_type, creation_time, managed_from, - proof_courier_addr, - sqlc.embed(script_keys), - sqlc.embed(raw_script_keys), - taproot_keys.raw_key AS raw_taproot_key, +SELECT + addrs.version, + addrs.asset_version, + addrs.genesis_asset_id, + addrs.group_key, + addrs.tapscript_sibling, + addrs.taproot_output_key, + addrs.amount, + addrs.asset_type, + addrs.creation_time, + addrs.managed_from, + addrs.proof_courier_addr, + sqlc.embed(script_keys), -- noqa: RF02,AL03 + sqlc.embed(raw_script_keys), -- noqa: RF02,AL03 + taproot_keys.raw_key AS raw_taproot_key, taproot_keys.key_family AS taproot_key_family, taproot_keys.key_index AS taproot_key_index FROM addrs JOIN script_keys ON addrs.script_key_id = script_keys.script_key_id -JOIN internal_keys raw_script_keys +JOIN internal_keys AS raw_script_keys ON script_keys.internal_key_id = raw_script_keys.key_id -JOIN internal_keys taproot_keys +JOIN internal_keys AS taproot_keys ON addrs.taproot_key_id = taproot_keys.key_id -WHERE creation_time >= @created_after - AND 
creation_time <= @created_before - AND (@unmanaged_only = false OR - (CASE WHEN managed_from IS NULL THEN true ELSE false END) = @unmanaged_only) +WHERE + addrs.creation_time >= @created_after + AND addrs.creation_time <= @created_before + AND ( + @unmanaged_only = FALSE + OR (coalesce(addrs.managed_from IS NULL, FALSE)) = @unmanaged_only + ) ORDER BY addrs.creation_time LIMIT @num_limit OFFSET @num_offset; -- name: FetchAddrByTaprootOutputKey :one SELECT - version, asset_version, genesis_asset_id, group_key, tapscript_sibling, - taproot_output_key, amount, asset_type, creation_time, managed_from, - proof_courier_addr, - sqlc.embed(script_keys), - sqlc.embed(raw_script_keys), + addrs.version, + addrs.asset_version, + addrs.genesis_asset_id, + addrs.group_key, + addrs.tapscript_sibling, + addrs.taproot_output_key, + addrs.amount, + addrs.asset_type, + addrs.creation_time, + addrs.managed_from, + addrs.proof_courier_addr, + sqlc.embed(script_keys), -- noqa: RF02,AL03 + sqlc.embed(raw_script_keys), -- noqa: RF02,AL03 taproot_keys.raw_key AS raw_taproot_key, taproot_keys.key_family AS taproot_key_family, taproot_keys.key_index AS taproot_key_index FROM addrs JOIN script_keys - ON addrs.script_key_id = script_keys.script_key_id -JOIN internal_keys raw_script_keys - ON script_keys.internal_key_id = raw_script_keys.key_id -JOIN internal_keys taproot_keys - ON addrs.taproot_key_id = taproot_keys.key_id -WHERE taproot_output_key = $1; + ON addrs.script_key_id = script_keys.script_key_id +JOIN internal_keys AS raw_script_keys + ON script_keys.internal_key_id = raw_script_keys.key_id +JOIN internal_keys AS taproot_keys + ON addrs.taproot_key_id = taproot_keys.key_id +WHERE addrs.taproot_output_key = $1; -- name: SetAddrManaged :exec -WITH target_addr(addr_id) AS ( - SELECT id +WITH target_addr (addr_id) AS ( + SELECT addrs.id FROM addrs WHERE addrs.taproot_output_key = $1 ) + UPDATE addrs SET managed_from = $2 WHERE id = (SELECT addr_id FROM target_addr); -- name: UpsertAddrEvent :one -WITH target_addr(addr_id) AS ( - SELECT id +WITH target_addr (addr_id) AS ( + SELECT addrs.id FROM addrs WHERE addrs.taproot_output_key = $1 -), target_chain_txn(txn_id) AS ( - SELECT txn_id +), + +target_chain_txn (txn_id) AS ( + SELECT chain_txns.txn_id FROM chain_txns WHERE chain_txns.txid = $2 ) + INSERT INTO addr_events ( creation_time, addr_id, status, chain_txn_id, chain_txn_output_index, managed_utxo_id, asset_proof_id, asset_id @@ -112,63 +135,75 @@ INSERT INTO addr_events ( (SELECT txn_id FROM target_chain_txn), $5, $6, $7, $8 ) ON CONFLICT (addr_id, chain_txn_id, chain_txn_output_index) - DO UPDATE SET status = EXCLUDED.status, - asset_proof_id = COALESCE(EXCLUDED.asset_proof_id, addr_events.asset_proof_id), - asset_id = COALESCE(EXCLUDED.asset_id, addr_events.asset_id) +DO UPDATE SET status = excluded.status, +asset_proof_id = coalesce(excluded.asset_proof_id, addr_events.asset_proof_id), +asset_id = coalesce(excluded.asset_id, addr_events.asset_id) RETURNING id; -- name: FetchAddrEvent :one SELECT - creation_time, status, asset_proof_id, asset_id, - chain_txns.txid as txid, - chain_txns.block_height as confirmation_height, - chain_txn_output_index as output_index, - managed_utxos.amt_sats as amt_sats, - managed_utxos.tapscript_sibling as tapscript_sibling, - internal_keys.raw_key as internal_key + addr_events.creation_time, + addr_events.status, + addr_events.asset_proof_id, + addr_events.asset_id, + chain_txns.txid, + chain_txns.block_height AS confirmation_height, + addr_events.chain_txn_output_index AS 
output_index, + managed_utxos.amt_sats, + managed_utxos.tapscript_sibling, + internal_keys.raw_key AS internal_key FROM addr_events LEFT JOIN chain_txns - ON addr_events.chain_txn_id = chain_txns.txn_id + ON addr_events.chain_txn_id = chain_txns.txn_id LEFT JOIN managed_utxos - ON addr_events.managed_utxo_id = managed_utxos.utxo_id + ON addr_events.managed_utxo_id = managed_utxos.utxo_id LEFT JOIN internal_keys - ON managed_utxos.internal_key_id = internal_keys.key_id -WHERE id = $1; + ON managed_utxos.internal_key_id = internal_keys.key_id +WHERE addr_events.id = $1; -- name: FetchAddrEventByAddrKeyAndOutpoint :one -WITH target_addr(addr_id) AS ( - SELECT id +WITH target_addr (addr_id) AS ( + SELECT addrs.id FROM addrs WHERE addrs.taproot_output_key = $1 ) + SELECT - addr_events.id, creation_time, status, asset_proof_id, asset_id, - chain_txns.txid as txid, - chain_txns.block_height as confirmation_height, - chain_txn_output_index as output_index, - managed_utxos.amt_sats as amt_sats, - managed_utxos.tapscript_sibling as tapscript_sibling, - internal_keys.raw_key as internal_key + addr_events.id, + addr_events.creation_time, + addr_events.status, + addr_events.asset_proof_id, + addr_events.asset_id, + chain_txns.txid, + chain_txns.block_height AS confirmation_height, + addr_events.chain_txn_output_index AS output_index, + managed_utxos.amt_sats, + managed_utxos.tapscript_sibling, + internal_keys.raw_key AS internal_key FROM addr_events JOIN target_addr - ON addr_events.addr_id = target_addr.addr_id + ON addr_events.addr_id = target_addr.addr_id LEFT JOIN chain_txns - ON addr_events.chain_txn_id = chain_txns.txn_id + ON addr_events.chain_txn_id = chain_txns.txn_id LEFT JOIN managed_utxos - ON addr_events.managed_utxo_id = managed_utxos.utxo_id + ON addr_events.managed_utxo_id = managed_utxos.utxo_id LEFT JOIN internal_keys - ON managed_utxos.internal_key_id = internal_keys.key_id -WHERE chain_txns.txid = $2 - AND chain_txn_output_index = $3; + ON managed_utxos.internal_key_id = internal_keys.key_id +WHERE + chain_txns.txid = $2 + AND addr_events.chain_txn_output_index = $3; -- name: QueryEventIDs :many SELECT - addr_events.id as event_id, addrs.taproot_output_key as taproot_output_key + addr_events.id AS event_id, + addrs.taproot_output_key FROM addr_events JOIN addrs - ON addr_events.addr_id = addrs.id -WHERE addr_events.status >= @status_from - AND addr_events.status <= @status_to - AND COALESCE(@addr_taproot_key, addrs.taproot_output_key) = addrs.taproot_output_key - AND addr_events.creation_time >= @created_after -ORDER by addr_events.creation_time; + ON addr_events.addr_id = addrs.id +WHERE + addr_events.status >= @status_from + AND addr_events.status <= @status_to + AND coalesce(@addr_taproot_key, addrs.taproot_output_key) + = addrs.taproot_output_key + AND addr_events.creation_time >= @created_after +ORDER BY addr_events.creation_time; diff --git a/tapdb/sqlc/queries/assets.sql b/tapdb/sqlc/queries/assets.sql index 536b868db..04837110b 100644 --- a/tapdb/sqlc/queries/assets.sql +++ b/tapdb/sqlc/queries/assets.sql @@ -1,11 +1,11 @@ -- name: UpsertInternalKey :one INSERT INTO internal_keys ( - raw_key, key_family, key_index + raw_key, key_family, key_index ) VALUES ( $1, $2, $3 ) ON CONFLICT (raw_key) - -- This is a NOP, raw_key is the unique field that caused the conflict. - DO UPDATE SET raw_key = EXCLUDED.raw_key +-- This is a NOP, raw_key is the unique field that caused the conflict. 
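-- Illustrative sketch (hypothetical table and column names) of the
-- conditional upsert used by UpsertAddr above: the DO UPDATE branch only
-- fires when its WHERE clause matches, i.e. when the conflicting row is an
-- exact duplicate of the new values; re-assigning the unique column is then
-- a data no-op that lets RETURNING surface the existing row's ID. If the
-- WHERE clause does not match, no row is updated and RETURNING yields
-- nothing, which the caller can treat as a genuine conflict.
CREATE TABLE IF NOT EXISTS example_addrs (
    id INTEGER PRIMARY KEY,
    output_key BLOB UNIQUE NOT NULL,
    amount BIGINT NOT NULL
);

INSERT INTO example_addrs (
    output_key, amount
) VALUES (
    x'0102', 1000
) ON CONFLICT (output_key)
DO UPDATE SET output_key = excluded.output_key
WHERE example_addrs.amount = excluded.amount
RETURNING id;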
+DO UPDATE SET raw_key = excluded.raw_key RETURNING key_id; -- name: NewMintingBatch :exec @@ -14,9 +14,11 @@ INSERT INTO asset_minting_batches ( ) VALUES (0, $1, $2, $3); -- name: FetchMintingBatchesByInverseState :many -SELECT * -FROM asset_minting_batches batches -JOIN internal_keys keys +SELECT + batches.*, + keys.* +FROM asset_minting_batches AS batches +JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE batches.batch_state != $1; @@ -26,17 +28,20 @@ WITH target_batch AS ( -- internal key associated with the batch. This internal key is used as the -- actual Taproot internal key to ultimately mint the batch. This pattern -- is used in several other queries. - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys + SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) -SELECT * -FROM asset_minting_batches batches -JOIN internal_keys keys + +SELECT + batches.*, + keys.* +FROM asset_minting_batches AS batches +JOIN internal_keys AS keys ON batches.batch_id = keys.key_id -WHERE batch_id in (SELECT batch_id FROM target_batch); +WHERE batches.batch_id IN (SELECT tb.batch_id FROM target_batch AS tb); -- name: UpdateMintingBatchState :exec WITH target_batch AS ( @@ -44,27 +49,29 @@ WITH target_batch AS ( -- internal key associated with the batch. This internal key is used as the -- actual Taproot internal key to ultimately mint the batch. This pattern -- is used in several other queries. - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys + SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) -UPDATE asset_minting_batches + +UPDATE asset_minting_batches SET batch_state = $2 -WHERE batch_id in (SELECT batch_id FROM target_batch); +WHERE batch_id IN (SELECT tb.batch_id FROM target_batch AS tb); -- name: InsertAssetSeedling :exec INSERT INTO asset_seedlings ( asset_name, asset_type, asset_version, asset_supply, asset_meta_id, emission_enabled, batch_id, group_genesis_id, group_anchor_id, - script_key_id, group_internal_key_id, group_tapscript_root, delegation_key_id + script_key_id, group_internal_key_id, group_tapscript_root, + delegation_key_id ) VALUES ( - @asset_name, @asset_type, @asset_version, @asset_supply, - @asset_meta_id, @emission_enabled, @batch_id, - sqlc.narg('group_genesis_id'), sqlc.narg('group_anchor_id'), - sqlc.narg('script_key_id'), sqlc.narg('group_internal_key_id'), - @group_tapscript_root, sqlc.narg('delegation_key_id') + @asset_name, @asset_type, @asset_version, @asset_supply, + @asset_meta_id, @emission_enabled, @batch_id, + sqlc.narg('group_genesis_id'), sqlc.narg('group_anchor_id'), + sqlc.narg('script_key_id'), sqlc.narg('group_internal_key_id'), + @group_tapscript_root, sqlc.narg('delegation_key_id') ); -- name: FetchSeedlingID :one @@ -73,15 +80,16 @@ WITH target_key_id AS ( -- associated with a given batch. This can only return one value in -- practice since raw_key is a unique field. We then use this value below -- to select only from seedlings in the specified batch. 
- SELECT key_id - FROM internal_keys keys + SELECT keys.key_id + FROM internal_keys AS keys WHERE keys.raw_key = @batch_key ) -SELECT seedling_id + +SELECT asset_seedlings.seedling_id FROM asset_seedlings WHERE ( - asset_seedlings.batch_id in (SELECT key_id FROM target_key_id) AND - asset_seedlings.asset_name = @seedling_name + asset_seedlings.batch_id IN (SELECT tki.key_id FROM target_key_id AS tki) + AND asset_seedlings.asset_name = @seedling_name ); -- name: FetchSeedlingByID :one @@ -90,14 +98,16 @@ FROM asset_seedlings WHERE seedling_id = @seedling_id; -- name: AllInternalKeys :many -SELECT * +SELECT * FROM internal_keys; -- name: AllMintingBatches :many -SELECT * -FROM asset_minting_batches -JOIN internal_keys -ON asset_minting_batches.batch_id = internal_keys.key_id; +SELECT + batches.*, + keys.* +FROM asset_minting_batches AS batches +JOIN internal_keys AS keys + ON batches.batch_id = keys.key_id; -- name: InsertAssetSeedlingIntoBatch :exec WITH target_key_id AS ( @@ -106,11 +116,12 @@ WITH target_key_id AS ( -- practice since raw_key is a unique field. We then use this value below -- to insert the seedling and point to the proper batch_id, which is a -- foreign key that references the key_id of the internal key. - SELECT key_id - FROM internal_keys keys + SELECT keys.key_id + FROM internal_keys AS keys WHERE keys.raw_key = $1 ) -INSERT INTO asset_seedlings( + +INSERT INTO asset_seedlings ( asset_name, asset_type, asset_version, asset_supply, asset_meta_id, emission_enabled, batch_id, group_genesis_id, group_anchor_id, script_key_id, group_internal_key_id, group_tapscript_root, @@ -125,17 +136,26 @@ INSERT INTO asset_seedlings( ); -- name: FetchSeedlingsForBatch :many -WITH target_batch(batch_id) AS ( - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys +WITH target_batch (batch_id) AS ( + SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) -SELECT seedling_id, asset_name, asset_type, asset_version, asset_supply, - sqlc.embed(assets_meta), - emission_enabled, batch_id, - group_genesis_id, group_anchor_id, group_tapscript_root, + +SELECT + asset_seedlings.seedling_id, + asset_seedlings.asset_name, + asset_seedlings.asset_type, + asset_seedlings.asset_version, + asset_seedlings.asset_supply, + sqlc.embed(assets_meta), -- noqa: RF02,AL03 + asset_seedlings.emission_enabled, + asset_seedlings.batch_id, + asset_seedlings.group_genesis_id, + asset_seedlings.group_anchor_id, + asset_seedlings.group_tapscript_root, -- TODO(guggero): We should use sqlc.embed() for the script key and internal -- key fields, but we can't because it's a LEFT JOIN. We should check if the -- LEFT JOIN is actually necessary or if we always have keys for seedlings. 
@@ -151,27 +171,27 @@ SELECT seedling_id, asset_name, asset_type, asset_version, asset_supply, delegation_internal_keys.raw_key AS delegation_key_raw, delegation_internal_keys.key_family AS delegation_key_fam, delegation_internal_keys.key_index AS delegation_key_index -FROM asset_seedlings +FROM asset_seedlings LEFT JOIN assets_meta ON asset_seedlings.asset_meta_id = assets_meta.meta_id LEFT JOIN script_keys ON asset_seedlings.script_key_id = script_keys.script_key_id LEFT JOIN internal_keys ON script_keys.internal_key_id = internal_keys.key_id -LEFT JOIN internal_keys group_internal_keys +LEFT JOIN internal_keys AS group_internal_keys ON asset_seedlings.group_internal_key_id = group_internal_keys.key_id -LEFT JOIN internal_keys delegation_internal_keys +LEFT JOIN internal_keys AS delegation_internal_keys ON asset_seedlings.delegation_key_id = delegation_internal_keys.key_id -WHERE asset_seedlings.batch_id in (SELECT batch_id FROM target_batch); +WHERE asset_seedlings.batch_id IN (SELECT tb.batch_id FROM target_batch AS tb); -- name: UpsertGenesisPoint :one -INSERT INTO genesis_points( +INSERT INTO genesis_points ( prev_out ) VALUES ( $1 ) ON CONFLICT (prev_out) - -- This is a NOP, prev_out is the unique field that caused the conflict. - DO UPDATE SET prev_out = EXCLUDED.prev_out +-- This is a NOP, prev_out is the unique field that caused the conflict. +DO UPDATE SET prev_out = excluded.prev_out RETURNING genesis_id; -- name: UpsertAssetGroupKey :one @@ -181,9 +201,9 @@ INSERT INTO asset_groups ( ) VALUES ( $1, $2, $3, $4, $5, $6 ) ON CONFLICT (tweaked_group_key) - -- This is not a NOP, update the genesis point ID in case it wasn't set - -- before. - DO UPDATE SET genesis_point_id = EXCLUDED.genesis_point_id +-- This is not a NOP, update the genesis point ID in case it wasn't set +-- before. +DO UPDATE SET genesis_point_id = excluded.genesis_point_id RETURNING group_id; -- name: UpsertAssetGroupWitness :one @@ -192,8 +212,8 @@ INSERT INTO asset_group_witnesses ( ) VALUES ( $1, $2, $3 ) ON CONFLICT (gen_asset_id) - -- This is a NOP, gen_asset_id is the unique field that caused the conflict. - DO UPDATE SET gen_asset_id = EXCLUDED.gen_asset_id +-- This is a NOP, gen_asset_id is the unique field that caused the conflict. +DO UPDATE SET gen_asset_id = excluded.gen_asset_id RETURNING witness_id; -- name: UpsertGenesisAsset :one @@ -202,26 +222,32 @@ WITH target_meta_id AS ( FROM assets_meta WHERE meta_data_hash = $1 ) + INSERT INTO genesis_assets ( - asset_id, asset_tag, meta_data_id, output_index, asset_type, genesis_point_id + asset_id, + asset_tag, + meta_data_id, + output_index, + asset_type, + genesis_point_id ) VALUES ( $2, $3, (SELECT meta_id FROM target_meta_id), $4, $5, $6 ) ON CONFLICT (asset_id) - -- This is a NOP, asset_id is the unique field that caused the conflict. - DO UPDATE SET asset_id = EXCLUDED.asset_id +-- This is a NOP, asset_id is the unique field that caused the conflict. +DO UPDATE SET asset_id = excluded.asset_id RETURNING gen_asset_id; -- name: UpsertAsset :one INSERT INTO assets ( - genesis_id, version, script_key_id, asset_group_witness_id, script_version, + genesis_id, version, script_key_id, asset_group_witness_id, script_version, amount, lock_time, relative_lock_time, anchor_utxo_id, spent ) VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9, $10 ) ON CONFLICT (genesis_id, script_key_id, anchor_utxo_id) - -- This is a NOP, anchor_utxo_id is one of the unique fields that caused the - -- conflict. 
- DO UPDATE SET anchor_utxo_id = EXCLUDED.anchor_utxo_id +-- This is a NOP, anchor_utxo_id is one of the unique fields that caused the +-- conflict. +DO UPDATE SET anchor_utxo_id = excluded.anchor_utxo_id RETURNING asset_id; -- name: FetchAssetsForBatch :many @@ -233,49 +259,68 @@ WITH genesis_info AS ( -- points, to the internal key that reference the batch, then restricted -- for internal keys that match our main batch key. SELECT - gen_asset_id, asset_id, asset_tag, output_index, asset_type, - genesis_points.prev_out prev_out, + genesis_assets.gen_asset_id, + genesis_assets.asset_id, + genesis_assets.asset_tag, + genesis_assets.output_index, + genesis_assets.asset_type, + genesis_points.prev_out, assets_meta.meta_id FROM genesis_assets LEFT JOIN assets_meta ON genesis_assets.meta_data_id = assets_meta.meta_id JOIN genesis_points ON genesis_assets.genesis_point_id = genesis_points.genesis_id - JOIN asset_minting_batches batches + JOIN asset_minting_batches AS batches ON genesis_points.genesis_id = batches.genesis_id - JOIN internal_keys keys - ON keys.key_id = batches.batch_id + JOIN internal_keys AS keys + ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 -), key_group_info AS ( +), + +key_group_info AS ( -- This CTE is used to perform a series of joins that allow us to extract -- the group key information, as well as the group sigs for the series of -- assets we care about. We obtain only the assets found in the batch -- above, with the WHERE query at the bottom. - SELECT - witness_id, gen_asset_id, witness_stack, tapscript_root, - tweaked_group_key, raw_key, key_index, key_family - FROM asset_group_witnesses wit - JOIN asset_groups groups - ON wit.group_key_id = groups.group_id - JOIN internal_keys keys - ON keys.key_id = groups.internal_key_id + SELECT + wit.witness_id, + wit.gen_asset_id, + wit.witness_stack, + grp.tapscript_root, + grp.tweaked_group_key, + keys.raw_key, + keys.key_index, + keys.key_family + FROM asset_group_witnesses AS wit + JOIN asset_groups AS grp + ON wit.group_key_id = grp.group_id + JOIN internal_keys AS keys + ON grp.internal_key_id = keys.key_id -- TODO(roasbeef): or can join do this below? 
- WHERE wit.gen_asset_id IN (SELECT gen_asset_id FROM genesis_info) + WHERE wit.gen_asset_id IN (SELECT gi.gen_asset_id FROM genesis_info AS gi) ) -SELECT - version, - sqlc.embed(script_keys), - sqlc.embed(internal_keys), - key_group_info.tapscript_root, - key_group_info.witness_stack, + +SELECT + assets.version, + sqlc.embed(script_keys), -- noqa: RF02,AL03 + sqlc.embed(internal_keys), -- noqa: RF02,AL03 + key_group_info.tapscript_root, + key_group_info.witness_stack, key_group_info.tweaked_group_key, key_group_info.raw_key AS group_key_raw, key_group_info.key_family AS group_key_family, key_group_info.key_index AS group_key_index, - script_version, amount, lock_time, relative_lock_time, spent, - genesis_info.asset_id, genesis_info.asset_tag, - sqlc.embed(assets_meta), - genesis_info.output_index AS genesis_output_index, genesis_info.asset_type, + assets.script_version, + assets.amount, + assets.lock_time, + assets.relative_lock_time, + assets.spent, + genesis_info.asset_id, + genesis_info.asset_tag, + sqlc.embed(assets_meta), -- noqa: RF02,AL03 + genesis_info.output_index AS genesis_output_index, + genesis_info.asset_type, genesis_info.prev_out AS genesis_prev_out FROM assets JOIN genesis_info @@ -290,25 +335,30 @@ LEFT JOIN assets_meta LEFT JOIN key_group_info ON assets.genesis_id = key_group_info.gen_asset_id JOIN script_keys - on assets.script_key_id = script_keys.script_key_id + ON assets.script_key_id = script_keys.script_key_id JOIN internal_keys ON script_keys.internal_key_id = internal_keys.key_id; -- name: SetAssetSpent :one -WITH target_asset(asset_id) AS ( +WITH target_asset (asset_id) AS ( SELECT assets.asset_id FROM assets JOIN script_keys - ON assets.script_key_id = script_keys.script_key_id + ON assets.script_key_id = script_keys.script_key_id JOIN genesis_assets - ON assets.genesis_id = genesis_assets.gen_asset_id - JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id AND - (utxos.outpoint = sqlc.narg('anchor_point') OR - sqlc.narg('anchor_point') IS NULL) - WHERE script_keys.tweaked_script_key = @script_key - AND genesis_assets.asset_id = @gen_asset_id + ON assets.genesis_id = genesis_assets.gen_asset_id + JOIN managed_utxos AS utxos + ON + assets.anchor_utxo_id = utxos.utxo_id + AND ( + utxos.outpoint = sqlc.narg('anchor_point') + OR sqlc.narg('anchor_point') IS NULL + ) + WHERE + script_keys.tweaked_script_key = @script_key + AND genesis_assets.asset_id = @gen_asset_id ) + UPDATE assets SET spent = TRUE WHERE asset_id = (SELECT asset_id FROM target_asset) @@ -316,79 +366,120 @@ RETURNING assets.asset_id; -- name: QueryAssetBalancesByAsset :many SELECT - genesis_info_view.asset_id, SUM(amount) balance, - genesis_info_view.asset_tag, genesis_info_view.meta_hash, - genesis_info_view.asset_type, genesis_info_view.output_index, + genesis_info_view.asset_id, + SUM(assets.amount) AS balance, + genesis_info_view.asset_tag, + genesis_info_view.meta_hash, + genesis_info_view.asset_type, + genesis_info_view.output_index, genesis_info_view.prev_out AS genesis_point FROM assets JOIN genesis_info_view - ON assets.genesis_id = genesis_info_view.gen_asset_id AND - (genesis_info_view.asset_id = sqlc.narg('asset_id_filter') OR - sqlc.narg('asset_id_filter') IS NULL) + ON + assets.genesis_id = genesis_info_view.gen_asset_id + AND ( + genesis_info_view.asset_id = sqlc.narg('asset_id_filter') + OR sqlc.narg('asset_id_filter') IS NULL + ) -- We use a LEFT JOIN here as not every asset has a group key, so this'll -- generate rows that have NULL values for the group key fields 
if an asset -- doesn't have a group key. See the comment in fetchAssetSprouts for a work -- around that needs to be used with this query until a sqlc bug is fixed. -LEFT JOIN key_group_info_view +LEFT JOIN key_group_info_view -- noqa: ST11 ON assets.genesis_id = key_group_info_view.gen_asset_id -JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id AND - CASE - WHEN sqlc.narg('leased') = true THEN - (utxos.lease_owner IS NOT NULL AND utxos.lease_expiry > @now) - WHEN sqlc.narg('leased') = false THEN - (utxos.lease_owner IS NULL OR - utxos.lease_expiry IS NULL OR - utxos.lease_expiry <= @now) - ELSE TRUE - END +JOIN managed_utxos AS utxos + ON + assets.anchor_utxo_id = utxos.utxo_id + AND CASE + WHEN sqlc.narg('leased') = TRUE + THEN + ( + utxos.lease_owner IS NOT NULL + AND utxos.lease_expiry > @now + ) + WHEN sqlc.narg('leased') = FALSE + THEN + ( + utxos.lease_owner IS NULL + OR utxos.lease_expiry IS NULL + OR utxos.lease_expiry <= @now + ) + ELSE TRUE + END JOIN script_keys ON assets.script_key_id = script_keys.script_key_id -WHERE spent = FALSE AND - (script_keys.key_type != sqlc.narg('exclude_script_key_type') OR - sqlc.narg('exclude_script_key_type') IS NULL) AND - (sqlc.narg('script_key_type') = script_keys.key_type OR - sqlc.narg('script_key_type') IS NULL) -GROUP BY assets.genesis_id, genesis_info_view.asset_id, - genesis_info_view.asset_tag, genesis_info_view.meta_hash, - genesis_info_view.asset_type, genesis_info_view.output_index, - genesis_info_view.prev_out; +WHERE + assets.spent = FALSE + AND ( + script_keys.key_type != sqlc.narg('exclude_script_key_type') + OR sqlc.narg('exclude_script_key_type') IS NULL + ) + AND ( + sqlc.narg('script_key_type') = script_keys.key_type + OR sqlc.narg('script_key_type') IS NULL + ) +GROUP BY + assets.genesis_id, genesis_info_view.asset_id, + genesis_info_view.asset_tag, genesis_info_view.meta_hash, + genesis_info_view.asset_type, genesis_info_view.output_index, + genesis_info_view.prev_out; -- name: QueryAssetBalancesByGroup :many SELECT - key_group_info_view.tweaked_group_key, SUM(amount) balance + key_group_info_view.tweaked_group_key, + SUM(assets.amount) AS balance FROM assets JOIN key_group_info_view - ON assets.genesis_id = key_group_info_view.gen_asset_id AND - (key_group_info_view.tweaked_group_key = sqlc.narg('key_group_filter') OR - sqlc.narg('key_group_filter') IS NULL) -JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id AND - CASE - WHEN sqlc.narg('leased') = true THEN - (utxos.lease_owner IS NOT NULL AND utxos.lease_expiry > @now) - WHEN sqlc.narg('leased') = false THEN - (utxos.lease_owner IS NULL OR - utxos.lease_expiry IS NULL OR - utxos.lease_expiry <= @now) - ELSE TRUE - END + ON + assets.genesis_id = key_group_info_view.gen_asset_id + AND ( + key_group_info_view.tweaked_group_key + = sqlc.narg('key_group_filter') + OR sqlc.narg('key_group_filter') IS NULL + ) +JOIN managed_utxos AS utxos + ON + assets.anchor_utxo_id = utxos.utxo_id + AND CASE + WHEN sqlc.narg('leased') = TRUE + THEN + ( + utxos.lease_owner IS NOT NULL + AND utxos.lease_expiry > @now + ) + WHEN sqlc.narg('leased') = FALSE + THEN + ( + utxos.lease_owner IS NULL + OR utxos.lease_expiry IS NULL + OR utxos.lease_expiry <= @now + ) + ELSE TRUE + END JOIN script_keys ON assets.script_key_id = script_keys.script_key_id -WHERE spent = FALSE AND - (script_keys.key_type != sqlc.narg('exclude_script_key_type') OR - sqlc.narg('exclude_script_key_type') IS NULL) AND - (sqlc.narg('script_key_type') = script_keys.key_type OR - 
sqlc.narg('script_key_type') IS NULL) +WHERE + assets.spent = FALSE + AND ( + script_keys.key_type != sqlc.narg('exclude_script_key_type') + OR sqlc.narg('exclude_script_key_type') IS NULL + ) + AND ( + sqlc.narg('script_key_type') = script_keys.key_type + OR sqlc.narg('script_key_type') IS NULL + ) GROUP BY key_group_info_view.tweaked_group_key; -- name: FetchGroupedAssets :many SELECT assets.asset_id AS asset_primary_key, - amount, lock_time, relative_lock_time, spent, - genesis_info_view.asset_id AS asset_id, + assets.amount, + assets.lock_time, + assets.relative_lock_time, + assets.spent, + genesis_info_view.asset_id, genesis_info_view.asset_tag, - genesis_info_view.meta_Hash, + genesis_info_view.meta_hash, genesis_info_view.asset_type, key_group_info_view.tweaked_group_key, assets.version AS asset_version @@ -397,18 +488,18 @@ JOIN genesis_info_view ON assets.genesis_id = genesis_info_view.gen_asset_id JOIN key_group_info_view ON assets.genesis_id = key_group_info_view.gen_asset_id -WHERE spent = false; +WHERE assets.spent = FALSE; -- name: FetchGroupByGroupKey :one SELECT - key_group_info_view.version AS version, - key_group_info_view.gen_asset_id AS gen_asset_id, - key_group_info_view.raw_key AS raw_key, - key_group_info_view.key_index AS key_index, - key_group_info_view.key_family AS key_family, - key_group_info_view.tapscript_root AS tapscript_root, - key_group_info_view.witness_stack AS witness_stack, - key_group_info_view.custom_subtree_root AS custom_subtree_root + key_group_info_view.version, + key_group_info_view.gen_asset_id, + key_group_info_view.raw_key, + key_group_info_view.key_index, + key_group_info_view.key_family, + key_group_info_view.tapscript_root, + key_group_info_view.witness_stack, + key_group_info_view.custom_subtree_root FROM key_group_info_view WHERE ( key_group_info_view.tweaked_group_key = @group_key @@ -419,14 +510,14 @@ LIMIT 1; -- name: FetchGroupByGenesis :one SELECT - key_group_info_view.version AS version, - key_group_info_view.tweaked_group_key AS tweaked_group_key, - key_group_info_view.raw_key AS raw_key, - key_group_info_view.key_index AS key_index, - key_group_info_view.key_family AS key_family, - key_group_info_view.tapscript_root AS tapscript_root, - key_group_info_view.witness_stack AS witness_stack, - key_group_info_view.custom_subtree_root AS custom_subtree_root + key_group_info_view.version, + key_group_info_view.tweaked_group_key, + key_group_info_view.raw_key, + key_group_info_view.key_index, + key_group_info_view.key_family, + key_group_info_view.tapscript_root, + key_group_info_view.witness_stack, + key_group_info_view.custom_subtree_root FROM key_group_info_view WHERE ( key_group_info_view.gen_asset_id = @genesis_id @@ -435,19 +526,24 @@ WHERE ( -- name: QueryAssets :many SELECT assets.asset_id AS asset_primary_key, - assets.genesis_id, assets.version, spent, - sqlc.embed(script_keys), - sqlc.embed(internal_keys), - key_group_info_view.tapscript_root, - key_group_info_view.witness_stack, + assets.genesis_id, + assets.version, + assets.spent, + sqlc.embed(script_keys), -- noqa: RF02,AL03 + sqlc.embed(internal_keys), -- noqa: RF02,AL03 + key_group_info_view.tapscript_root, + key_group_info_view.witness_stack, key_group_info_view.tweaked_group_key, key_group_info_view.raw_key AS group_key_raw, key_group_info_view.key_family AS group_key_family, key_group_info_view.key_index AS group_key_index, - script_version, amount, lock_time, relative_lock_time, - genesis_info_view.asset_id AS asset_id, + assets.script_version, + assets.amount, + 
assets.lock_time, + assets.relative_lock_time, + genesis_info_view.asset_id, genesis_info_view.asset_tag, - genesis_info_view.meta_hash, + genesis_info_view.meta_hash, genesis_info_view.output_index AS genesis_output_index, genesis_info_view.asset_type, genesis_info_view.prev_out AS genesis_prev_out, @@ -463,12 +559,16 @@ SELECT utxos.lease_owner AS anchor_lease_owner, utxos.lease_expiry AS anchor_lease_expiry, utxo_internal_keys.raw_key AS anchor_internal_key, - split_commitment_root_hash, split_commitment_root_value + assets.split_commitment_root_hash, + assets.split_commitment_root_value FROM assets JOIN genesis_info_view - ON assets.genesis_id = genesis_info_view.gen_asset_id AND - (genesis_info_view.asset_id = sqlc.narg('asset_id_filter') OR - sqlc.narg('asset_id_filter') IS NULL) + ON + assets.genesis_id = genesis_info_view.gen_asset_id + AND ( + genesis_info_view.asset_id = sqlc.narg('asset_id_filter') + OR sqlc.narg('asset_id_filter') IS NULL + ) -- We use a LEFT JOIN here as not every asset has a group key, so this'll -- generate rows that have NULL values for the group key fields if an asset -- doesn't have a group key. See the comment in fetchAssetSprouts for a work @@ -476,102 +576,132 @@ JOIN genesis_info_view LEFT JOIN key_group_info_view ON assets.genesis_id = key_group_info_view.gen_asset_id JOIN script_keys - ON assets.script_key_id = script_keys.script_key_id AND - (script_keys.tweaked_script_key = sqlc.narg('tweaked_script_key') OR - sqlc.narg('tweaked_script_key') IS NULL) + ON + assets.script_key_id = script_keys.script_key_id + AND ( + script_keys.tweaked_script_key = sqlc.narg('tweaked_script_key') + OR sqlc.narg('tweaked_script_key') IS NULL + ) JOIN internal_keys ON script_keys.internal_key_id = internal_keys.key_id -JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id AND - (utxos.outpoint = sqlc.narg('anchor_point') OR - sqlc.narg('anchor_point') IS NULL) AND - CASE - WHEN sqlc.narg('leased') = true THEN - (utxos.lease_owner IS NOT NULL AND utxos.lease_expiry > @now) - WHEN sqlc.narg('leased') = false THEN - (utxos.lease_owner IS NULL OR - utxos.lease_expiry IS NULL OR - utxos.lease_expiry <= @now) - ELSE TRUE - END -JOIN internal_keys utxo_internal_keys +JOIN managed_utxos AS utxos + ON + assets.anchor_utxo_id = utxos.utxo_id + AND ( + utxos.outpoint = sqlc.narg('anchor_point') + OR sqlc.narg('anchor_point') IS NULL + ) + AND CASE + WHEN sqlc.narg('leased') = TRUE + THEN + ( + utxos.lease_owner IS NOT NULL + AND utxos.lease_expiry > @now + ) + WHEN sqlc.narg('leased') = FALSE + THEN + ( + utxos.lease_owner IS NULL + OR utxos.lease_expiry IS NULL + OR utxos.lease_expiry <= @now + ) + ELSE TRUE + END +JOIN internal_keys AS utxo_internal_keys ON utxos.internal_key_id = utxo_internal_keys.key_id -JOIN chain_txns txns - ON utxos.txn_id = txns.txn_id AND - COALESCE(txns.block_height, 0) >= COALESCE(sqlc.narg('min_anchor_height'), txns.block_height, 0) +JOIN chain_txns AS txns + ON + utxos.txn_id = txns.txn_id + AND COALESCE(txns.block_height, 0) + >= COALESCE(sqlc.narg('min_anchor_height'), txns.block_height, 0) -- This clause is used to select specific assets for a asset ID, general -- channel balances, and also coin selection. We use the sqlc.narg feature to -- make the entire statement evaluate to true, if none of these extra args are -- specified. 
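-- Illustrative sketch of the optional-filter pattern the comment above
-- describes: each nullable argument either constrains the query or, when
-- passed as NULL, makes its clause evaluate to TRUE so the filter is skipped.
-- Written here with a plain named parameter (:genesis_id_filter is a
-- hypothetical stand-in for sqlc.narg('genesis_id')):
SELECT assets.asset_id
FROM assets
WHERE (
    assets.genesis_id = :genesis_id_filter
    OR :genesis_id_filter IS NULL
);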
WHERE ( - assets.amount >= COALESCE(sqlc.narg('min_amt'), assets.amount) AND - assets.amount <= COALESCE(sqlc.narg('max_amt'), assets.amount) AND - assets.spent = COALESCE(sqlc.narg('spent'), assets.spent) AND - (key_group_info_view.tweaked_group_key = sqlc.narg('key_group_filter') OR - sqlc.narg('key_group_filter') IS NULL) AND - assets.anchor_utxo_id = COALESCE(sqlc.narg('anchor_utxo_id'), assets.anchor_utxo_id) AND - assets.genesis_id = COALESCE(sqlc.narg('genesis_id'), assets.genesis_id) AND - assets.script_key_id = COALESCE(sqlc.narg('script_key_id'), assets.script_key_id) AND - (sqlc.narg('script_key_type') = script_keys.key_type OR - sqlc.narg('script_key_type') IS NULL) + assets.amount >= COALESCE(sqlc.narg('min_amt'), assets.amount) + AND assets.amount <= COALESCE(sqlc.narg('max_amt'), assets.amount) + AND assets.spent = COALESCE(sqlc.narg('spent'), assets.spent) + AND ( + key_group_info_view.tweaked_group_key = sqlc.narg('key_group_filter') + OR sqlc.narg('key_group_filter') IS NULL + ) + AND assets.anchor_utxo_id + = COALESCE(sqlc.narg('anchor_utxo_id'), assets.anchor_utxo_id) + AND assets.genesis_id = COALESCE(sqlc.narg('genesis_id'), assets.genesis_id) + AND assets.script_key_id + = COALESCE(sqlc.narg('script_key_id'), assets.script_key_id) + AND ( + sqlc.narg('script_key_type') = script_keys.key_type + OR sqlc.narg('script_key_type') IS NULL + ) ); -- name: AllAssets :many -SELECT * +SELECT * FROM assets; -- name: AssetsInBatch :many SELECT - gen_asset_id, asset_id, asset_tag, assets_meta.meta_data_hash, - output_index, asset_type, genesis_points.prev_out prev_out + genesis_assets.gen_asset_id, + genesis_assets.asset_id, + genesis_assets.asset_tag, + assets_meta.meta_data_hash, + genesis_assets.output_index, + genesis_assets.asset_type, + genesis_points.prev_out FROM genesis_assets LEFT JOIN assets_meta ON genesis_assets.meta_data_id = assets_meta.meta_id JOIN genesis_points ON genesis_assets.genesis_point_id = genesis_points.genesis_id -JOIN asset_minting_batches batches +JOIN asset_minting_batches AS batches ON genesis_points.genesis_id = batches.genesis_id -JOIN internal_keys keys - ON keys.key_id = batches.batch_id +JOIN internal_keys AS keys + ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1; -- name: BindMintingBatchWithTx :one WITH target_batch AS ( - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys + SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) + UPDATE asset_minting_batches -SET minting_tx_psbt = $2, change_output_index = $3, assets_output_index = $4, +SET + minting_tx_psbt = $2, change_output_index = $3, assets_output_index = $4, genesis_id = $5, universe_commitments = $6 -WHERE batch_id IN (SELECT batch_id FROM target_batch) +WHERE batch_id IN (SELECT tb.batch_id FROM target_batch AS tb) RETURNING batch_id; -- name: BindMintingBatchWithTapSibling :exec WITH target_batch AS ( - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys + SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) + UPDATE asset_minting_batches SET tapscript_sibling = $2 -WHERE batch_id IN (SELECT batch_id FROM target_batch); +WHERE batch_id IN (SELECT tb.batch_id FROM target_batch AS tb); -- name: UpdateBatchGenesisTx :exec WITH target_batch AS ( - SELECT batch_id - FROM asset_minting_batches batches - JOIN internal_keys keys + 
SELECT batches.batch_id + FROM asset_minting_batches AS batches + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) + UPDATE asset_minting_batches SET minting_tx_psbt = $2 -WHERE batch_id in (SELECT batch_id FROM target_batch); +WHERE batch_id IN (SELECT tb.batch_id FROM target_batch AS tb); -- name: UpsertChainTx :one INSERT INTO chain_txns ( @@ -580,11 +710,12 @@ INSERT INTO chain_txns ( $1, $2, $3, sqlc.narg('block_height'), sqlc.narg('block_hash'), sqlc.narg('tx_index') ) ON CONFLICT (txid) - -- Not a NOP but instead update any nullable fields that aren't null in the - -- args. - DO UPDATE SET block_height = COALESCE(EXCLUDED.block_height, chain_txns.block_height), - block_hash = COALESCE(EXCLUDED.block_hash, chain_txns.block_hash), - tx_index = COALESCE(EXCLUDED.tx_index, chain_txns.tx_index) +-- Not a NOP but instead update any nullable fields that aren't null in the +-- args. +DO UPDATE SET block_height += COALESCE(excluded.block_height, chain_txns.block_height), +block_hash = COALESCE(excluded.block_hash, chain_txns.block_hash), +tx_index = COALESCE(excluded.tx_index, chain_txns.tx_index) RETURNING txn_id; -- name: FetchChainTx :one @@ -593,63 +724,75 @@ FROM chain_txns WHERE txid = $1; -- name: UpsertManagedUTXO :one -WITH target_key(key_id) AS ( +WITH target_key (key_id) AS ( SELECT key_id FROM internal_keys WHERE raw_key = $1 ) + INSERT INTO managed_utxos ( outpoint, amt_sats, internal_key_id, tapscript_sibling, merkle_root, txn_id, taproot_asset_root, root_version ) VALUES ( $2, $3, (SELECT key_id FROM target_key), $4, $5, $6, $7, $8 ) ON CONFLICT (outpoint) - -- Not a NOP but instead update any nullable fields that aren't null in the - -- args. - DO UPDATE SET tapscript_sibling = COALESCE(EXCLUDED.tapscript_sibling, managed_utxos.tapscript_sibling) +-- Not a NOP but instead update any nullable fields that aren't null in the +-- args. 
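-- Illustrative sketch (hypothetical tables) of the CTE-scoped UPDATE pattern
-- used by UpdateBatchGenesisTx and similar queries above: a WITH clause
-- first resolves a caller-supplied key to an internal row ID, and the UPDATE
-- then restricts itself to that ID. This keeps the caller-facing parameter a
-- stable public key while the foreign keys stay internal row IDs.
CREATE TABLE IF NOT EXISTS example_keys (
    key_id INTEGER PRIMARY KEY,
    raw_key BLOB UNIQUE NOT NULL
);

CREATE TABLE IF NOT EXISTS example_batches (
    batch_id INTEGER PRIMARY KEY REFERENCES example_keys (key_id),
    batch_state SMALLINT NOT NULL DEFAULT 0
);

WITH target_batch AS (
    SELECT k.key_id AS batch_id
    FROM example_keys AS k
    WHERE k.raw_key = x'0102'
)

UPDATE example_batches
SET batch_state = 1
WHERE batch_id IN (SELECT tb.batch_id FROM target_batch AS tb);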
+DO UPDATE SET tapscript_sibling += COALESCE(excluded.tapscript_sibling, managed_utxos.tapscript_sibling) RETURNING utxo_id; -- name: FetchManagedUTXO :one -SELECT * -FROM managed_utxos utxos -JOIN internal_keys keys +SELECT + utxos.*, + keys.* +FROM managed_utxos AS utxos +JOIN internal_keys AS keys ON utxos.internal_key_id = keys.key_id WHERE ( - (txn_id = sqlc.narg('txn_id') OR sqlc.narg('txn_id') IS NULL) AND - (utxos.outpoint = sqlc.narg('outpoint') OR sqlc.narg('outpoint') IS NULL) + (utxos.txn_id = sqlc.narg('txn_id') OR sqlc.narg('txn_id') IS NULL) + AND ( + utxos.outpoint = sqlc.narg('outpoint') OR sqlc.narg('outpoint') IS NULL + ) ); -- name: FetchManagedUTXOs :many -SELECT * -FROM managed_utxos utxos -JOIN internal_keys keys +SELECT + utxos.*, + keys.* +FROM managed_utxos AS utxos +JOIN internal_keys AS keys ON utxos.internal_key_id = keys.key_id; -- name: AnchorPendingAssets :exec WITH assets_to_update AS ( - SELECT script_key_id - FROM assets - JOIN genesis_assets + SELECT assets.script_key_id + FROM assets + JOIN genesis_assets ON assets.genesis_id = genesis_assets.gen_asset_id JOIN genesis_points - ON genesis_points.genesis_id = genesis_assets.genesis_point_id - WHERE prev_out = $1 + ON genesis_assets.genesis_point_id = genesis_points.genesis_id + WHERE genesis_points.prev_out = $1 ) + UPDATE assets SET anchor_utxo_id = $2 -WHERE script_key_id in (SELECT script_key_id FROM assets_to_update); +WHERE script_key_id IN (SELECT u.script_key_id FROM assets_to_update AS u); -- name: AssetsByGenesisPoint :many -SELECT * -FROM assets -JOIN genesis_assets +SELECT + assets.*, + genesis_assets.*, + genesis_points.* +FROM assets +JOIN genesis_assets ON assets.genesis_id = genesis_assets.gen_asset_id JOIN genesis_points - ON genesis_points.genesis_id = genesis_assets.genesis_point_id -WHERE prev_out = $1; + ON genesis_assets.genesis_point_id = genesis_points.genesis_id +WHERE genesis_points.prev_out = $1; -- name: GenesisAssets :many -SELECT * +SELECT * FROM genesis_assets; -- name: GenesisPoints :many @@ -657,22 +800,25 @@ SELECT * FROM genesis_points; -- name: FetchGenesisID :one -WITH target_point(genesis_id) AS ( - SELECT genesis_id +WITH target_point (genesis_id) AS ( + SELECT genesis_points.genesis_id FROM genesis_points WHERE genesis_points.prev_out = @prev_out ) -SELECT gen_asset_id + +SELECT genesis_assets.gen_asset_id FROM genesis_assets -LEFT JOIN assets_meta +LEFT JOIN assets_meta ON genesis_assets.meta_data_id = assets_meta.meta_id WHERE ( - genesis_assets.genesis_point_id IN (SELECT genesis_id FROM target_point) AND - genesis_assets.asset_id = @asset_id AND - genesis_assets.asset_tag = @asset_tag AND - assets_meta.meta_data_hash = @meta_hash AND - genesis_assets.output_index = @output_index AND - genesis_assets.asset_type = @asset_type + genesis_assets.genesis_point_id IN ( + SELECT tp.genesis_id FROM target_point AS tp + ) + AND genesis_assets.asset_id = @asset_id + AND genesis_assets.asset_tag = @asset_tag + AND assets_meta.meta_data_hash = @meta_hash + AND genesis_assets.output_index = @output_index + AND genesis_assets.asset_type = @asset_type ); -- name: FetchGenesisIDByAssetID :one @@ -686,57 +832,67 @@ FROM assets WHERE anchor_utxo_id = $1; -- name: AnchorGenesisPoint :exec -WITH target_point(genesis_id) AS ( - SELECT genesis_id +WITH target_point (genesis_id) AS ( + SELECT genesis_points.genesis_id FROM genesis_points WHERE genesis_points.prev_out = $1 ) + UPDATE genesis_points SET anchor_tx_id = $2 -WHERE genesis_id in (SELECT genesis_id FROM target_point); +WHERE 
genesis_id IN (SELECT genesis_id FROM target_point); -- name: FetchGenesisPointByAnchorTx :one -SELECT * +SELECT * FROM genesis_points WHERE anchor_tx_id = $1; -- name: FetchGenesisByID :one SELECT - asset_id, asset_tag, assets_meta.meta_data_hash, output_index, asset_type, - genesis_points.prev_out prev_out + genesis_assets.asset_id, + genesis_assets.asset_tag, + assets_meta.meta_data_hash, + genesis_assets.output_index, + genesis_assets.asset_type, + genesis_points.prev_out FROM genesis_assets LEFT JOIN assets_meta ON genesis_assets.meta_data_id = assets_meta.meta_id JOIN genesis_points - ON genesis_assets.genesis_point_id = genesis_points.genesis_id -WHERE gen_asset_id = $1; + ON genesis_assets.genesis_point_id = genesis_points.genesis_id +WHERE genesis_assets.gen_asset_id = $1; -- name: ConfirmChainTx :exec -WITH target_txn(txn_id) AS ( - SELECT anchor_tx_id - FROM genesis_points points - JOIN asset_minting_batches batches - ON batches.genesis_id = points.genesis_id - JOIN internal_keys keys +WITH target_txn (txn_id) AS ( + SELECT points.anchor_tx_id + FROM genesis_points AS points + JOIN asset_minting_batches AS batches + ON points.genesis_id = batches.genesis_id + JOIN internal_keys AS keys ON batches.batch_id = keys.key_id WHERE keys.raw_key = $1 ) + UPDATE chain_txns SET block_height = $2, block_hash = $3, tx_index = $4 -WHERE txn_id in (SELECT txn_id FROM target_txn); +WHERE txn_id IN (SELECT txn_id FROM target_txn); -- name: FetchAssetID :many -SELECT asset_id - FROM assets - JOIN script_keys - ON assets.script_key_id = script_keys.script_key_id - JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id - WHERE - (script_keys.tweaked_script_key = sqlc.narg('tweaked_script_key') - OR sqlc.narg('tweaked_script_key') IS NULL) - AND (utxos.outpoint = sqlc.narg('outpoint') - OR sqlc.narg('outpoint') IS NULL); +SELECT assets.asset_id +FROM assets +JOIN script_keys + ON assets.script_key_id = script_keys.script_key_id +JOIN managed_utxos AS utxos + ON assets.anchor_utxo_id = utxos.utxo_id +WHERE + ( + script_keys.tweaked_script_key = sqlc.narg('tweaked_script_key') + OR sqlc.narg('tweaked_script_key') IS NULL + ) + AND ( + utxos.outpoint = sqlc.narg('outpoint') + OR sqlc.narg('outpoint') IS NULL + ); -- name: UpsertAssetProofByID :exec INSERT INTO asset_proofs ( @@ -744,24 +900,30 @@ INSERT INTO asset_proofs ( ) VALUES ( @asset_id, @proof_file ) ON CONFLICT (asset_id) - -- This is not a NOP, we always overwrite the proof with the new one. - DO UPDATE SET proof_file = EXCLUDED.proof_file; +-- This is not a NOP, we always overwrite the proof with the new one. 
+DO UPDATE SET proof_file = excluded.proof_file; -- name: FetchAssetProofs :many WITH asset_info AS ( - SELECT assets.asset_id, script_keys.tweaked_script_key + SELECT + assets.asset_id, + script_keys.tweaked_script_key FROM assets JOIN script_keys ON assets.script_key_id = script_keys.script_key_id ) -SELECT asset_info.tweaked_script_key AS script_key, asset_proofs.proof_file + +SELECT + asset_info.tweaked_script_key AS script_key, + asset_proofs.proof_file FROM asset_proofs JOIN asset_info - ON asset_info.asset_id = asset_proofs.asset_id; + ON asset_proofs.asset_id = asset_info.asset_id; -- name: FetchAssetProofsSizes :many -SELECT script_keys.tweaked_script_key AS script_key, - LENGTH(asset_proofs.proof_file) AS proof_file_length +SELECT + script_keys.tweaked_script_key AS script_key, + LENGTH(asset_proofs.proof_file) AS proof_file_length FROM asset_proofs JOIN assets ON asset_proofs.asset_id = assets.asset_id @@ -770,39 +932,58 @@ JOIN script_keys -- name: FetchAssetProofsByAssetID :many WITH asset_info AS ( - SELECT assets.asset_id, script_keys.tweaked_script_key + SELECT + assets.asset_id, + script_keys.tweaked_script_key FROM assets JOIN script_keys ON assets.script_key_id = script_keys.script_key_id - JOIN genesis_assets gen + JOIN genesis_assets AS gen ON assets.genesis_id = gen.gen_asset_id WHERE gen.asset_id = $1 ) -SELECT asset_info.tweaked_script_key AS script_key, asset_proofs.proof_file + +SELECT + asset_info.tweaked_script_key AS script_key, + asset_proofs.proof_file FROM asset_proofs JOIN asset_info - ON asset_info.asset_id = asset_proofs.asset_id; + ON asset_proofs.asset_id = asset_info.asset_id; -- name: FetchAssetProof :many WITH asset_info AS ( - SELECT assets.asset_id, script_keys.tweaked_script_key, utxos.outpoint + SELECT + assets.asset_id, + script_keys.tweaked_script_key, + utxos.outpoint FROM assets JOIN script_keys ON assets.script_key_id = script_keys.script_key_id - JOIN managed_utxos utxos + JOIN managed_utxos AS utxos ON assets.anchor_utxo_id = utxos.utxo_id JOIN genesis_assets - ON assets.genesis_id = genesis_assets.gen_asset_id - WHERE script_keys.tweaked_script_key = $1 - AND (utxos.outpoint = sqlc.narg('outpoint') OR sqlc.narg('outpoint') IS NULL) - AND (genesis_assets.asset_id = sqlc.narg('asset_id') OR sqlc.narg('asset_id') IS NULL) + ON assets.genesis_id = genesis_assets.gen_asset_id + WHERE + script_keys.tweaked_script_key = $1 + AND ( + utxos.outpoint = sqlc.narg('outpoint') + OR sqlc.narg('outpoint') IS NULL + ) + AND ( + genesis_assets.asset_id = sqlc.narg('asset_id') + OR sqlc.narg('asset_id') IS NULL + ) ) -SELECT asset_info.tweaked_script_key AS script_key, asset_proofs.proof_file, - asset_info.asset_id as asset_id, asset_proofs.proof_id as proof_id, - asset_info.outpoint as outpoint + +SELECT + asset_info.tweaked_script_key AS script_key, + asset_proofs.proof_file, + asset_info.asset_id, + asset_proofs.proof_id, + asset_info.outpoint FROM asset_proofs JOIN asset_info - ON asset_info.asset_id = asset_proofs.asset_id; + ON asset_proofs.asset_id = asset_info.asset_id; -- name: HasAssetProof :one WITH asset_info AS ( @@ -812,10 +993,11 @@ WITH asset_info AS ( ON assets.script_key_id = script_keys.script_key_id WHERE script_keys.tweaked_script_key = $1 ) -SELECT COUNT(asset_info.asset_id) > 0 as has_proof + +SELECT COUNT(asset_info.asset_id) > 0 AS has_proof FROM asset_proofs JOIN asset_info - ON asset_info.asset_id = asset_proofs.asset_id; + ON asset_proofs.asset_id = asset_info.asset_id; -- name: UpsertAssetWitness :exec INSERT INTO 
asset_witnesses ( @@ -823,25 +1005,29 @@ INSERT INTO asset_witnesses ( split_commitment_proof, witness_index ) VALUES ( $1, $2, $3, $4, $5, $6, $7 -) ON CONFLICT (asset_id, witness_index) - -- We overwrite the witness with the new one. - DO UPDATE SET prev_out_point = EXCLUDED.prev_out_point, - prev_asset_id = EXCLUDED.prev_asset_id, - prev_script_key = EXCLUDED.prev_script_key, - witness_stack = EXCLUDED.witness_stack, - split_commitment_proof = EXCLUDED.split_commitment_proof; +) ON CONFLICT (asset_id, witness_index) +-- We overwrite the witness with the new one. +DO UPDATE SET prev_out_point = excluded.prev_out_point, +prev_asset_id = excluded.prev_asset_id, +prev_script_key = excluded.prev_script_key, +witness_stack = excluded.witness_stack, +split_commitment_proof = excluded.split_commitment_proof; -- name: FetchAssetWitnesses :many -SELECT - assets.asset_id, prev_out_point, prev_asset_id, prev_script_key, - witness_stack, split_commitment_proof +SELECT + assets.asset_id, + asset_witnesses.prev_out_point, + asset_witnesses.prev_asset_id, + asset_witnesses.prev_script_key, + asset_witnesses.witness_stack, + asset_witnesses.split_commitment_proof FROM asset_witnesses JOIN assets ON asset_witnesses.asset_id = assets.asset_id WHERE ( (assets.asset_id = sqlc.narg('asset_id')) OR (sqlc.narg('asset_id') IS NULL) ) -ORDER BY witness_index; +ORDER BY asset_witnesses.witness_index; -- name: DeleteManagedUTXO :exec DELETE FROM managed_utxos @@ -860,9 +1046,10 @@ WHERE outpoint = @outpoint; -- name: DeleteExpiredUTXOLeases :exec UPDATE managed_utxos SET lease_owner = NULL, lease_expiry = NULL -WHERE lease_owner IS NOT NULL AND - lease_expiry IS NOT NULL AND - lease_expiry < @now; +WHERE + lease_owner IS NOT NULL + AND lease_expiry IS NOT NULL + AND lease_expiry < @now; -- name: ConfirmChainAnchorTx :exec UPDATE chain_txns @@ -874,26 +1061,22 @@ INSERT INTO script_keys ( internal_key_id, tweaked_script_key, tweak, key_type ) VALUES ( $1, $2, $3, $4 -) ON CONFLICT (tweaked_script_key) - -- Overwrite the declared_known, key_type and tweak fields if they were - -- previously unknown. - DO UPDATE SET - tweaked_script_key = EXCLUDED.tweaked_script_key, - -- If the tweak was previously unknown, we'll update to the new value. - tweak = - CASE - WHEN script_keys.tweak IS NULL - THEN COALESCE(EXCLUDED.tweak, script_keys.tweak) - ELSE script_keys.tweak - END, - -- We only overwrite the key type with a value that does not mean - -- "unknown" (0 or NULL). - key_type = - CASE - WHEN COALESCE(EXCLUDED.key_type, 0) != 0 - THEN EXCLUDED.key_type - ELSE script_keys.key_type - END +) ON CONFLICT (tweaked_script_key) +-- Overwrite the declared_known, key_type and tweak fields if they were +-- previously unknown. +DO UPDATE SET +tweaked_script_key = excluded.tweaked_script_key, +-- If the tweak was previously unknown, we'll update to the new value. +tweak += COALESCE(script_keys.tweak, COALESCE(excluded.tweak, script_keys.tweak)), +-- We only overwrite the key type with a value that does not mean +-- "unknown" (0 or NULL). 
+key_type += CASE + WHEN COALESCE(excluded.key_type, 0) != 0 + THEN excluded.key_type + ELSE script_keys.key_type +END RETURNING script_key_id; -- name: FetchScriptKeyIDByTweakedKey :one @@ -902,21 +1085,27 @@ FROM script_keys WHERE tweaked_script_key = $1; -- name: FetchScriptKeyByTweakedKey :one -SELECT sqlc.embed(script_keys), sqlc.embed(internal_keys) +SELECT + sqlc.embed(script_keys), -- noqa: RF02,AL03 + sqlc.embed(internal_keys) -- noqa: RF02,AL03 FROM script_keys JOIN internal_keys - ON script_keys.internal_key_id = internal_keys.key_id + ON script_keys.internal_key_id = internal_keys.key_id WHERE script_keys.tweaked_script_key = $1; -- name: FetchUnknownTypeScriptKeys :many -SELECT sqlc.embed(script_keys), sqlc.embed(internal_keys) +SELECT + sqlc.embed(script_keys), -- noqa: RF02,AL03 + sqlc.embed(internal_keys) -- noqa: RF02,AL03 FROM script_keys JOIN internal_keys - ON script_keys.internal_key_id = internal_keys.key_id + ON script_keys.internal_key_id = internal_keys.key_id WHERE script_keys.key_type IS NULL; -- name: FetchInternalKeyLocator :one -SELECT key_family, key_index +SELECT + key_family, + key_index FROM internal_keys WHERE raw_key = $1; @@ -926,10 +1115,10 @@ INSERT INTO tapscript_roots ( ) VALUES ( $1, $2 ) ON CONFLICT (root_hash) - -- This is a NOP, the root_hash is the unique field that caused the - -- conflict. The tree should be deleted before switching between branch and - -- leaf storage for the same root hash. - DO UPDATE SET root_hash = EXCLUDED.root_hash +-- This is a NOP, the root_hash is the unique field that caused the +-- conflict. The tree should be deleted before switching between branch and +-- leaf storage for the same root hash. +DO UPDATE SET root_hash = excluded.root_hash RETURNING root_id; -- name: UpsertTapscriptTreeNode :one @@ -938,8 +1127,8 @@ INSERT INTO tapscript_nodes ( ) VALUES ( $1 ) ON CONFLICT (raw_node) - -- This is a NOP, raw_node is the unique field that caused the conflict. - DO UPDATE SET raw_node = EXCLUDED.raw_node +-- This is a NOP, raw_node is the unique field that caused the conflict. +DO UPDATE SET raw_node = excluded.raw_node RETURNING node_id; -- name: UpsertTapscriptTreeEdge :one @@ -948,10 +1137,10 @@ INSERT INTO tapscript_edges ( ) VALUES ( $1, $2, $3 ) ON CONFLICT (root_hash_id, node_index, raw_node_id) - -- This is a NOP, root_hash_id, node_index, and raw_node_id are the unique - -- fields that caused the conflict. - DO UPDATE SET root_hash_id = EXCLUDED.root_hash_id, - node_index = EXCLUDED.node_index, raw_node_id = EXCLUDED.raw_node_id +-- This is a NOP, root_hash_id, node_index, and raw_node_id are the unique +-- fields that caused the conflict. +DO UPDATE SET root_hash_id = excluded.root_hash_id, +node_index = excluded.node_index, raw_node_id = excluded.raw_node_id RETURNING edge_id; -- name: FetchTapscriptTree :many @@ -959,17 +1148,22 @@ WITH tree_info AS ( -- This CTE is used to fetch all edges that link the given tapscript tree -- root hash to child nodes. Each edge also contains the index of the child -- node in the tapscript tree. 
- SELECT tapscript_roots.branch_only, tapscript_edges.raw_node_id, + SELECT + tapscript_roots.branch_only, + tapscript_edges.raw_node_id, tapscript_edges.node_index FROM tapscript_roots JOIN tapscript_edges ON tapscript_roots.root_id = tapscript_edges.root_hash_id WHERE tapscript_roots.root_hash = @root_hash ) -SELECT tree_info.branch_only, tapscript_nodes.raw_node + +SELECT + tree_info.branch_only, + tapscript_nodes.raw_node FROM tapscript_nodes JOIN tree_info - ON tree_info.raw_node_id = tapscript_nodes.node_id + ON tapscript_nodes.node_id = tree_info.raw_node_id -- Sort the nodes by node_index here instead of returning the indices. ORDER BY tree_info.node_index ASC; @@ -983,6 +1177,7 @@ WITH tree_info AS ( ON tapscript_edges.root_hash_id = tapscript_roots.root_id WHERE tapscript_roots.root_hash = @root_hash ) + DELETE FROM tapscript_edges WHERE edge_id IN (SELECT edge_id FROM tree_info); @@ -990,9 +1185,9 @@ WHERE edge_id IN (SELECT edge_id FROM tree_info); DELETE FROM tapscript_nodes WHERE NOT EXISTS ( SELECT 1 - FROM tapscript_edges - -- Delete any node that is not referenced by any edge. - WHERE tapscript_edges.raw_node_id = tapscript_nodes.node_id + FROM tapscript_edges + -- Delete any node that is not referenced by any edge. + WHERE tapscript_edges.raw_node_id = tapscript_nodes.node_id ); -- name: DeleteTapscriptTreeRoot :exec @@ -1000,7 +1195,7 @@ DELETE FROM tapscript_roots WHERE root_hash = @root_hash; -- name: FetchGenesisByAssetID :one -SELECT * +SELECT * FROM genesis_info_view WHERE asset_id = $1; @@ -1011,31 +1206,40 @@ INSERT INTO assets_meta ( ) VALUES ( $1, $2, $3, $4, $5, $6, $7 ) ON CONFLICT (meta_data_hash) - -- In this case, we may be inserting the data+type for an existing blob. So - -- we'll set all of those values. At this layer we assume the meta hash - -- has been validated elsewhere. - DO UPDATE SET meta_data_blob = COALESCE(EXCLUDED.meta_data_blob, assets_meta.meta_data_blob), - meta_data_type = COALESCE(EXCLUDED.meta_data_type, assets_meta.meta_data_type), - meta_decimal_display = COALESCE(EXCLUDED.meta_decimal_display, assets_meta.meta_decimal_display), - meta_universe_commitments = COALESCE(EXCLUDED.meta_universe_commitments, assets_meta.meta_universe_commitments), - meta_canonical_universes = COALESCE(EXCLUDED.meta_canonical_universes, assets_meta.meta_canonical_universes), - meta_delegation_key = COALESCE(EXCLUDED.meta_delegation_key, assets_meta.meta_delegation_key) - +-- In this case, we may be inserting the data+type for an existing blob. So +-- we'll set all of those values. At this layer we assume the meta hash +-- has been validated elsewhere. 
+DO UPDATE SET meta_data_blob += COALESCE(excluded.meta_data_blob, assets_meta.meta_data_blob), +meta_data_type = COALESCE(excluded.meta_data_type, assets_meta.meta_data_type), +meta_decimal_display += COALESCE(excluded.meta_decimal_display, assets_meta.meta_decimal_display), +meta_universe_commitments += COALESCE( + excluded.meta_universe_commitments, assets_meta.meta_universe_commitments +), +meta_canonical_universes += COALESCE( + excluded.meta_canonical_universes, assets_meta.meta_canonical_universes +), +meta_delegation_key += COALESCE(excluded.meta_delegation_key, assets_meta.meta_delegation_key) + RETURNING meta_id; -- name: FetchAssetMeta :one -SELECT sqlc.embed(assets_meta) +SELECT sqlc.embed(assets_meta) -- noqa: RF02,AL03 FROM assets_meta WHERE meta_id = $1; -- name: FetchAssetMetaByHash :one -SELECT sqlc.embed(assets_meta) +SELECT sqlc.embed(assets_meta) -- noqa: RF02,AL03 FROM assets_meta WHERE meta_data_hash = $1; -- name: FetchAssetMetaForAsset :one -SELECT sqlc.embed(assets_meta) -FROM genesis_assets assets +SELECT sqlc.embed(assets_meta) -- noqa: RF02,AL03 +FROM genesis_assets AS assets JOIN assets_meta ON assets.meta_data_id = assets_meta.meta_id WHERE assets.asset_id = $1; @@ -1048,9 +1252,10 @@ WITH target_batch AS ( -- This CTE is used to fetch the ID of a batch, based on the serialized -- internal key associated with the batch. SELECT keys.key_id AS batch_id - FROM internal_keys keys + FROM internal_keys AS keys WHERE keys.raw_key = @batch_key ) + INSERT INTO mint_anchor_uni_commitments ( batch_id, tx_output_index, taproot_internal_key_id, group_key ) @@ -1058,32 +1263,41 @@ VALUES ( (SELECT batch_id FROM target_batch), @tx_output_index, @taproot_internal_key_id, @group_key ) -ON CONFLICT(batch_id, tx_output_index) DO UPDATE SET - -- The following fields are updated if a conflict occurs. - taproot_internal_key_id = EXCLUDED.taproot_internal_key_id, - group_key = EXCLUDED.group_key +ON CONFLICT (batch_id, tx_output_index) DO UPDATE SET +-- The following fields are updated if a conflict occurs. +taproot_internal_key_id = excluded.taproot_internal_key_id, +group_key = excluded.group_key RETURNING id; -- Fetch records from the mint_anchor_uni_commitments table with optional -- filtering. 
-- name: FetchMintAnchorUniCommitment :many SELECT - mint_anchor_uni_commitments.id, - mint_anchor_uni_commitments.batch_id, - mint_anchor_uni_commitments.tx_output_index, - mint_anchor_uni_commitments.group_key, + commitments.id, + commitments.batch_id, + commitments.tx_output_index, + commitments.group_key, batch_internal_keys.raw_key AS batch_key, - mint_anchor_uni_commitments.taproot_internal_key_id, - sqlc.embed(taproot_internal_keys) -FROM mint_anchor_uni_commitments - JOIN internal_keys taproot_internal_keys - ON mint_anchor_uni_commitments.taproot_internal_key_id = taproot_internal_keys.key_id - LEFT JOIN asset_minting_batches batches - ON mint_anchor_uni_commitments.batch_id = batches.batch_id - LEFT JOIN internal_keys batch_internal_keys - ON batches.batch_id = batch_internal_keys.key_id + commitments.taproot_internal_key_id, + sqlc.embed(taproot_internal_keys) -- noqa: RF02,AL03 +FROM mint_anchor_uni_commitments AS commitments +JOIN internal_keys AS taproot_internal_keys + ON commitments.taproot_internal_key_id = taproot_internal_keys.key_id +LEFT JOIN asset_minting_batches AS batches + ON commitments.batch_id = batches.batch_id +LEFT JOIN internal_keys AS batch_internal_keys + ON batches.batch_id = batch_internal_keys.key_id WHERE ( - (batch_internal_keys.raw_key = sqlc.narg('batch_key') OR sqlc.narg('batch_key') IS NULL) AND - (mint_anchor_uni_commitments.group_key = sqlc.narg('group_key') OR sqlc.narg('group_key') IS NULL) AND - (taproot_internal_keys.raw_key = sqlc.narg('taproot_internal_key_raw') OR sqlc.narg('taproot_internal_key_raw') IS NULL) + ( + batch_internal_keys.raw_key = sqlc.narg('batch_key') + OR sqlc.narg('batch_key') IS NULL + ) + AND ( + commitments.group_key = sqlc.narg('group_key') + OR sqlc.narg('group_key') IS NULL + ) + AND ( + taproot_internal_keys.raw_key = sqlc.narg('taproot_internal_key_raw') + OR sqlc.narg('taproot_internal_key_raw') IS NULL + ) ); diff --git a/tapdb/sqlc/queries/macaroons.sql b/tapdb/sqlc/queries/macaroons.sql index 7ef620c0f..2b21ea659 100644 --- a/tapdb/sqlc/queries/macaroons.sql +++ b/tapdb/sqlc/queries/macaroons.sql @@ -1,5 +1,5 @@ -- name: GetRootKey :one -SELECT * FROM macaroons +SELECT * FROM macaroons WHERE id = $1; -- name: InsertRootKey :exec diff --git a/tapdb/sqlc/queries/metadata.sql b/tapdb/sqlc/queries/metadata.sql index ce383a79a..840ddff64 100644 --- a/tapdb/sqlc/queries/metadata.sql +++ b/tapdb/sqlc/queries/metadata.sql @@ -2,5 +2,5 @@ SELECT pg_catalog.pg_database_size(current_database()) AS size; -- name: AssetsDBSizeSqlite :one -SELECT page_count * page_size AS size_in_bytes -FROM pragma_page_count(), pragma_page_size(); \ No newline at end of file +SELECT pc.page_count * ps.page_size AS size_in_bytes +FROM pragma_page_count() AS pc, pragma_page_size() AS ps; diff --git a/tapdb/sqlc/queries/mssmt.sql b/tapdb/sqlc/queries/mssmt.sql index 4ff4d013e..b5522fe08 100644 --- a/tapdb/sqlc/queries/mssmt.sql +++ b/tapdb/sqlc/queries/mssmt.sql @@ -16,53 +16,100 @@ INSERT INTO mssmt_nodes ( -- name: FetchChildren :many WITH RECURSIVE mssmt_branches_cte ( hash_key, l_hash_key, r_hash_key, key, value, sum, namespace, depth -) -AS ( - SELECT r.hash_key, r.l_hash_key, r.r_hash_key, r.key, r.value, r.sum, r.namespace, 0 as depth - FROM mssmt_nodes r +) AS ( + SELECT + r.hash_key, + r.l_hash_key, + r.r_hash_key, + r.key, + r.value, + r.sum, + r.namespace, + 0 AS depth + FROM mssmt_nodes AS r WHERE r.hash_key = $1 AND r.namespace = $2 UNION ALL - SELECT n.hash_key, n.l_hash_key, n.r_hash_key, n.key, n.value, n.sum, n.namespace, 
depth+1 - FROM mssmt_nodes n, mssmt_branches_cte b - WHERE n.namespace=b.namespace AND (n.hash_key=b.l_hash_key OR n.hash_key=b.r_hash_key) - /* + SELECT + n.hash_key, + n.l_hash_key, + n.r_hash_key, + n.key, + n.value, + n.sum, + n.namespace, + b.depth + 1 + FROM + mssmt_nodes AS n, + mssmt_branches_cte AS b + WHERE + n.namespace = b.namespace + AND (n.hash_key = b.l_hash_key OR n.hash_key = b.r_hash_key) +/* Limit the result set to 3 items. The first is always the root node, while the following 0, 1 or 2 nodes represent children of the root node. These children can either be the next level children, or one next level and one from the level after that. In the future we may use this limit to fetch entire subtrees too. */ -) SELECT * FROM mssmt_branches_cte WHERE depth < 3; +) + +SELECT * FROM mssmt_branches_cte +WHERE depth < 3; -- name: FetchChildrenSelfJoin :many WITH subtree_cte ( hash_key, l_hash_key, r_hash_key, key, value, sum, namespace, depth ) AS ( - SELECT r.hash_key, r.l_hash_key, r.r_hash_key, r.key, r.value, r.sum, r.namespace, 0 as depth - FROM mssmt_nodes r - WHERE r.hash_key = $1 AND r.namespace = $2 - UNION ALL - SELECT c.hash_key, c.l_hash_key, c.r_hash_key, c.key, c.value, c.sum, c.namespace, depth+1 - FROM mssmt_nodes c - INNER JOIN subtree_cte r ON r.l_hash_key=c.hash_key OR r.r_hash_key=c.hash_key -) SELECT * from subtree_cte WHERE depth < 3; + SELECT + r.hash_key, + r.l_hash_key, + r.r_hash_key, + r.key, + r.value, + r.sum, + r.namespace, + 0 AS depth + FROM mssmt_nodes AS r + WHERE r.hash_key = $1 AND r.namespace = $2 + UNION ALL + SELECT + c.hash_key, + c.l_hash_key, + c.r_hash_key, + c.key, + c.value, + c.sum, + c.namespace, + r.depth + 1 + FROM mssmt_nodes AS c + INNER JOIN + subtree_cte AS r + ON c.hash_key = r.l_hash_key OR c.hash_key = r.r_hash_key +) + +SELECT * FROM subtree_cte +WHERE depth < 3; -- name: DeleteNode :execrows -DELETE FROM mssmt_nodes WHERE hash_key = $1 AND namespace = $2; +DELETE FROM mssmt_nodes +WHERE hash_key = $1 AND namespace = $2; -- name: DeleteAllNodes :execrows -DELETE FROM mssmt_nodes WHERE namespace = $1; +DELETE FROM mssmt_nodes +WHERE namespace = $1; -- name: DeleteRoot :execrows -DELETE FROM mssmt_roots WHERE namespace = $1; +DELETE FROM mssmt_roots +WHERE namespace = $1; -- name: FetchRootNode :one SELECT nodes.* -FROM mssmt_nodes nodes -JOIN mssmt_roots roots - ON roots.root_hash = nodes.hash_key AND - roots.namespace = $1; +FROM mssmt_nodes AS nodes +JOIN mssmt_roots AS roots + ON + nodes.hash_key = roots.root_hash + AND roots.namespace = $1; -- name: UpsertRootNode :exec INSERT INTO mssmt_roots ( @@ -70,8 +117,8 @@ INSERT INTO mssmt_roots ( ) VALUES ( $1, $2 ) ON CONFLICT (namespace) - -- Not a NOP, we always overwrite the root hash. - DO UPDATE SET root_hash = EXCLUDED.root_hash; +-- Not a NOP, we always overwrite the root hash. 
+DO UPDATE SET root_hash = excluded.root_hash; -- name: FetchAllNodes :many SELECT * FROM mssmt_nodes; diff --git a/tapdb/sqlc/queries/transfers.sql b/tapdb/sqlc/queries/transfers.sql index 56acc8ca9..bdd7fec64 100644 --- a/tapdb/sqlc/queries/transfers.sql +++ b/tapdb/sqlc/queries/transfers.sql @@ -1,9 +1,10 @@ -- name: InsertAssetTransfer :one -WITH target_txn(txn_id) AS ( +WITH target_txn (txn_id) AS ( SELECT txn_id FROM chain_txns WHERE txid = @anchor_txid ) + INSERT INTO asset_transfers ( height_hint, anchor_txn_id, transfer_time_unix, label, skip_anchor_tx_broadcast @@ -31,59 +32,84 @@ INSERT INTO asset_transfer_outputs ( ); -- name: SetTransferOutputProofDeliveryStatus :exec -WITH target(output_id) AS ( - SELECT output_id - FROM asset_transfer_outputs output +WITH target (output_id) AS ( + SELECT output.output_id + FROM asset_transfer_outputs AS output JOIN managed_utxos - ON output.anchor_utxo = managed_utxos.utxo_id - WHERE managed_utxos.outpoint = @serialized_anchor_outpoint - AND output.position = @position + ON output.anchor_utxo = managed_utxos.utxo_id + WHERE + managed_utxos.outpoint = @serialized_anchor_outpoint + AND output.position = @position ) + UPDATE asset_transfer_outputs SET proof_delivery_complete = @delivery_complete WHERE output_id = (SELECT output_id FROM target); -- name: QueryAssetTransfers :many SELECT - id, height_hint, txns.txid, txns.block_hash AS anchor_tx_block_hash, - transfer_time_unix, transfers.label, + transfers.id, + transfers.height_hint, + txns.txid, + txns.block_hash AS anchor_tx_block_hash, + transfers.transfer_time_unix, + transfers.label, transfers.skip_anchor_tx_broadcast -FROM asset_transfers transfers -JOIN chain_txns txns - ON txns.txn_id = transfers.anchor_txn_id +FROM asset_transfers AS transfers +JOIN chain_txns AS txns + ON transfers.anchor_txn_id = txns.txn_id WHERE -- Optionally filter on a given anchor_tx_hash. - (txns.txid = sqlc.narg('anchor_tx_hash') - OR sqlc.narg('anchor_tx_hash') IS NULL) + ( + txns.txid = sqlc.narg('anchor_tx_hash') + OR sqlc.narg('anchor_tx_hash') IS NULL + ) -- Filter for pending transfers only if requested. 
AND ( - @pending_transfers_only = true AND - ( + @pending_transfers_only = TRUE + AND ( txns.block_hash IS NULL - OR EXISTS ( - SELECT 1 - FROM asset_transfer_outputs outputs - WHERE outputs.transfer_id = transfers.id - AND outputs.proof_delivery_complete = false - ) + OR EXISTS ( + SELECT 1 + FROM asset_transfer_outputs AS outputs + WHERE + outputs.transfer_id = transfers.id + AND outputs.proof_delivery_complete = FALSE + ) ) - OR @pending_transfers_only = false OR @pending_transfers_only IS NULL + OR @pending_transfers_only = FALSE OR @pending_transfers_only IS NULL ) -ORDER BY transfer_time_unix; +ORDER BY transfers.transfer_time_unix; -- name: FetchTransferInputs :many -SELECT input_id, anchor_point, asset_id, script_key, amount -FROM asset_transfer_inputs inputs +SELECT + input_id, + anchor_point, + asset_id, + script_key, + amount +FROM asset_transfer_inputs AS inputs WHERE transfer_id = $1 ORDER BY input_id; -- name: FetchTransferOutputs :many SELECT - output_id, proof_suffix, amount, serialized_witnesses, script_key_local, - split_commitment_root_hash, split_commitment_root_value, num_passive_assets, - output_type, proof_courier_addr, proof_delivery_complete, position, - asset_version, lock_time, relative_lock_time, + outputs.output_id, + outputs.proof_suffix, + outputs.amount, + outputs.serialized_witnesses, + outputs.script_key_local, + outputs.split_commitment_root_hash, + outputs.split_commitment_root_value, + outputs.num_passive_assets, + outputs.output_type, + outputs.proof_courier_addr, + outputs.proof_delivery_complete, + outputs.position, + outputs.asset_version, + outputs.lock_time, + outputs.relative_lock_time, utxos.utxo_id AS anchor_utxo_id, utxos.outpoint AS anchor_outpoint, utxos.amt_sats AS anchor_value, @@ -94,26 +120,30 @@ SELECT utxo_internal_keys.raw_key AS internal_key_raw_key_bytes, utxo_internal_keys.key_family AS internal_key_family, utxo_internal_keys.key_index AS internal_key_index, - sqlc.embed(script_keys), - sqlc.embed(script_internal_keys) -FROM asset_transfer_outputs outputs -JOIN managed_utxos utxos - ON outputs.anchor_utxo = utxos.utxo_id + sqlc.embed(script_keys), -- noqa: RF02,AL03 + sqlc.embed(script_internal_keys) -- noqa: RF02,AL03 +FROM asset_transfer_outputs AS outputs +JOIN managed_utxos AS utxos + ON outputs.anchor_utxo = utxos.utxo_id JOIN script_keys - ON outputs.script_key = script_keys.script_key_id -JOIN internal_keys script_internal_keys - ON script_keys.internal_key_id = script_internal_keys.key_id -JOIN internal_keys utxo_internal_keys - ON utxos.internal_key_id = utxo_internal_keys.key_id -WHERE transfer_id = $1 -ORDER BY output_id; + ON outputs.script_key = script_keys.script_key_id +JOIN internal_keys AS script_internal_keys + ON script_keys.internal_key_id = script_internal_keys.key_id +JOIN internal_keys AS utxo_internal_keys + ON utxos.internal_key_id = utxo_internal_keys.key_id +WHERE outputs.transfer_id = $1 +ORDER BY outputs.output_id; -- name: ApplyPendingOutput :one WITH spent_asset AS ( - SELECT genesis_id, asset_group_witness_id, script_version + SELECT + assets.genesis_id, + assets.asset_group_witness_id, + assets.script_version FROM assets WHERE assets.asset_id = @spent_asset_id ) + INSERT INTO assets ( genesis_id, version, asset_group_witness_id, script_version, lock_time, relative_lock_time, script_key_id, anchor_utxo_id, amount, @@ -127,14 +157,15 @@ INSERT INTO assets ( @split_commitment_root_hash, @split_commitment_root_value, @spent ) ON CONFLICT (genesis_id, script_key_id, anchor_utxo_id) - -- This is a NOP, 
anchor_utxo_id is one of the unique fields that caused the - -- conflict. - DO UPDATE SET anchor_utxo_id = EXCLUDED.anchor_utxo_id +-- This is a NOP, anchor_utxo_id is one of the unique fields that caused the +-- conflict. +DO UPDATE SET anchor_utxo_id = excluded.anchor_utxo_id RETURNING asset_id; -- name: ReAnchorPassiveAssets :exec UPDATE assets -SET anchor_utxo_id = @new_anchor_utxo_id, +SET + anchor_utxo_id = @new_anchor_utxo_id, -- The following fields need to be the same fields we reset in -- Asset.CopySpendTemplate. split_commitment_root_hash = NULL, @@ -157,24 +188,27 @@ INSERT INTO proof_transfer_log ( -- name: QueryProofTransferAttempts :many SELECT time_unix FROM proof_transfer_log -WHERE proof_locator_hash = @proof_locator_hash +WHERE + proof_locator_hash = @proof_locator_hash AND transfer_type = @transfer_type ORDER BY time_unix DESC; -- name: InsertPassiveAsset :exec -WITH target_asset(asset_id) AS ( +WITH target_asset (asset_id) AS ( SELECT assets.asset_id FROM assets - JOIN genesis_assets - ON assets.genesis_id = genesis_assets.gen_asset_id - JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id - JOIN script_keys - ON assets.script_key_id = script_keys.script_key_id - WHERE genesis_assets.asset_id = @asset_genesis_id + JOIN genesis_assets + ON assets.genesis_id = genesis_assets.gen_asset_id + JOIN managed_utxos AS utxos + ON assets.anchor_utxo_id = utxos.utxo_id + JOIN script_keys + ON assets.script_key_id = script_keys.script_key_id + WHERE + genesis_assets.asset_id = @asset_genesis_id AND utxos.outpoint = @prev_outpoint AND script_keys.tweaked_script_key = @script_key ) + INSERT INTO passive_assets ( asset_id, transfer_id, new_anchor_utxo, script_key, new_witness_stack, new_proof, asset_version @@ -184,17 +218,22 @@ INSERT INTO passive_assets ( ); -- name: QueryPassiveAssets :many -SELECT passive.asset_id, passive.new_anchor_utxo, passive.script_key, - passive.new_witness_stack, passive.new_proof, - genesis_assets.asset_id AS genesis_id, passive.asset_version, - utxos.outpoint -FROM passive_assets as passive - JOIN assets - ON passive.asset_id = assets.asset_id - JOIN genesis_assets - ON assets.genesis_id = genesis_assets.gen_asset_id - JOIN managed_utxos utxos - ON passive.new_anchor_utxo = utxos.utxo_id +SELECT + passive.asset_id, + passive.new_anchor_utxo, + passive.script_key, + passive.new_witness_stack, + passive.new_proof, + genesis_assets.asset_id AS genesis_id, + passive.asset_version, + utxos.outpoint +FROM passive_assets AS passive +JOIN assets + ON passive.asset_id = assets.asset_id +JOIN genesis_assets + ON assets.genesis_id = genesis_assets.gen_asset_id +JOIN managed_utxos AS utxos + ON passive.new_anchor_utxo = utxos.utxo_id WHERE passive.transfer_id = @transfer_id; -- name: InsertBurn :one @@ -213,9 +252,9 @@ SELECT abt.group_key, abt.amount, ct.txid AS anchor_txid -- Retrieving the txid from chain_txns. -FROM asset_burn_transfers abt -JOIN asset_transfers at ON abt.transfer_id = at.id -JOIN chain_txns ct ON at.anchor_txn_id = ct.txn_id +FROM asset_burn_transfers AS abt +JOIN asset_transfers AS at ON abt.transfer_id = at.id +JOIN chain_txns AS ct ON at.anchor_txn_id = ct.txn_id WHERE -- Optionally filter by asset_id. 
(abt.asset_id = @asset_id OR @asset_id IS NULL) diff --git a/tapdb/sqlc/queries/universe.sql b/tapdb/sqlc/queries/universe.sql index ba94fb189..f9cbdd7ff 100644 --- a/tapdb/sqlc/queries/universe.sql +++ b/tapdb/sqlc/queries/universe.sql @@ -1,15 +1,20 @@ -- name: FetchUniverseRoot :one -SELECT universe_roots.asset_id, group_key, proof_type, - mssmt_nodes.hash_key root_hash, mssmt_nodes.sum root_sum, - genesis_assets.asset_tag asset_name +SELECT + universe_roots.asset_id, + universe_roots.group_key, + universe_roots.proof_type, + mssmt_nodes.hash_key AS root_hash, + mssmt_nodes.sum AS root_sum, + genesis_assets.asset_tag AS asset_name FROM universe_roots -JOIN mssmt_roots +JOIN mssmt_roots ON universe_roots.namespace_root = mssmt_roots.namespace -JOIN mssmt_nodes - ON mssmt_nodes.hash_key = mssmt_roots.root_hash - AND mssmt_nodes.namespace = mssmt_roots.namespace +JOIN mssmt_nodes + ON + mssmt_roots.root_hash = mssmt_nodes.hash_key + AND mssmt_roots.namespace = mssmt_nodes.namespace JOIN genesis_assets - ON genesis_assets.asset_id = universe_roots.asset_id + ON universe_roots.asset_id = genesis_assets.asset_id WHERE mssmt_nodes.namespace = @namespace; -- name: UpsertUniverseRoot :one @@ -18,9 +23,9 @@ INSERT INTO universe_roots ( ) VALUES ( @namespace_root, @asset_id, @group_key, @proof_type ) ON CONFLICT (namespace_root) - -- This is a NOP, namespace_root is the unique field that caused the - -- conflict. - DO UPDATE SET namespace_root = EXCLUDED.namespace_root +-- This is a NOP, namespace_root is the unique field that caused the +-- conflict. +DO UPDATE SET namespace_root = excluded.namespace_root RETURNING id; -- name: DeleteUniverseEvents :exec @@ -29,6 +34,7 @@ WITH root_id AS ( FROM universe_roots WHERE namespace_root = @namespace_root ) + DELETE FROM universe_events WHERE universe_root_id = (SELECT id FROM root_id); @@ -38,42 +44,54 @@ WHERE namespace_root = @namespace_root; -- name: UpsertUniverseLeaf :exec INSERT INTO universe_leaves ( - asset_genesis_id, script_key_bytes, universe_root_id, leaf_node_key, + asset_genesis_id, script_key_bytes, universe_root_id, leaf_node_key, leaf_node_namespace, minting_point ) VALUES ( @asset_genesis_id, @script_key_bytes, @universe_root_id, @leaf_node_key, @leaf_node_namespace, @minting_point ) ON CONFLICT (minting_point, script_key_bytes, leaf_node_namespace) - -- This is a NOP, minting_point and script_key_bytes are the unique fields - -- that caused the conflict. - DO UPDATE SET minting_point = EXCLUDED.minting_point, - script_key_bytes = EXCLUDED.script_key_bytes, - leaf_node_namespace = EXCLUDED.leaf_node_namespace; +-- This is a NOP, minting_point and script_key_bytes are the unique fields +-- that caused the conflict. 
+DO UPDATE SET minting_point = excluded.minting_point, +script_key_bytes = excluded.script_key_bytes, +leaf_node_namespace = excluded.leaf_node_namespace; -- name: DeleteUniverseLeaves :exec DELETE FROM universe_leaves WHERE leaf_node_namespace = @namespace; -- name: QueryUniverseLeaves :many -SELECT leaves.script_key_bytes, gen.gen_asset_id, nodes.value AS genesis_proof, - nodes.sum AS sum_amt, gen.asset_id +SELECT + leaves.script_key_bytes, + gen.gen_asset_id, + nodes.value AS genesis_proof, + nodes.sum AS sum_amt, + gen.asset_id FROM universe_leaves AS leaves JOIN mssmt_nodes AS nodes - ON leaves.leaf_node_key = nodes.key - AND leaves.leaf_node_namespace = nodes.namespace + ON + leaves.leaf_node_key = nodes.key + AND leaves.leaf_node_namespace = nodes.namespace JOIN genesis_info_view AS gen ON leaves.asset_genesis_id = gen.gen_asset_id -WHERE leaves.leaf_node_namespace = @namespace - AND (leaves.minting_point = sqlc.narg('minting_point_bytes') OR - sqlc.narg('minting_point_bytes') IS NULL) - AND (leaves.script_key_bytes = sqlc.narg('script_key_bytes') OR - sqlc.narg('script_key_bytes') IS NULL); +WHERE + leaves.leaf_node_namespace = @namespace + AND ( + leaves.minting_point = sqlc.narg('minting_point_bytes') + OR sqlc.narg('minting_point_bytes') IS NULL + ) + AND ( + leaves.script_key_bytes = sqlc.narg('script_key_bytes') + OR sqlc.narg('script_key_bytes') IS NULL + ); -- name: FetchUniverseKeys :many -SELECT leaves.minting_point, leaves.script_key_bytes +SELECT + leaves.minting_point, + leaves.script_key_bytes FROM universe_leaves AS leaves WHERE leaves.leaf_node_namespace = @namespace -ORDER BY +ORDER BY CASE WHEN sqlc.narg('sort_direction') = 0 THEN leaves.id END ASC, CASE WHEN sqlc.narg('sort_direction') = 1 THEN leaves.id END DESC LIMIT @num_limit OFFSET @num_offset; @@ -82,24 +100,29 @@ LIMIT @num_limit OFFSET @num_offset; SELECT * FROM universe_leaves; -- name: UniverseRoots :many -SELECT universe_roots.asset_id, group_key, proof_type, - mssmt_roots.root_hash AS root_hash, mssmt_nodes.sum AS root_sum, - genesis_assets.asset_tag AS asset_name +SELECT + universe_roots.asset_id, + universe_roots.group_key, + universe_roots.proof_type, + mssmt_roots.root_hash, + mssmt_nodes.sum AS root_sum, + genesis_assets.asset_tag AS asset_name FROM universe_roots JOIN mssmt_roots ON universe_roots.namespace_root = mssmt_roots.namespace JOIN mssmt_nodes - ON mssmt_nodes.hash_key = mssmt_roots.root_hash - AND mssmt_nodes.namespace = mssmt_roots.namespace + ON + mssmt_roots.root_hash = mssmt_nodes.hash_key + AND mssmt_roots.namespace = mssmt_nodes.namespace JOIN genesis_assets - ON genesis_assets.asset_id = universe_roots.asset_id -ORDER BY + ON universe_roots.asset_id = genesis_assets.asset_id +ORDER BY CASE WHEN sqlc.narg('sort_direction') = 0 THEN universe_roots.id END ASC, CASE WHEN sqlc.narg('sort_direction') = 1 THEN universe_roots.id END DESC LIMIT @num_limit OFFSET @num_offset; -- name: InsertUniverseServer :exec -INSERT INTO universe_servers( +INSERT INTO universe_servers ( server_host, last_sync_time ) VALUES ( @server_host, @last_sync_time @@ -116,107 +139,138 @@ WHERE server_host = @target_server; -- name: QueryUniverseServers :many SELECT * FROM universe_servers -WHERE (id = sqlc.narg('id') OR sqlc.narg('id') IS NULL) AND - (server_host = sqlc.narg('server_host') - OR sqlc.narg('server_host') IS NULL); +WHERE + (id = sqlc.narg('id') OR sqlc.narg('id') IS NULL) + AND ( + server_host = sqlc.narg('server_host') + OR sqlc.narg('server_host') IS NULL + ); -- name: InsertNewSyncEvent :exec 
WITH group_key_root_id AS ( - SELECT id - FROM universe_roots roots - WHERE group_key = @group_key_x_only - AND roots.proof_type = @proof_type -), asset_id_root_id AS ( + SELECT roots.id + FROM universe_roots AS roots + WHERE + roots.group_key = @group_key_x_only + AND roots.proof_type = @proof_type +), + +asset_id_root_id AS ( SELECT leaves.universe_root_id AS id - FROM universe_leaves leaves - JOIN universe_roots roots + FROM universe_leaves AS leaves + JOIN universe_roots AS roots ON leaves.universe_root_id = roots.id - JOIN genesis_info_view gen + JOIN genesis_info_view AS gen ON leaves.asset_genesis_id = gen.gen_asset_id - WHERE gen.asset_id = @asset_id + WHERE + gen.asset_id = @asset_id AND roots.proof_type = @proof_type LIMIT 1 ) + INSERT INTO universe_events ( event_type, universe_root_id, event_time, event_timestamp ) VALUES ( 'SYNC', - CASE WHEN length(@group_key_x_only) > 0 THEN ( + CASE + WHEN length(@group_key_x_only) > 0 THEN ( SELECT id FROM group_key_root_id ) ELSE ( SELECT id FROM asset_id_root_id - ) END, + ) + END, @event_time, @event_timestamp ); -- name: InsertNewProofEvent :exec WITH group_key_root_id AS ( - SELECT id - FROM universe_roots roots - WHERE group_key = @group_key_x_only + SELECT roots.id + FROM universe_roots AS roots + WHERE + roots.group_key = @group_key_x_only AND roots.proof_type = @proof_type -), asset_id_root_id AS ( +), + +asset_id_root_id AS ( SELECT leaves.universe_root_id AS id - FROM universe_leaves leaves - JOIN universe_roots roots + FROM universe_leaves AS leaves + JOIN universe_roots AS roots ON leaves.universe_root_id = roots.id - JOIN genesis_info_view gen + JOIN genesis_info_view AS gen ON leaves.asset_genesis_id = gen.gen_asset_id - WHERE gen.asset_id = @asset_id + WHERE + gen.asset_id = @asset_id AND roots.proof_type = @proof_type LIMIT 1 ) + INSERT INTO universe_events ( event_type, universe_root_id, event_time, event_timestamp ) VALUES ( 'NEW_PROOF', - CASE WHEN length(@group_key_x_only) > 0 THEN ( + CASE + WHEN length(@group_key_x_only) > 0 THEN ( SELECT id FROM group_key_root_id ) ELSE ( SELECT id FROM asset_id_root_id - ) END, + ) + END, @event_time, @event_timestamp ); -- name: QueryUniverseStats :one WITH stats AS ( - SELECT total_asset_syncs, total_asset_proofs + SELECT + total_asset_syncs, + total_asset_proofs FROM universe_stats -), group_ids AS ( +), + +group_ids AS ( SELECT id FROM universe_roots WHERE group_key IS NOT NULL -), asset_keys AS ( - SELECT hash_key - FROM mssmt_nodes nodes - JOIN mssmt_roots roots - ON nodes.hash_key = roots.root_hash AND - nodes.namespace = roots.namespace - JOIN universe_roots uroots - ON roots.namespace = uroots.namespace_root -), aggregated AS ( - SELECT COALESCE(SUM(stats.total_asset_syncs), 0) AS total_syncs, - COALESCE(SUM(stats.total_asset_proofs), 0) AS total_proofs, - 0 AS total_num_groups, - 0 AS total_num_assets +), + +asset_keys AS ( + SELECT nodes.hash_key + FROM mssmt_nodes AS nodes + JOIN mssmt_roots AS roots + ON + nodes.hash_key = roots.root_hash + AND nodes.namespace = roots.namespace + JOIN universe_roots AS uroots + ON roots.namespace = uroots.namespace_root +), + +aggregated AS ( + SELECT + coalesce(sum(stats.total_asset_syncs), 0) AS total_syncs, + coalesce(sum(stats.total_asset_proofs), 0) AS total_proofs, + 0 AS total_num_groups, + 0 AS total_num_assets FROM stats UNION ALL - SELECT 0 AS total_syncs, - 0 AS total_proofs, - COALESCE(COUNT(group_ids.id), 0) AS total_num_groups, - 0 AS total_num_assets + SELECT + 0 AS total_syncs, + 0 AS total_proofs, + 
coalesce(count(group_ids.id), 0) AS total_num_groups, + 0 AS total_num_assets FROM group_ids UNION ALL - SELECT 0 AS total_syncs, - 0 AS total_proofs, - 0 AS total_num_groups, - COALESCE(COUNT(asset_keys.hash_key), 0) AS total_num_assets + SELECT + 0 AS total_syncs, + 0 AS total_proofs, + 0 AS total_num_groups, + coalesce(count(asset_keys.hash_key), 0) AS total_num_assets FROM asset_keys ) -SELECT SUM(total_syncs) AS total_syncs, - SUM(total_proofs) AS total_proofs, - SUM(total_num_groups) AS total_num_groups, - SUM(total_num_assets) AS total_num_assets + +SELECT + sum(total_syncs) AS total_syncs, + sum(total_proofs) AS total_proofs, + sum(total_num_groups) AS total_num_groups, + sum(total_num_assets) AS total_num_assets FROM aggregated; -- TODO(roasbeef): use the universe id instead for the grouping? so namespace @@ -224,56 +278,80 @@ FROM aggregated; -- name: QueryUniverseAssetStats :many WITH asset_supply AS ( - SELECT SUM(nodes.sum) AS supply, gen.asset_id AS asset_id - FROM universe_leaves leaves - JOIN universe_roots roots + SELECT + sum(nodes.sum) AS supply, + gen.asset_id + FROM universe_leaves AS leaves + JOIN universe_roots AS roots ON leaves.universe_root_id = roots.id - JOIN mssmt_nodes nodes - ON leaves.leaf_node_key = nodes.key AND - leaves.leaf_node_namespace = nodes.namespace - JOIN genesis_info_view gen + JOIN mssmt_nodes AS nodes + ON + leaves.leaf_node_key = nodes.key + AND leaves.leaf_node_namespace = nodes.namespace + JOIN genesis_info_view AS gen ON leaves.asset_genesis_id = gen.gen_asset_id WHERE roots.proof_type = 'issuance' GROUP BY gen.asset_id -), group_supply AS ( - SELECT sum AS num_assets, uroots.group_key AS group_key - FROM mssmt_nodes nodes - JOIN mssmt_roots roots - ON nodes.hash_key = roots.root_hash AND - nodes.namespace = roots.namespace - JOIN universe_roots uroots - ON roots.namespace = uroots.namespace_root +), + +group_supply AS ( + SELECT + nodes.sum AS num_assets, + uroots.group_key + FROM mssmt_nodes AS nodes + JOIN mssmt_roots AS roots + ON + nodes.hash_key = roots.root_hash + AND nodes.namespace = roots.namespace + JOIN universe_roots AS uroots + ON roots.namespace = uroots.namespace_root WHERE uroots.proof_type = 'issuance' -), asset_info AS ( - SELECT asset_supply.supply, group_supply.num_assets AS group_supply, - gen.asset_id AS asset_id, - gen.asset_tag AS asset_name, gen.asset_type AS asset_type, - gen.block_height AS genesis_height, gen.prev_out AS genesis_prev_out, - group_info.tweaked_group_key AS group_key, - gen.output_index AS anchor_index, gen.anchor_txid AS anchor_txid - FROM genesis_info_view gen +), + +asset_info AS ( + SELECT + asset_supply.supply, + group_supply.num_assets AS group_supply, + gen.asset_id, + gen.asset_tag AS asset_name, + gen.asset_type, + gen.block_height AS genesis_height, + gen.prev_out AS genesis_prev_out, + group_info.tweaked_group_key AS group_key, + gen.output_index AS anchor_index, + gen.anchor_txid + FROM genesis_info_view AS gen JOIN asset_supply - ON asset_supply.asset_id = gen.asset_id + ON gen.asset_id = asset_supply.asset_id -- We use a LEFT JOIN here as not every asset has a group key, so this'll -- generate rows that have NULL values for the group key fields if an asset -- doesn't have a group key. 
- LEFT JOIN key_group_info_view group_info + LEFT JOIN key_group_info_view AS group_info ON gen.gen_asset_id = group_info.gen_asset_id LEFT JOIN group_supply - ON group_supply.group_key = group_info.x_only_group_key - WHERE (gen.asset_tag = sqlc.narg('asset_name') OR sqlc.narg('asset_name') IS NULL) AND - (gen.asset_type = sqlc.narg('asset_type') OR sqlc.narg('asset_type') IS NULL) AND - (gen.asset_id = sqlc.narg('asset_id') OR sqlc.narg('asset_id') IS NULL) + ON group_info.x_only_group_key = group_supply.group_key + WHERE ( + gen.asset_tag = sqlc.narg('asset_name') + OR sqlc.narg('asset_name') IS NULL + ) + AND ( + gen.asset_type = sqlc.narg('asset_type') + OR sqlc.narg('asset_type') IS NULL + ) + AND (gen.asset_id = sqlc.narg('asset_id') OR sqlc.narg('asset_id') IS NULL) ) -SELECT asset_info.supply AS asset_supply, - asset_info.group_supply AS group_supply, - asset_info.asset_name AS asset_name, - asset_info.asset_type AS asset_type, asset_info.asset_id AS asset_id, - asset_info.genesis_height AS genesis_height, - asset_info.genesis_prev_out AS genesis_prev_out, - asset_info.group_key AS group_key, - asset_info.anchor_index AS anchor_index, - asset_info.anchor_txid AS anchor_txid, + +SELECT + asset_info.supply AS asset_supply, + asset_info.group_supply, + asset_info.asset_name, + asset_info.asset_type, + asset_info.asset_id, + asset_info.genesis_height, + asset_info.genesis_prev_out, + asset_info.group_key, + asset_info.anchor_index, + asset_info.anchor_txid, universe_stats.total_asset_syncs AS total_syncs, universe_stats.total_asset_proofs AS total_proofs FROM asset_info @@ -281,56 +359,131 @@ JOIN universe_stats ON asset_info.asset_id = universe_stats.asset_id WHERE universe_stats.proof_type = 'issuance' ORDER BY - CASE WHEN sqlc.narg('sort_by') = 'asset_id' AND sqlc.narg('sort_direction') = 0 THEN - asset_info.asset_id END ASC, - CASE WHEN sqlc.narg('sort_by') = 'asset_id' AND sqlc.narg('sort_direction') = 1 THEN - asset_info.asset_id END DESC, - CASE WHEN sqlc.narg('sort_by') = 'asset_name' AND sqlc.narg('sort_direction') = 0 THEN - asset_info.asset_name END ASC , - CASE WHEN sqlc.narg('sort_by') = 'asset_name' AND sqlc.narg('sort_direction') = 1 THEN - asset_info.asset_name END DESC , - CASE WHEN sqlc.narg('sort_by') = 'asset_type' AND sqlc.narg('sort_direction') = 0 THEN - asset_info.asset_type END ASC , - CASE WHEN sqlc.narg('sort_by') = 'asset_type' AND sqlc.narg('sort_direction') = 1 THEN - asset_info.asset_type END DESC, - CASE WHEN sqlc.narg('sort_by') = 'total_syncs' AND sqlc.narg('sort_direction') = 0 THEN - universe_stats.total_asset_syncs END ASC , - CASE WHEN sqlc.narg('sort_by') = 'total_syncs' AND sqlc.narg('sort_direction') = 1 THEN - universe_stats.total_asset_syncs END DESC, - CASE WHEN sqlc.narg('sort_by') = 'total_proofs' AND sqlc.narg('sort_direction') = 0 THEN - universe_stats.total_asset_proofs END ASC , - CASE WHEN sqlc.narg('sort_by') = 'total_proofs' AND sqlc.narg('sort_direction') = 1 THEN - universe_stats.total_asset_proofs END DESC, - CASE WHEN sqlc.narg('sort_by') = 'genesis_height' AND sqlc.narg('sort_direction') = 0 THEN - asset_info.genesis_height END ASC , - CASE WHEN sqlc.narg('sort_by') = 'genesis_height' AND sqlc.narg('sort_direction') = 1 THEN - asset_info.genesis_height END DESC, - CASE WHEN sqlc.narg('sort_by') = 'total_supply' AND sqlc.narg('sort_direction') = 0 THEN - asset_info.supply END ASC , - CASE WHEN sqlc.narg('sort_by') = 'total_supply' AND sqlc.narg('sort_direction') = 1 THEN - asset_info.supply END DESC + CASE + WHEN + 
sqlc.narg('sort_by') = 'asset_id' + AND sqlc.narg('sort_direction') = 0 + THEN + asset_info.asset_id + END ASC, + CASE + WHEN + sqlc.narg('sort_by') = 'asset_id' + AND sqlc.narg('sort_direction') = 1 + THEN + asset_info.asset_id + END DESC, + CASE + WHEN + sqlc.narg('sort_by') = 'asset_name' + AND sqlc.narg('sort_direction') = 0 + THEN + asset_info.asset_name + END ASC, + CASE + WHEN + sqlc.narg('sort_by') = 'asset_name' + AND sqlc.narg('sort_direction') = 1 + THEN + asset_info.asset_name + END DESC, + CASE + WHEN + sqlc.narg('sort_by') = 'asset_type' + AND sqlc.narg('sort_direction') = 0 + THEN + asset_info.asset_type + END ASC, + CASE + WHEN + sqlc.narg('sort_by') = 'asset_type' + AND sqlc.narg('sort_direction') = 1 + THEN + asset_info.asset_type + END DESC, + CASE + WHEN + sqlc.narg('sort_by') = 'total_syncs' + AND sqlc.narg('sort_direction') = 0 + THEN + universe_stats.total_asset_syncs + END ASC, + CASE + WHEN + sqlc.narg('sort_by') = 'total_syncs' + AND sqlc.narg('sort_direction') = 1 + THEN + universe_stats.total_asset_syncs + END DESC, + CASE + WHEN + sqlc.narg('sort_by') = 'total_proofs' + AND sqlc.narg('sort_direction') = 0 + THEN + universe_stats.total_asset_proofs + END ASC, + CASE + WHEN + sqlc.narg('sort_by') = 'total_proofs' + AND sqlc.narg('sort_direction') = 1 + THEN + universe_stats.total_asset_proofs + END DESC, + CASE + WHEN + sqlc.narg('sort_by') = 'genesis_height' + AND sqlc.narg('sort_direction') = 0 + THEN + asset_info.genesis_height + END ASC, + CASE + WHEN + sqlc.narg('sort_by') = 'genesis_height' + AND sqlc.narg('sort_direction') = 1 + THEN + asset_info.genesis_height + END DESC, + CASE + WHEN + sqlc.narg('sort_by') = 'total_supply' + AND sqlc.narg('sort_direction') = 0 + THEN + asset_info.supply + END ASC, + CASE + WHEN + sqlc.narg('sort_by') = 'total_supply' + AND sqlc.narg('sort_direction') = 1 + THEN + asset_info.supply + END DESC LIMIT @num_limit OFFSET @num_offset; -- name: QueryAssetStatsPerDaySqlite :many SELECT - cast(strftime('%Y-%m-%d', datetime(event_timestamp, 'unixepoch')) as text) AS day, - SUM(CASE WHEN event_type = 'SYNC' THEN 1 ELSE 0 END) AS sync_events, - SUM(CASE WHEN event_type = 'NEW_PROOF' THEN 1 ELSE 0 END) AS new_proof_events + cast(strftime('%Y-%m-%d', datetime(event_timestamp, 'unixepoch')) AS text) + AS day, + sum(CASE WHEN event_type = 'SYNC' THEN 1 ELSE 0 END) AS sync_events, + sum(CASE WHEN event_type = 'NEW_PROOF' THEN 1 ELSE 0 END) + AS new_proof_events FROM universe_events -WHERE event_type IN ('SYNC', 'NEW_PROOF') AND - event_timestamp >= @start_time AND event_timestamp <= @end_time +WHERE + event_type IN ('SYNC', 'NEW_PROOF') + AND event_timestamp >= @start_time AND event_timestamp <= @end_time GROUP BY day ORDER BY day; -- name: QueryAssetStatsPerDayPostgres :many SELECT to_char(to_timestamp(event_timestamp), 'YYYY-MM-DD') AS day, - SUM(CASE WHEN event_type = 'SYNC' THEN 1 ELSE 0 END) AS sync_events, - SUM(CASE WHEN event_type = 'NEW_PROOF' THEN 1 ELSE 0 END) AS new_proof_events + sum(CASE WHEN event_type = 'SYNC' THEN 1 ELSE 0 END) AS sync_events, + sum(CASE WHEN event_type = 'NEW_PROOF' THEN 1 ELSE 0 END) + AS new_proof_events FROM universe_events -- BETWEEN is inclusive for both start and end values. 
-WHERE event_type IN ('SYNC', 'NEW_PROOF') - AND event_timestamp BETWEEN @start_time AND @end_time +WHERE + event_type IN ('SYNC', 'NEW_PROOF') + AND event_timestamp BETWEEN @start_time AND @end_time GROUP BY day ORDER BY day; @@ -339,35 +492,54 @@ INSERT INTO federation_global_sync_config ( proof_type, allow_sync_insert, allow_sync_export ) VALUES (@proof_type, @allow_sync_insert, @allow_sync_export) -ON CONFLICT(proof_type) - DO UPDATE SET - allow_sync_insert = @allow_sync_insert, - allow_sync_export = @allow_sync_export; +ON CONFLICT (proof_type) +DO UPDATE SET +allow_sync_insert = @allow_sync_insert, +allow_sync_export = @allow_sync_export; -- name: QueryFederationGlobalSyncConfigs :many -SELECT proof_type, allow_sync_insert, allow_sync_export +SELECT + proof_type, + allow_sync_insert, + allow_sync_export FROM federation_global_sync_config ORDER BY proof_type; -- name: UpsertFederationUniSyncConfig :exec -INSERT INTO federation_uni_sync_config ( - namespace, asset_id, group_key, proof_type, allow_sync_insert, allow_sync_export +INSERT INTO federation_uni_sync_config ( + namespace, + asset_id, + group_key, + proof_type, + allow_sync_insert, + allow_sync_export ) -VALUES( - @namespace, @asset_id, @group_key, @proof_type, @allow_sync_insert, @allow_sync_export +VALUES ( + @namespace, + @asset_id, + @group_key, + @proof_type, + @allow_sync_insert, + @allow_sync_export ) -ON CONFLICT(namespace) - DO UPDATE SET - allow_sync_insert = @allow_sync_insert, - allow_sync_export = @allow_sync_export; +ON CONFLICT (namespace) +DO UPDATE SET +allow_sync_insert = @allow_sync_insert, +allow_sync_export = @allow_sync_export; -- name: QueryFederationUniSyncConfigs :many -SELECT namespace, asset_id, group_key, proof_type, allow_sync_insert, allow_sync_export +SELECT + namespace, + asset_id, + group_key, + proof_type, + allow_sync_insert, + allow_sync_export FROM federation_uni_sync_config ORDER BY group_key NULLS LAST, asset_id NULLS LAST, proof_type; -- name: UpsertFederationProofSyncLog :one -INSERT INTO federation_proof_sync_log AS log ( +INSERT INTO federation_proof_sync_log ( status, timestamp, sync_direction, proof_leaf_id, universe_root_id, servers_id ) VALUES ( @@ -376,7 +548,8 @@ INSERT INTO federation_proof_sync_log AS log ( -- Select the leaf id from the universe_leaves table. SELECT id FROM universe_leaves - WHERE leaf_node_namespace = @leaf_namespace + WHERE + leaf_node_namespace = @leaf_namespace AND minting_point = @leaf_minting_point_bytes AND script_key_bytes = @leaf_script_key_bytes LIMIT 1 @@ -397,18 +570,23 @@ INSERT INTO federation_proof_sync_log AS log ( ) ) ON CONFLICT (sync_direction, proof_leaf_id, universe_root_id, servers_id) DO UPDATE SET - status = EXCLUDED.status, - timestamp = EXCLUDED.timestamp, - -- Increment the attempt counter. - attempt_counter = CASE - WHEN @bump_sync_attempt_counter = TRUE THEN log.attempt_counter + 1 - ELSE log.attempt_counter - END +status = excluded.status, +timestamp = excluded.timestamp, +-- Increment the attempt counter. +attempt_counter = CASE + WHEN @bump_sync_attempt_counter = TRUE + THEN federation_proof_sync_log.attempt_counter + 1 + ELSE federation_proof_sync_log.attempt_counter +END RETURNING id; -- name: QueryFederationProofSyncLog :many SELECT - log.id, status, timestamp, sync_direction, attempt_counter, + log.id, + log.status, + log.timestamp, + log.sync_direction, + log.attempt_counter, -- Select fields from the universe_servers table. 
server.id AS server_id, server.server_host, @@ -424,24 +602,37 @@ SELECT root.proof_type AS uni_proof_type FROM federation_proof_sync_log AS log JOIN universe_leaves AS leaf - ON leaf.id = log.proof_leaf_id + ON log.proof_leaf_id = leaf.id -- Join on mssmt_nodes to get leaf related fields. JOIN mssmt_nodes AS mssmt_node - ON leaf.leaf_node_key = mssmt_node.key + ON + leaf.leaf_node_key = mssmt_node.key AND leaf.leaf_node_namespace = mssmt_node.namespace -- Join on genesis_info_view to get leaf related fields. JOIN genesis_info_view AS genesis - ON leaf.asset_genesis_id = genesis.gen_asset_id + ON leaf.asset_genesis_id = genesis.gen_asset_id JOIN universe_servers AS server - ON server.id = log.servers_id + ON log.servers_id = server.id JOIN universe_roots AS root - ON root.id = log.universe_root_id -WHERE (log.sync_direction = sqlc.narg('sync_direction') OR sqlc.narg('sync_direction') IS NULL) - AND (log.status = sqlc.narg('status') OR sqlc.narg('status') IS NULL) - -- Universe leaves WHERE clauses. - AND (leaf.leaf_node_namespace = sqlc.narg('leaf_namespace') OR sqlc.narg('leaf_namespace') IS NULL) - AND (leaf.minting_point = sqlc.narg('leaf_minting_point_bytes') OR sqlc.narg('leaf_minting_point_bytes') IS NULL) - AND (leaf.script_key_bytes = sqlc.narg('leaf_script_key_bytes') OR sqlc.narg('leaf_script_key_bytes') IS NULL); + ON log.universe_root_id = root.id +WHERE ( + log.sync_direction = sqlc.narg('sync_direction') + OR sqlc.narg('sync_direction') IS NULL +) +AND (log.status = sqlc.narg('status') OR sqlc.narg('status') IS NULL) +-- Universe leaves WHERE clauses. +AND ( + leaf.leaf_node_namespace = sqlc.narg('leaf_namespace') + OR sqlc.narg('leaf_namespace') IS NULL +) +AND ( + leaf.minting_point = sqlc.narg('leaf_minting_point_bytes') + OR sqlc.narg('leaf_minting_point_bytes') IS NULL +) +AND ( + leaf.script_key_bytes = sqlc.narg('leaf_script_key_bytes') + OR sqlc.narg('leaf_script_key_bytes') IS NULL +); -- name: DeleteFederationProofSyncLog :exec WITH selected_server_id AS ( @@ -450,36 +641,49 @@ WITH selected_server_id AS ( SELECT id FROM universe_servers WHERE - (server_host = sqlc.narg('server_host') - OR sqlc.narg('server_host') IS NULL) + ( + server_host = sqlc.narg('server_host') + OR sqlc.narg('server_host') IS NULL + ) ) + DELETE FROM federation_proof_sync_log WHERE - servers_id IN (SELECT id FROM selected_server_id) AND - (status = sqlc.narg('status') - OR sqlc.narg('status') IS NULL) AND - (timestamp >= sqlc.narg('min_timestamp') - OR sqlc.narg('min_timestamp') IS NULL) AND - (attempt_counter >= sqlc.narg('min_attempt_counter') - OR sqlc.narg('min_attempt_counter') IS NULL); + servers_id IN (SELECT id FROM selected_server_id) + AND ( + status = sqlc.narg('status') + OR sqlc.narg('status') IS NULL + ) + AND ( + timestamp >= sqlc.narg('min_timestamp') + OR sqlc.narg('min_timestamp') IS NULL + ) + AND ( + attempt_counter >= sqlc.narg('min_attempt_counter') + OR sqlc.narg('min_attempt_counter') IS NULL + ); -- name: UpsertMultiverseRoot :one INSERT INTO multiverse_roots (namespace_root, proof_type) VALUES (@namespace_root, @proof_type) ON CONFLICT (namespace_root) - -- This is a no-op to allow returning the ID. - DO UPDATE SET namespace_root = EXCLUDED.namespace_root +-- This is a no-op to allow returning the ID. 
+DO UPDATE SET namespace_root = excluded.namespace_root RETURNING id; -- name: FetchMultiverseRoot :one -SELECT proof_type, n.hash_key as multiverse_root_hash, n.sum as multiverse_root_sum -FROM multiverse_roots r -JOIN mssmt_roots m +SELECT + r.proof_type, + n.hash_key AS multiverse_root_hash, + n.sum AS multiverse_root_sum +FROM multiverse_roots AS r +JOIN mssmt_roots AS m ON r.namespace_root = m.namespace -JOIN mssmt_nodes n - ON m.root_hash = n.hash_key AND - m.namespace = n.namespace -WHERE namespace_root = @namespace_root; +JOIN mssmt_nodes AS n + ON + m.root_hash = n.hash_key + AND m.namespace = n.namespace +WHERE r.namespace_root = @namespace_root; -- name: UpsertMultiverseLeaf :one INSERT INTO multiverse_leaves ( @@ -489,9 +693,9 @@ INSERT INTO multiverse_leaves ( @leaf_node_namespace ) ON CONFLICT (leaf_node_key, leaf_node_namespace) - -- This is a no-op to allow returning the ID. - DO UPDATE SET leaf_node_key = EXCLUDED.leaf_node_key, - leaf_node_namespace = EXCLUDED.leaf_node_namespace +-- This is a no-op to allow returning the ID. +DO UPDATE SET leaf_node_key = excluded.leaf_node_key, +leaf_node_namespace = excluded.leaf_node_namespace RETURNING id; -- name: DeleteMultiverseLeaf :exec @@ -499,14 +703,21 @@ DELETE FROM multiverse_leaves WHERE leaf_node_namespace = @namespace AND leaf_node_key = @leaf_node_key; -- name: QueryMultiverseLeaves :many -SELECT r.namespace_root, r.proof_type, l.asset_id, l.group_key, - smt_nodes.value AS universe_root_hash, smt_nodes.sum AS universe_root_sum -FROM multiverse_leaves l -JOIN mssmt_nodes smt_nodes - ON l.leaf_node_key = smt_nodes.key AND - l.leaf_node_namespace = smt_nodes.namespace -JOIN multiverse_roots r - ON l.multiverse_root_id = r.id -WHERE r.proof_type = @proof_type AND - (l.asset_id = @asset_id OR @asset_id IS NULL) AND - (l.group_key = @group_key OR @group_key IS NULL); +SELECT + r.namespace_root, + r.proof_type, + l.asset_id, + l.group_key, + smt_nodes.value AS universe_root_hash, + smt_nodes.sum AS universe_root_sum +FROM multiverse_leaves AS l +JOIN mssmt_nodes AS smt_nodes + ON + l.leaf_node_key = smt_nodes.key + AND l.leaf_node_namespace = smt_nodes.namespace +JOIN multiverse_roots AS r + ON l.multiverse_root_id = r.id +WHERE + r.proof_type = @proof_type + AND (l.asset_id = @asset_id OR @asset_id IS NULL) + AND (l.group_key = @group_key OR @group_key IS NULL); diff --git a/tapdb/sqlc/schemas/generated_schema.sql b/tapdb/sqlc/schemas/generated_schema.sql index 9c954f65e..90d08ec80 100644 --- a/tapdb/sqlc/schemas/generated_schema.sql +++ b/tapdb/sqlc/schemas/generated_schema.sql @@ -103,16 +103,16 @@ CREATE TABLE "asset_burn_transfers" ( burn_id INTEGER PRIMARY KEY, -- A reference to the primary key of the transfer that includes this burn. - transfer_id BIGINT NOT NULL REFERENCES asset_transfers (id), + transfer_id BIGINT NOT NULL REFERENCES asset_transfers(id), -- A note that may contain user defined metadata. note TEXT, -- The asset id of the burnt asset. - asset_id BLOB NOT NULL REFERENCES genesis_assets (asset_id), + asset_id BLOB NOT NULL REFERENCES genesis_assets(asset_id), -- The group key of the group the burnt asset belonged to. - group_key BLOB REFERENCES asset_groups (tweaked_group_key), + group_key BLOB REFERENCES asset_groups(tweaked_group_key), -- The amount of the asset that was burned. 
amount BIGINT NOT NULL diff --git a/tapdb/sqlc/transfers.sql.go b/tapdb/sqlc/transfers.sql.go index 2ec3dec3d..2e9733492 100644 --- a/tapdb/sqlc/transfers.sql.go +++ b/tapdb/sqlc/transfers.sql.go @@ -13,10 +13,14 @@ import ( const ApplyPendingOutput = `-- name: ApplyPendingOutput :one WITH spent_asset AS ( - SELECT genesis_id, asset_group_witness_id, script_version + SELECT + assets.genesis_id, + assets.asset_group_witness_id, + assets.script_version FROM assets WHERE assets.asset_id = $10 ) + INSERT INTO assets ( genesis_id, version, asset_group_witness_id, script_version, lock_time, relative_lock_time, script_key_id, anchor_utxo_id, amount, @@ -30,9 +34,7 @@ INSERT INTO assets ( $7, $8, $9 ) ON CONFLICT (genesis_id, script_key_id, anchor_utxo_id) - -- This is a NOP, anchor_utxo_id is one of the unique fields that caused the - -- conflict. - DO UPDATE SET anchor_utxo_id = EXCLUDED.anchor_utxo_id +DO UPDATE SET anchor_utxo_id = excluded.anchor_utxo_id RETURNING asset_id ` @@ -49,6 +51,8 @@ type ApplyPendingOutputParams struct { SpentAssetID int64 } +// This is a NOP, anchor_utxo_id is one of the unique fields that caused the +// conflict. func (q *Queries) ApplyPendingOutput(ctx context.Context, arg ApplyPendingOutputParams) (int64, error) { row := q.db.QueryRowContext(ctx, ApplyPendingOutput, arg.AssetVersion, @@ -78,8 +82,13 @@ func (q *Queries) DeleteAssetWitnesses(ctx context.Context, assetID int64) error } const FetchTransferInputs = `-- name: FetchTransferInputs :many -SELECT input_id, anchor_point, asset_id, script_key, amount -FROM asset_transfer_inputs inputs +SELECT + input_id, + anchor_point, + asset_id, + script_key, + amount +FROM asset_transfer_inputs AS inputs WHERE transfer_id = $1 ORDER BY input_id ` @@ -123,10 +132,21 @@ func (q *Queries) FetchTransferInputs(ctx context.Context, transferID int64) ([] const FetchTransferOutputs = `-- name: FetchTransferOutputs :many SELECT - output_id, proof_suffix, amount, serialized_witnesses, script_key_local, - split_commitment_root_hash, split_commitment_root_value, num_passive_assets, - output_type, proof_courier_addr, proof_delivery_complete, position, - asset_version, lock_time, relative_lock_time, + outputs.output_id, + outputs.proof_suffix, + outputs.amount, + outputs.serialized_witnesses, + outputs.script_key_local, + outputs.split_commitment_root_hash, + outputs.split_commitment_root_value, + outputs.num_passive_assets, + outputs.output_type, + outputs.proof_courier_addr, + outputs.proof_delivery_complete, + outputs.position, + outputs.asset_version, + outputs.lock_time, + outputs.relative_lock_time, utxos.utxo_id AS anchor_utxo_id, utxos.outpoint AS anchor_outpoint, utxos.amt_sats AS anchor_value, @@ -137,19 +157,19 @@ SELECT utxo_internal_keys.raw_key AS internal_key_raw_key_bytes, utxo_internal_keys.key_family AS internal_key_family, utxo_internal_keys.key_index AS internal_key_index, - script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, - script_internal_keys.key_id, script_internal_keys.raw_key, script_internal_keys.key_family, script_internal_keys.key_index -FROM asset_transfer_outputs outputs -JOIN managed_utxos utxos - ON outputs.anchor_utxo = utxos.utxo_id + script_keys.script_key_id, script_keys.internal_key_id, script_keys.tweaked_script_key, script_keys.tweak, script_keys.key_type, -- noqa: RF02,AL03 + script_internal_keys.key_id, script_internal_keys.raw_key, script_internal_keys.key_family, script_internal_keys.key_index -- noqa: 
RF02,AL03 +FROM asset_transfer_outputs AS outputs +JOIN managed_utxos AS utxos + ON outputs.anchor_utxo = utxos.utxo_id JOIN script_keys - ON outputs.script_key = script_keys.script_key_id -JOIN internal_keys script_internal_keys - ON script_keys.internal_key_id = script_internal_keys.key_id -JOIN internal_keys utxo_internal_keys - ON utxos.internal_key_id = utxo_internal_keys.key_id -WHERE transfer_id = $1 -ORDER BY output_id + ON outputs.script_key = script_keys.script_key_id +JOIN internal_keys AS script_internal_keys + ON script_keys.internal_key_id = script_internal_keys.key_id +JOIN internal_keys AS utxo_internal_keys + ON utxos.internal_key_id = utxo_internal_keys.key_id +WHERE outputs.transfer_id = $1 +ORDER BY outputs.output_id ` type FetchTransferOutputsRow struct { @@ -241,11 +261,12 @@ func (q *Queries) FetchTransferOutputs(ctx context.Context, transferID int64) ([ } const InsertAssetTransfer = `-- name: InsertAssetTransfer :one -WITH target_txn(txn_id) AS ( +WITH target_txn (txn_id) AS ( SELECT txn_id FROM chain_txns WHERE txid = $5 ) + INSERT INTO asset_transfers ( height_hint, anchor_txn_id, transfer_time_unix, label, skip_anchor_tx_broadcast @@ -390,19 +411,21 @@ func (q *Queries) InsertBurn(ctx context.Context, arg InsertBurnParams) (int64, } const InsertPassiveAsset = `-- name: InsertPassiveAsset :exec -WITH target_asset(asset_id) AS ( +WITH target_asset (asset_id) AS ( SELECT assets.asset_id FROM assets - JOIN genesis_assets - ON assets.genesis_id = genesis_assets.gen_asset_id - JOIN managed_utxos utxos - ON assets.anchor_utxo_id = utxos.utxo_id - JOIN script_keys - ON assets.script_key_id = script_keys.script_key_id - WHERE genesis_assets.asset_id = $7 + JOIN genesis_assets + ON assets.genesis_id = genesis_assets.gen_asset_id + JOIN managed_utxos AS utxos + ON assets.anchor_utxo_id = utxos.utxo_id + JOIN script_keys + ON assets.script_key_id = script_keys.script_key_id + WHERE + genesis_assets.asset_id = $7 AND utxos.outpoint = $8 AND script_keys.tweaked_script_key = $3 ) + INSERT INTO passive_assets ( asset_id, transfer_id, new_anchor_utxo, script_key, new_witness_stack, new_proof, asset_version @@ -458,32 +481,39 @@ func (q *Queries) LogProofTransferAttempt(ctx context.Context, arg LogProofTrans const QueryAssetTransfers = `-- name: QueryAssetTransfers :many SELECT - id, height_hint, txns.txid, txns.block_hash AS anchor_tx_block_hash, - transfer_time_unix, transfers.label, + transfers.id, + transfers.height_hint, + txns.txid, + txns.block_hash AS anchor_tx_block_hash, + transfers.transfer_time_unix, + transfers.label, transfers.skip_anchor_tx_broadcast -FROM asset_transfers transfers -JOIN chain_txns txns - ON txns.txn_id = transfers.anchor_txn_id +FROM asset_transfers AS transfers +JOIN chain_txns AS txns + ON transfers.anchor_txn_id = txns.txn_id WHERE -- Optionally filter on a given anchor_tx_hash. - (txns.txid = $1 - OR $1 IS NULL) + ( + txns.txid = $1 + OR $1 IS NULL + ) -- Filter for pending transfers only if requested. 
AND ( - $2 = true AND - ( + $2 = TRUE + AND ( txns.block_hash IS NULL - OR EXISTS ( - SELECT 1 - FROM asset_transfer_outputs outputs - WHERE outputs.transfer_id = transfers.id - AND outputs.proof_delivery_complete = false - ) + OR EXISTS ( + SELECT 1 + FROM asset_transfer_outputs AS outputs + WHERE + outputs.transfer_id = transfers.id + AND outputs.proof_delivery_complete = FALSE + ) ) - OR $2 = false OR $2 IS NULL + OR $2 = FALSE OR $2 IS NULL ) -ORDER BY transfer_time_unix +ORDER BY transfers.transfer_time_unix ` type QueryAssetTransfersParams struct { @@ -539,9 +569,9 @@ SELECT abt.group_key, abt.amount, ct.txid AS anchor_txid -- Retrieving the txid from chain_txns. -FROM asset_burn_transfers abt -JOIN asset_transfers at ON abt.transfer_id = at.id -JOIN chain_txns ct ON at.anchor_txn_id = ct.txn_id +FROM asset_burn_transfers AS abt +JOIN asset_transfers AS at ON abt.transfer_id = at.id +JOIN chain_txns AS ct ON at.anchor_txn_id = ct.txn_id WHERE -- Optionally filter by asset_id. (abt.asset_id = $1 OR $1 IS NULL) @@ -598,17 +628,22 @@ func (q *Queries) QueryBurns(ctx context.Context, arg QueryBurnsParams) ([]Query } const QueryPassiveAssets = `-- name: QueryPassiveAssets :many -SELECT passive.asset_id, passive.new_anchor_utxo, passive.script_key, - passive.new_witness_stack, passive.new_proof, - genesis_assets.asset_id AS genesis_id, passive.asset_version, - utxos.outpoint -FROM passive_assets as passive - JOIN assets - ON passive.asset_id = assets.asset_id - JOIN genesis_assets - ON assets.genesis_id = genesis_assets.gen_asset_id - JOIN managed_utxos utxos - ON passive.new_anchor_utxo = utxos.utxo_id +SELECT + passive.asset_id, + passive.new_anchor_utxo, + passive.script_key, + passive.new_witness_stack, + passive.new_proof, + genesis_assets.asset_id AS genesis_id, + passive.asset_version, + utxos.outpoint +FROM passive_assets AS passive +JOIN assets + ON passive.asset_id = assets.asset_id +JOIN genesis_assets + ON assets.genesis_id = genesis_assets.gen_asset_id +JOIN managed_utxos AS utxos + ON passive.new_anchor_utxo = utxos.utxo_id WHERE passive.transfer_id = $1 ` @@ -658,7 +693,8 @@ func (q *Queries) QueryPassiveAssets(ctx context.Context, transferID int64) ([]Q const QueryProofTransferAttempts = `-- name: QueryProofTransferAttempts :many SELECT time_unix FROM proof_transfer_log -WHERE proof_locator_hash = $1 +WHERE + proof_locator_hash = $1 AND transfer_type = $2 ORDER BY time_unix DESC ` @@ -693,7 +729,8 @@ func (q *Queries) QueryProofTransferAttempts(ctx context.Context, arg QueryProof const ReAnchorPassiveAssets = `-- name: ReAnchorPassiveAssets :exec UPDATE assets -SET anchor_utxo_id = $1, +SET + anchor_utxo_id = $1, -- The following fields need to be the same fields we reset in -- Asset.CopySpendTemplate. 
split_commitment_root_hash = NULL, @@ -714,14 +751,16 @@ func (q *Queries) ReAnchorPassiveAssets(ctx context.Context, arg ReAnchorPassive } const SetTransferOutputProofDeliveryStatus = `-- name: SetTransferOutputProofDeliveryStatus :exec -WITH target(output_id) AS ( - SELECT output_id - FROM asset_transfer_outputs output +WITH target (output_id) AS ( + SELECT output.output_id + FROM asset_transfer_outputs AS output JOIN managed_utxos - ON output.anchor_utxo = managed_utxos.utxo_id - WHERE managed_utxos.outpoint = $2 - AND output.position = $3 + ON output.anchor_utxo = managed_utxos.utxo_id + WHERE + managed_utxos.outpoint = $2 + AND output.position = $3 ) + UPDATE asset_transfer_outputs SET proof_delivery_complete = $1 WHERE output_id = (SELECT output_id FROM target) diff --git a/tapdb/sqlc/universe.sql.go b/tapdb/sqlc/universe.sql.go index d2b42310a..8dafeb3c6 100644 --- a/tapdb/sqlc/universe.sql.go +++ b/tapdb/sqlc/universe.sql.go @@ -18,18 +18,27 @@ WITH selected_server_id AS ( SELECT id FROM universe_servers WHERE - (server_host = $4 - OR $4 IS NULL) + ( + server_host = $4 + OR $4 IS NULL + ) ) + DELETE FROM federation_proof_sync_log WHERE - servers_id IN (SELECT id FROM selected_server_id) AND - (status = $1 - OR $1 IS NULL) AND - (timestamp >= $2 - OR $2 IS NULL) AND - (attempt_counter >= $3 - OR $3 IS NULL) + servers_id IN (SELECT id FROM selected_server_id) + AND ( + status = $1 + OR $1 IS NULL + ) + AND ( + timestamp >= $2 + OR $2 IS NULL + ) + AND ( + attempt_counter >= $3 + OR $3 IS NULL + ) ` type DeleteFederationProofSyncLogParams struct { @@ -70,6 +79,7 @@ WITH root_id AS ( FROM universe_roots WHERE namespace_root = $1 ) + DELETE FROM universe_events WHERE universe_root_id = (SELECT id FROM root_id) ` @@ -115,14 +125,18 @@ func (q *Queries) DeleteUniverseServer(ctx context.Context, arg DeleteUniverseSe } const FetchMultiverseRoot = `-- name: FetchMultiverseRoot :one -SELECT proof_type, n.hash_key as multiverse_root_hash, n.sum as multiverse_root_sum -FROM multiverse_roots r -JOIN mssmt_roots m +SELECT + r.proof_type, + n.hash_key AS multiverse_root_hash, + n.sum AS multiverse_root_sum +FROM multiverse_roots AS r +JOIN mssmt_roots AS m ON r.namespace_root = m.namespace -JOIN mssmt_nodes n - ON m.root_hash = n.hash_key AND - m.namespace = n.namespace -WHERE namespace_root = $1 +JOIN mssmt_nodes AS n + ON + m.root_hash = n.hash_key + AND m.namespace = n.namespace +WHERE r.namespace_root = $1 ` type FetchMultiverseRootRow struct { @@ -139,10 +153,12 @@ func (q *Queries) FetchMultiverseRoot(ctx context.Context, namespaceRoot string) } const FetchUniverseKeys = `-- name: FetchUniverseKeys :many -SELECT leaves.minting_point, leaves.script_key_bytes +SELECT + leaves.minting_point, + leaves.script_key_bytes FROM universe_leaves AS leaves WHERE leaves.leaf_node_namespace = $1 -ORDER BY +ORDER BY CASE WHEN $2 = 0 THEN leaves.id END ASC, CASE WHEN $2 = 1 THEN leaves.id END DESC LIMIT $4 OFFSET $3 @@ -189,17 +205,22 @@ func (q *Queries) FetchUniverseKeys(ctx context.Context, arg FetchUniverseKeysPa } const FetchUniverseRoot = `-- name: FetchUniverseRoot :one -SELECT universe_roots.asset_id, group_key, proof_type, - mssmt_nodes.hash_key root_hash, mssmt_nodes.sum root_sum, - genesis_assets.asset_tag asset_name +SELECT + universe_roots.asset_id, + universe_roots.group_key, + universe_roots.proof_type, + mssmt_nodes.hash_key AS root_hash, + mssmt_nodes.sum AS root_sum, + genesis_assets.asset_tag AS asset_name FROM universe_roots -JOIN mssmt_roots +JOIN mssmt_roots ON 
universe_roots.namespace_root = mssmt_roots.namespace -JOIN mssmt_nodes - ON mssmt_nodes.hash_key = mssmt_roots.root_hash - AND mssmt_nodes.namespace = mssmt_roots.namespace +JOIN mssmt_nodes + ON + mssmt_roots.root_hash = mssmt_nodes.hash_key + AND mssmt_roots.namespace = mssmt_nodes.namespace JOIN genesis_assets - ON genesis_assets.asset_id = universe_roots.asset_id + ON universe_roots.asset_id = genesis_assets.asset_id WHERE mssmt_nodes.namespace = $1 ` @@ -228,30 +249,37 @@ func (q *Queries) FetchUniverseRoot(ctx context.Context, namespace string) (Fetc const InsertNewProofEvent = `-- name: InsertNewProofEvent :exec WITH group_key_root_id AS ( - SELECT id - FROM universe_roots roots - WHERE group_key = $1 + SELECT roots.id + FROM universe_roots AS roots + WHERE + roots.group_key = $1 AND roots.proof_type = $4 -), asset_id_root_id AS ( +), + +asset_id_root_id AS ( SELECT leaves.universe_root_id AS id - FROM universe_leaves leaves - JOIN universe_roots roots + FROM universe_leaves AS leaves + JOIN universe_roots AS roots ON leaves.universe_root_id = roots.id - JOIN genesis_info_view gen + JOIN genesis_info_view AS gen ON leaves.asset_genesis_id = gen.gen_asset_id - WHERE gen.asset_id = $5 + WHERE + gen.asset_id = $5 AND roots.proof_type = $4 LIMIT 1 ) + INSERT INTO universe_events ( event_type, universe_root_id, event_time, event_timestamp ) VALUES ( 'NEW_PROOF', - CASE WHEN length($1) > 0 THEN ( + CASE + WHEN length($1) > 0 THEN ( SELECT id FROM group_key_root_id ) ELSE ( SELECT id FROM asset_id_root_id - ) END, + ) + END, $2, $3 ) ` @@ -277,30 +305,37 @@ func (q *Queries) InsertNewProofEvent(ctx context.Context, arg InsertNewProofEve const InsertNewSyncEvent = `-- name: InsertNewSyncEvent :exec WITH group_key_root_id AS ( - SELECT id - FROM universe_roots roots - WHERE group_key = $1 - AND roots.proof_type = $4 -), asset_id_root_id AS ( + SELECT roots.id + FROM universe_roots AS roots + WHERE + roots.group_key = $1 + AND roots.proof_type = $4 +), + +asset_id_root_id AS ( SELECT leaves.universe_root_id AS id - FROM universe_leaves leaves - JOIN universe_roots roots + FROM universe_leaves AS leaves + JOIN universe_roots AS roots ON leaves.universe_root_id = roots.id - JOIN genesis_info_view gen + JOIN genesis_info_view AS gen ON leaves.asset_genesis_id = gen.gen_asset_id - WHERE gen.asset_id = $5 + WHERE + gen.asset_id = $5 AND roots.proof_type = $4 LIMIT 1 ) + INSERT INTO universe_events ( event_type, universe_root_id, event_time, event_timestamp ) VALUES ( 'SYNC', - CASE WHEN length($1) > 0 THEN ( + CASE + WHEN length($1) > 0 THEN ( SELECT id FROM group_key_root_id ) ELSE ( SELECT id FROM asset_id_root_id - ) END, + ) + END, $2, $3 ) ` @@ -325,7 +360,7 @@ func (q *Queries) InsertNewSyncEvent(ctx context.Context, arg InsertNewSyncEvent } const InsertUniverseServer = `-- name: InsertUniverseServer :exec -INSERT INTO universe_servers( +INSERT INTO universe_servers ( server_host, last_sync_time ) VALUES ( $1, $2 @@ -361,11 +396,13 @@ func (q *Queries) LogServerSync(ctx context.Context, arg LogServerSyncParams) er const QueryAssetStatsPerDayPostgres = `-- name: QueryAssetStatsPerDayPostgres :many SELECT to_char(to_timestamp(event_timestamp), 'YYYY-MM-DD') AS day, - SUM(CASE WHEN event_type = 'SYNC' THEN 1 ELSE 0 END) AS sync_events, - SUM(CASE WHEN event_type = 'NEW_PROOF' THEN 1 ELSE 0 END) AS new_proof_events + sum(CASE WHEN event_type = 'SYNC' THEN 1 ELSE 0 END) AS sync_events, + sum(CASE WHEN event_type = 'NEW_PROOF' THEN 1 ELSE 0 END) + AS new_proof_events FROM universe_events -WHERE 
event_type IN ('SYNC', 'NEW_PROOF') - AND event_timestamp BETWEEN $1 AND $2 +WHERE + event_type IN ('SYNC', 'NEW_PROOF') + AND event_timestamp BETWEEN $1 AND $2 GROUP BY day ORDER BY day ` @@ -407,12 +444,15 @@ func (q *Queries) QueryAssetStatsPerDayPostgres(ctx context.Context, arg QueryAs const QueryAssetStatsPerDaySqlite = `-- name: QueryAssetStatsPerDaySqlite :many SELECT - cast(strftime('%Y-%m-%d', datetime(event_timestamp, 'unixepoch')) as text) AS day, - SUM(CASE WHEN event_type = 'SYNC' THEN 1 ELSE 0 END) AS sync_events, - SUM(CASE WHEN event_type = 'NEW_PROOF' THEN 1 ELSE 0 END) AS new_proof_events + cast(strftime('%Y-%m-%d', datetime(event_timestamp, 'unixepoch')) AS text) + AS day, + sum(CASE WHEN event_type = 'SYNC' THEN 1 ELSE 0 END) AS sync_events, + sum(CASE WHEN event_type = 'NEW_PROOF' THEN 1 ELSE 0 END) + AS new_proof_events FROM universe_events -WHERE event_type IN ('SYNC', 'NEW_PROOF') AND - event_timestamp >= $1 AND event_timestamp <= $2 +WHERE + event_type IN ('SYNC', 'NEW_PROOF') + AND event_timestamp >= $1 AND event_timestamp <= $2 GROUP BY day ORDER BY day ` @@ -452,7 +492,10 @@ func (q *Queries) QueryAssetStatsPerDaySqlite(ctx context.Context, arg QueryAsse } const QueryFederationGlobalSyncConfigs = `-- name: QueryFederationGlobalSyncConfigs :many -SELECT proof_type, allow_sync_insert, allow_sync_export +SELECT + proof_type, + allow_sync_insert, + allow_sync_export FROM federation_global_sync_config ORDER BY proof_type ` @@ -482,7 +525,11 @@ func (q *Queries) QueryFederationGlobalSyncConfigs(ctx context.Context) ([]Feder const QueryFederationProofSyncLog = `-- name: QueryFederationProofSyncLog :many SELECT - log.id, status, timestamp, sync_direction, attempt_counter, + log.id, + log.status, + log.timestamp, + log.sync_direction, + log.attempt_counter, -- Select fields from the universe_servers table. server.id AS server_id, server.server_host, @@ -498,22 +545,34 @@ SELECT root.proof_type AS uni_proof_type FROM federation_proof_sync_log AS log JOIN universe_leaves AS leaf - ON leaf.id = log.proof_leaf_id + ON log.proof_leaf_id = leaf.id JOIN mssmt_nodes AS mssmt_node - ON leaf.leaf_node_key = mssmt_node.key + ON + leaf.leaf_node_key = mssmt_node.key AND leaf.leaf_node_namespace = mssmt_node.namespace JOIN genesis_info_view AS genesis - ON leaf.asset_genesis_id = genesis.gen_asset_id + ON leaf.asset_genesis_id = genesis.gen_asset_id JOIN universe_servers AS server - ON server.id = log.servers_id + ON log.servers_id = server.id JOIN universe_roots AS root - ON root.id = log.universe_root_id -WHERE (log.sync_direction = $1 OR $1 IS NULL) - AND (log.status = $2 OR $2 IS NULL) - -- Universe leaves WHERE clauses. - AND (leaf.leaf_node_namespace = $3 OR $3 IS NULL) - AND (leaf.minting_point = $4 OR $4 IS NULL) - AND (leaf.script_key_bytes = $5 OR $5 IS NULL) + ON log.universe_root_id = root.id +WHERE ( + log.sync_direction = $1 + OR $1 IS NULL +) +AND (log.status = $2 OR $2 IS NULL) +AND ( + leaf.leaf_node_namespace = $3 + OR $3 IS NULL +) +AND ( + leaf.minting_point = $4 + OR $4 IS NULL +) +AND ( + leaf.script_key_bytes = $5 + OR $5 IS NULL +) ` type QueryFederationProofSyncLogParams struct { @@ -544,6 +603,7 @@ type QueryFederationProofSyncLogRow struct { // Join on mssmt_nodes to get leaf related fields. // Join on genesis_info_view to get leaf related fields. +// Universe leaves WHERE clauses. 
func (q *Queries) QueryFederationProofSyncLog(ctx context.Context, arg QueryFederationProofSyncLogParams) ([]QueryFederationProofSyncLogRow, error) { rows, err := q.db.QueryContext(ctx, QueryFederationProofSyncLog, arg.SyncDirection, @@ -590,7 +650,13 @@ func (q *Queries) QueryFederationProofSyncLog(ctx context.Context, arg QueryFede } const QueryFederationUniSyncConfigs = `-- name: QueryFederationUniSyncConfigs :many -SELECT namespace, asset_id, group_key, proof_type, allow_sync_insert, allow_sync_export +SELECT + namespace, + asset_id, + group_key, + proof_type, + allow_sync_insert, + allow_sync_export FROM federation_uni_sync_config ORDER BY group_key NULLS LAST, asset_id NULLS LAST, proof_type ` @@ -635,17 +701,24 @@ func (q *Queries) QueryFederationUniSyncConfigs(ctx context.Context) ([]QueryFed } const QueryMultiverseLeaves = `-- name: QueryMultiverseLeaves :many -SELECT r.namespace_root, r.proof_type, l.asset_id, l.group_key, - smt_nodes.value AS universe_root_hash, smt_nodes.sum AS universe_root_sum -FROM multiverse_leaves l -JOIN mssmt_nodes smt_nodes - ON l.leaf_node_key = smt_nodes.key AND - l.leaf_node_namespace = smt_nodes.namespace -JOIN multiverse_roots r - ON l.multiverse_root_id = r.id -WHERE r.proof_type = $1 AND - (l.asset_id = $2 OR $2 IS NULL) AND - (l.group_key = $3 OR $3 IS NULL) +SELECT + r.namespace_root, + r.proof_type, + l.asset_id, + l.group_key, + smt_nodes.value AS universe_root_hash, + smt_nodes.sum AS universe_root_sum +FROM multiverse_leaves AS l +JOIN mssmt_nodes AS smt_nodes + ON + l.leaf_node_key = smt_nodes.key + AND l.leaf_node_namespace = smt_nodes.namespace +JOIN multiverse_roots AS r + ON l.multiverse_root_id = r.id +WHERE + r.proof_type = $1 + AND (l.asset_id = $2 OR $2 IS NULL) + AND (l.group_key = $3 OR $3 IS NULL) ` type QueryMultiverseLeavesParams struct { @@ -696,56 +769,80 @@ func (q *Queries) QueryMultiverseLeaves(ctx context.Context, arg QueryMultiverse const QueryUniverseAssetStats = `-- name: QueryUniverseAssetStats :many WITH asset_supply AS ( - SELECT SUM(nodes.sum) AS supply, gen.asset_id AS asset_id - FROM universe_leaves leaves - JOIN universe_roots roots + SELECT + sum(nodes.sum) AS supply, + gen.asset_id + FROM universe_leaves AS leaves + JOIN universe_roots AS roots ON leaves.universe_root_id = roots.id - JOIN mssmt_nodes nodes - ON leaves.leaf_node_key = nodes.key AND - leaves.leaf_node_namespace = nodes.namespace - JOIN genesis_info_view gen + JOIN mssmt_nodes AS nodes + ON + leaves.leaf_node_key = nodes.key + AND leaves.leaf_node_namespace = nodes.namespace + JOIN genesis_info_view AS gen ON leaves.asset_genesis_id = gen.gen_asset_id WHERE roots.proof_type = 'issuance' GROUP BY gen.asset_id -), group_supply AS ( - SELECT sum AS num_assets, uroots.group_key AS group_key - FROM mssmt_nodes nodes - JOIN mssmt_roots roots - ON nodes.hash_key = roots.root_hash AND - nodes.namespace = roots.namespace - JOIN universe_roots uroots - ON roots.namespace = uroots.namespace_root +), + +group_supply AS ( + SELECT + nodes.sum AS num_assets, + uroots.group_key + FROM mssmt_nodes AS nodes + JOIN mssmt_roots AS roots + ON + nodes.hash_key = roots.root_hash + AND nodes.namespace = roots.namespace + JOIN universe_roots AS uroots + ON roots.namespace = uroots.namespace_root WHERE uroots.proof_type = 'issuance' -), asset_info AS ( - SELECT asset_supply.supply, group_supply.num_assets AS group_supply, - gen.asset_id AS asset_id, - gen.asset_tag AS asset_name, gen.asset_type AS asset_type, - gen.block_height AS genesis_height, gen.prev_out AS 
genesis_prev_out, - group_info.tweaked_group_key AS group_key, - gen.output_index AS anchor_index, gen.anchor_txid AS anchor_txid - FROM genesis_info_view gen +), + +asset_info AS ( + SELECT + asset_supply.supply, + group_supply.num_assets AS group_supply, + gen.asset_id, + gen.asset_tag AS asset_name, + gen.asset_type, + gen.block_height AS genesis_height, + gen.prev_out AS genesis_prev_out, + group_info.tweaked_group_key AS group_key, + gen.output_index AS anchor_index, + gen.anchor_txid + FROM genesis_info_view AS gen JOIN asset_supply - ON asset_supply.asset_id = gen.asset_id + ON gen.asset_id = asset_supply.asset_id -- We use a LEFT JOIN here as not every asset has a group key, so this'll -- generate rows that have NULL values for the group key fields if an asset -- doesn't have a group key. - LEFT JOIN key_group_info_view group_info + LEFT JOIN key_group_info_view AS group_info ON gen.gen_asset_id = group_info.gen_asset_id LEFT JOIN group_supply - ON group_supply.group_key = group_info.x_only_group_key - WHERE (gen.asset_tag = $5 OR $5 IS NULL) AND - (gen.asset_type = $6 OR $6 IS NULL) AND - (gen.asset_id = $7 OR $7 IS NULL) + ON group_info.x_only_group_key = group_supply.group_key + WHERE ( + gen.asset_tag = $5 + OR $5 IS NULL + ) + AND ( + gen.asset_type = $6 + OR $6 IS NULL + ) + AND (gen.asset_id = $7 OR $7 IS NULL) ) -SELECT asset_info.supply AS asset_supply, - asset_info.group_supply AS group_supply, - asset_info.asset_name AS asset_name, - asset_info.asset_type AS asset_type, asset_info.asset_id AS asset_id, - asset_info.genesis_height AS genesis_height, - asset_info.genesis_prev_out AS genesis_prev_out, - asset_info.group_key AS group_key, - asset_info.anchor_index AS anchor_index, - asset_info.anchor_txid AS anchor_txid, + +SELECT + asset_info.supply AS asset_supply, + asset_info.group_supply, + asset_info.asset_name, + asset_info.asset_type, + asset_info.asset_id, + asset_info.genesis_height, + asset_info.genesis_prev_out, + asset_info.group_key, + asset_info.anchor_index, + asset_info.anchor_txid, universe_stats.total_asset_syncs AS total_syncs, universe_stats.total_asset_proofs AS total_proofs FROM asset_info @@ -753,34 +850,104 @@ JOIN universe_stats ON asset_info.asset_id = universe_stats.asset_id WHERE universe_stats.proof_type = 'issuance' ORDER BY - CASE WHEN $1 = 'asset_id' AND $2 = 0 THEN - asset_info.asset_id END ASC, - CASE WHEN $1 = 'asset_id' AND $2 = 1 THEN - asset_info.asset_id END DESC, - CASE WHEN $1 = 'asset_name' AND $2 = 0 THEN - asset_info.asset_name END ASC , - CASE WHEN $1 = 'asset_name' AND $2 = 1 THEN - asset_info.asset_name END DESC , - CASE WHEN $1 = 'asset_type' AND $2 = 0 THEN - asset_info.asset_type END ASC , - CASE WHEN $1 = 'asset_type' AND $2 = 1 THEN - asset_info.asset_type END DESC, - CASE WHEN $1 = 'total_syncs' AND $2 = 0 THEN - universe_stats.total_asset_syncs END ASC , - CASE WHEN $1 = 'total_syncs' AND $2 = 1 THEN - universe_stats.total_asset_syncs END DESC, - CASE WHEN $1 = 'total_proofs' AND $2 = 0 THEN - universe_stats.total_asset_proofs END ASC , - CASE WHEN $1 = 'total_proofs' AND $2 = 1 THEN - universe_stats.total_asset_proofs END DESC, - CASE WHEN $1 = 'genesis_height' AND $2 = 0 THEN - asset_info.genesis_height END ASC , - CASE WHEN $1 = 'genesis_height' AND $2 = 1 THEN - asset_info.genesis_height END DESC, - CASE WHEN $1 = 'total_supply' AND $2 = 0 THEN - asset_info.supply END ASC , - CASE WHEN $1 = 'total_supply' AND $2 = 1 THEN - asset_info.supply END DESC + CASE + WHEN + $1 = 'asset_id' + AND $2 = 0 + THEN + 
asset_info.asset_id + END ASC, + CASE + WHEN + $1 = 'asset_id' + AND $2 = 1 + THEN + asset_info.asset_id + END DESC, + CASE + WHEN + $1 = 'asset_name' + AND $2 = 0 + THEN + asset_info.asset_name + END ASC, + CASE + WHEN + $1 = 'asset_name' + AND $2 = 1 + THEN + asset_info.asset_name + END DESC, + CASE + WHEN + $1 = 'asset_type' + AND $2 = 0 + THEN + asset_info.asset_type + END ASC, + CASE + WHEN + $1 = 'asset_type' + AND $2 = 1 + THEN + asset_info.asset_type + END DESC, + CASE + WHEN + $1 = 'total_syncs' + AND $2 = 0 + THEN + universe_stats.total_asset_syncs + END ASC, + CASE + WHEN + $1 = 'total_syncs' + AND $2 = 1 + THEN + universe_stats.total_asset_syncs + END DESC, + CASE + WHEN + $1 = 'total_proofs' + AND $2 = 0 + THEN + universe_stats.total_asset_proofs + END ASC, + CASE + WHEN + $1 = 'total_proofs' + AND $2 = 1 + THEN + universe_stats.total_asset_proofs + END DESC, + CASE + WHEN + $1 = 'genesis_height' + AND $2 = 0 + THEN + asset_info.genesis_height + END ASC, + CASE + WHEN + $1 = 'genesis_height' + AND $2 = 1 + THEN + asset_info.genesis_height + END DESC, + CASE + WHEN + $1 = 'total_supply' + AND $2 = 0 + THEN + asset_info.supply + END ASC, + CASE + WHEN + $1 = 'total_supply' + AND $2 = 1 + THEN + asset_info.supply + END DESC LIMIT $4 OFFSET $3 ` @@ -856,19 +1023,29 @@ func (q *Queries) QueryUniverseAssetStats(ctx context.Context, arg QueryUniverse } const QueryUniverseLeaves = `-- name: QueryUniverseLeaves :many -SELECT leaves.script_key_bytes, gen.gen_asset_id, nodes.value AS genesis_proof, - nodes.sum AS sum_amt, gen.asset_id +SELECT + leaves.script_key_bytes, + gen.gen_asset_id, + nodes.value AS genesis_proof, + nodes.sum AS sum_amt, + gen.asset_id FROM universe_leaves AS leaves JOIN mssmt_nodes AS nodes - ON leaves.leaf_node_key = nodes.key - AND leaves.leaf_node_namespace = nodes.namespace + ON + leaves.leaf_node_key = nodes.key + AND leaves.leaf_node_namespace = nodes.namespace JOIN genesis_info_view AS gen ON leaves.asset_genesis_id = gen.gen_asset_id -WHERE leaves.leaf_node_namespace = $1 - AND (leaves.minting_point = $2 OR - $2 IS NULL) - AND (leaves.script_key_bytes = $3 OR - $3 IS NULL) +WHERE + leaves.leaf_node_namespace = $1 + AND ( + leaves.minting_point = $2 + OR $2 IS NULL + ) + AND ( + leaves.script_key_bytes = $3 + OR $3 IS NULL + ) ` type QueryUniverseLeavesParams struct { @@ -916,9 +1093,12 @@ func (q *Queries) QueryUniverseLeaves(ctx context.Context, arg QueryUniverseLeav const QueryUniverseServers = `-- name: QueryUniverseServers :many SELECT id, server_host, last_sync_time FROM universe_servers -WHERE (id = $1 OR $1 IS NULL) AND - (server_host = $2 - OR $2 IS NULL) +WHERE + (id = $1 OR $1 IS NULL) + AND ( + server_host = $2 + OR $2 IS NULL + ) ` type QueryUniverseServersParams struct { @@ -951,43 +1131,57 @@ func (q *Queries) QueryUniverseServers(ctx context.Context, arg QueryUniverseSer const QueryUniverseStats = `-- name: QueryUniverseStats :one WITH stats AS ( - SELECT total_asset_syncs, total_asset_proofs + SELECT + total_asset_syncs, + total_asset_proofs FROM universe_stats -), group_ids AS ( +), + +group_ids AS ( SELECT id FROM universe_roots WHERE group_key IS NOT NULL -), asset_keys AS ( - SELECT hash_key - FROM mssmt_nodes nodes - JOIN mssmt_roots roots - ON nodes.hash_key = roots.root_hash AND - nodes.namespace = roots.namespace - JOIN universe_roots uroots - ON roots.namespace = uroots.namespace_root -), aggregated AS ( - SELECT COALESCE(SUM(stats.total_asset_syncs), 0) AS total_syncs, - COALESCE(SUM(stats.total_asset_proofs), 0) AS total_proofs, 
- 0 AS total_num_groups, - 0 AS total_num_assets +), + +asset_keys AS ( + SELECT nodes.hash_key + FROM mssmt_nodes AS nodes + JOIN mssmt_roots AS roots + ON + nodes.hash_key = roots.root_hash + AND nodes.namespace = roots.namespace + JOIN universe_roots AS uroots + ON roots.namespace = uroots.namespace_root +), + +aggregated AS ( + SELECT + coalesce(sum(stats.total_asset_syncs), 0) AS total_syncs, + coalesce(sum(stats.total_asset_proofs), 0) AS total_proofs, + 0 AS total_num_groups, + 0 AS total_num_assets FROM stats UNION ALL - SELECT 0 AS total_syncs, - 0 AS total_proofs, - COALESCE(COUNT(group_ids.id), 0) AS total_num_groups, - 0 AS total_num_assets + SELECT + 0 AS total_syncs, + 0 AS total_proofs, + coalesce(count(group_ids.id), 0) AS total_num_groups, + 0 AS total_num_assets FROM group_ids UNION ALL - SELECT 0 AS total_syncs, - 0 AS total_proofs, - 0 AS total_num_groups, - COALESCE(COUNT(asset_keys.hash_key), 0) AS total_num_assets + SELECT + 0 AS total_syncs, + 0 AS total_proofs, + 0 AS total_num_groups, + coalesce(count(asset_keys.hash_key), 0) AS total_num_assets FROM asset_keys ) -SELECT SUM(total_syncs) AS total_syncs, - SUM(total_proofs) AS total_proofs, - SUM(total_num_groups) AS total_num_groups, - SUM(total_num_assets) AS total_num_assets + +SELECT + sum(total_syncs) AS total_syncs, + sum(total_proofs) AS total_proofs, + sum(total_num_groups) AS total_num_groups, + sum(total_num_assets) AS total_num_assets FROM aggregated ` @@ -1046,18 +1240,23 @@ func (q *Queries) UniverseLeaves(ctx context.Context) ([]UniverseLeafe, error) { } const UniverseRoots = `-- name: UniverseRoots :many -SELECT universe_roots.asset_id, group_key, proof_type, - mssmt_roots.root_hash AS root_hash, mssmt_nodes.sum AS root_sum, - genesis_assets.asset_tag AS asset_name +SELECT + universe_roots.asset_id, + universe_roots.group_key, + universe_roots.proof_type, + mssmt_roots.root_hash, + mssmt_nodes.sum AS root_sum, + genesis_assets.asset_tag AS asset_name FROM universe_roots JOIN mssmt_roots ON universe_roots.namespace_root = mssmt_roots.namespace JOIN mssmt_nodes - ON mssmt_nodes.hash_key = mssmt_roots.root_hash - AND mssmt_nodes.namespace = mssmt_roots.namespace + ON + mssmt_roots.root_hash = mssmt_nodes.hash_key + AND mssmt_roots.namespace = mssmt_nodes.namespace JOIN genesis_assets - ON genesis_assets.asset_id = universe_roots.asset_id -ORDER BY + ON universe_roots.asset_id = genesis_assets.asset_id +ORDER BY CASE WHEN $1 = 0 THEN universe_roots.id END ASC, CASE WHEN $1 = 1 THEN universe_roots.id END DESC LIMIT $3 OFFSET $2 @@ -1113,10 +1312,10 @@ INSERT INTO federation_global_sync_config ( proof_type, allow_sync_insert, allow_sync_export ) VALUES ($1, $2, $3) -ON CONFLICT(proof_type) - DO UPDATE SET - allow_sync_insert = $2, - allow_sync_export = $3 +ON CONFLICT (proof_type) +DO UPDATE SET +allow_sync_insert = $2, +allow_sync_export = $3 ` type UpsertFederationGlobalSyncConfigParams struct { @@ -1131,7 +1330,7 @@ func (q *Queries) UpsertFederationGlobalSyncConfig(ctx context.Context, arg Upse } const UpsertFederationProofSyncLog = `-- name: UpsertFederationProofSyncLog :one -INSERT INTO federation_proof_sync_log AS log ( +INSERT INTO federation_proof_sync_log ( status, timestamp, sync_direction, proof_leaf_id, universe_root_id, servers_id ) VALUES ( @@ -1140,7 +1339,8 @@ INSERT INTO federation_proof_sync_log AS log ( -- Select the leaf id from the universe_leaves table. 
SELECT id FROM universe_leaves - WHERE leaf_node_namespace = $4 + WHERE + leaf_node_namespace = $4 AND minting_point = $5 AND script_key_bytes = $6 LIMIT 1 @@ -1161,13 +1361,13 @@ INSERT INTO federation_proof_sync_log AS log ( ) ) ON CONFLICT (sync_direction, proof_leaf_id, universe_root_id, servers_id) DO UPDATE SET - status = EXCLUDED.status, - timestamp = EXCLUDED.timestamp, - -- Increment the attempt counter. - attempt_counter = CASE - WHEN $9 = TRUE THEN log.attempt_counter + 1 - ELSE log.attempt_counter - END +status = excluded.status, +timestamp = excluded.timestamp, +attempt_counter = CASE + WHEN $9 = TRUE + THEN federation_proof_sync_log.attempt_counter + 1 + ELSE federation_proof_sync_log.attempt_counter +END RETURNING id ` @@ -1183,6 +1383,7 @@ type UpsertFederationProofSyncLogParams struct { BumpSyncAttemptCounter interface{} } +// Increment the attempt counter. func (q *Queries) UpsertFederationProofSyncLog(ctx context.Context, arg UpsertFederationProofSyncLogParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertFederationProofSyncLog, arg.Status, @@ -1201,16 +1402,26 @@ func (q *Queries) UpsertFederationProofSyncLog(ctx context.Context, arg UpsertFe } const UpsertFederationUniSyncConfig = `-- name: UpsertFederationUniSyncConfig :exec -INSERT INTO federation_uni_sync_config ( - namespace, asset_id, group_key, proof_type, allow_sync_insert, allow_sync_export +INSERT INTO federation_uni_sync_config ( + namespace, + asset_id, + group_key, + proof_type, + allow_sync_insert, + allow_sync_export ) -VALUES( - $1, $2, $3, $4, $5, $6 +VALUES ( + $1, + $2, + $3, + $4, + $5, + $6 ) -ON CONFLICT(namespace) - DO UPDATE SET - allow_sync_insert = $5, - allow_sync_export = $6 +ON CONFLICT (namespace) +DO UPDATE SET +allow_sync_insert = $5, +allow_sync_export = $6 ` type UpsertFederationUniSyncConfigParams struct { @@ -1242,9 +1453,8 @@ INSERT INTO multiverse_leaves ( $5 ) ON CONFLICT (leaf_node_key, leaf_node_namespace) - -- This is a no-op to allow returning the ID. - DO UPDATE SET leaf_node_key = EXCLUDED.leaf_node_key, - leaf_node_namespace = EXCLUDED.leaf_node_namespace +DO UPDATE SET leaf_node_key = excluded.leaf_node_key, +leaf_node_namespace = excluded.leaf_node_namespace RETURNING id ` @@ -1256,6 +1466,7 @@ type UpsertMultiverseLeafParams struct { LeafNodeNamespace string } +// This is a no-op to allow returning the ID. func (q *Queries) UpsertMultiverseLeaf(ctx context.Context, arg UpsertMultiverseLeafParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertMultiverseLeaf, arg.MultiverseRootID, @@ -1273,8 +1484,7 @@ const UpsertMultiverseRoot = `-- name: UpsertMultiverseRoot :one INSERT INTO multiverse_roots (namespace_root, proof_type) VALUES ($1, $2) ON CONFLICT (namespace_root) - -- This is a no-op to allow returning the ID. - DO UPDATE SET namespace_root = EXCLUDED.namespace_root +DO UPDATE SET namespace_root = excluded.namespace_root RETURNING id ` @@ -1283,6 +1493,7 @@ type UpsertMultiverseRootParams struct { ProofType string } +// This is a no-op to allow returning the ID. 
func (q *Queries) UpsertMultiverseRoot(ctx context.Context, arg UpsertMultiverseRootParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertMultiverseRoot, arg.NamespaceRoot, arg.ProofType) var id int64 @@ -1292,17 +1503,15 @@ func (q *Queries) UpsertMultiverseRoot(ctx context.Context, arg UpsertMultiverse const UpsertUniverseLeaf = `-- name: UpsertUniverseLeaf :exec INSERT INTO universe_leaves ( - asset_genesis_id, script_key_bytes, universe_root_id, leaf_node_key, + asset_genesis_id, script_key_bytes, universe_root_id, leaf_node_key, leaf_node_namespace, minting_point ) VALUES ( $1, $2, $3, $4, $5, $6 ) ON CONFLICT (minting_point, script_key_bytes, leaf_node_namespace) - -- This is a NOP, minting_point and script_key_bytes are the unique fields - -- that caused the conflict. - DO UPDATE SET minting_point = EXCLUDED.minting_point, - script_key_bytes = EXCLUDED.script_key_bytes, - leaf_node_namespace = EXCLUDED.leaf_node_namespace +DO UPDATE SET minting_point = excluded.minting_point, +script_key_bytes = excluded.script_key_bytes, +leaf_node_namespace = excluded.leaf_node_namespace ` type UpsertUniverseLeafParams struct { @@ -1314,6 +1523,8 @@ type UpsertUniverseLeafParams struct { MintingPoint []byte } +// This is a NOP, minting_point and script_key_bytes are the unique fields +// that caused the conflict. func (q *Queries) UpsertUniverseLeaf(ctx context.Context, arg UpsertUniverseLeafParams) error { _, err := q.db.ExecContext(ctx, UpsertUniverseLeaf, arg.AssetGenesisID, @@ -1332,9 +1543,7 @@ INSERT INTO universe_roots ( ) VALUES ( $1, $2, $3, $4 ) ON CONFLICT (namespace_root) - -- This is a NOP, namespace_root is the unique field that caused the - -- conflict. - DO UPDATE SET namespace_root = EXCLUDED.namespace_root +DO UPDATE SET namespace_root = excluded.namespace_root RETURNING id ` @@ -1345,6 +1554,8 @@ type UpsertUniverseRootParams struct { ProofType sql.NullString } +// This is a NOP, namespace_root is the unique field that caused the +// conflict. 
func (q *Queries) UpsertUniverseRoot(ctx context.Context, arg UpsertUniverseRootParams) (int64, error) { row := q.db.QueryRowContext(ctx, UpsertUniverseRoot, arg.NamespaceRoot, From 52108846bf06c7a1b69ea0b55907735285497158 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Fri, 20 Jun 2025 17:24:19 +0200 Subject: [PATCH 3/5] make+tools: add custom sqlfluff plugin --- Makefile | 13 +++ tools/sqlfluff/Dockerfile | 14 ++++ .../sqlfluff_plugin_ref_type/pyproject.toml | 14 ++++ .../sqlfluff_plugin_ref_type/__init__.py | 32 ++++++++ .../sqlfluff_plugin_ref_type/rules.py | 79 +++++++++++++++++++ 5 files changed, 152 insertions(+) create mode 100644 tools/sqlfluff/Dockerfile create mode 100644 tools/sqlfluff/sqlfluff_plugin_ref_type/pyproject.toml create mode 100644 tools/sqlfluff/sqlfluff_plugin_ref_type/sqlfluff_plugin_ref_type/__init__.py create mode 100644 tools/sqlfluff/sqlfluff_plugin_ref_type/sqlfluff_plugin_ref_type/rules.py diff --git a/Makefile b/Makefile index 124adcdca..27111396f 100644 --- a/Makefile +++ b/Makefile @@ -57,6 +57,9 @@ DOCKER_TOOLS = docker run \ -v $(shell bash -c "mkdir -p /tmp/go-lint-cache; echo /tmp/go-lint-cache"):/root/.cache/golangci-lint \ -v $$(pwd):/build taproot-assets-tools +DOCKER_SQLFLUFF = docker run --rm --user "$$UID:$$(id -g)" -e UID=$$UID \ + -v $(shell pwd):/sql sqlfluff-builder + GO_VERSION = 1.23.9 GREEN := "\\033[0;32m" @@ -149,6 +152,10 @@ docker-tools: @$(call print, "Building tools docker image.") docker build -q -t taproot-assets-tools $(TOOLS_DIR) +docker-sqlfluff: + @$(call print, "Building sqlfluff docker image.") + docker build -q -t sqlfluff-builder tools/sqlfluff + scratch: build # =================== @@ -268,6 +275,12 @@ sqlc-check: sqlc exit 1; \ fi +sql-lint: docker-sqlfluff + $(DOCKER_SQLFLUFF) lint tapdb/sqlc/migrations/* tapdb/sqlc/queries/* + +sql-fix: docker-sqlfluff + $(DOCKER_SQLFLUFF) fix tapdb/sqlc/migrations/* tapdb/sqlc/queries/* + rpc: @$(call print, "Compiling protos.") cd ./taprpc; ./gen_protos_docker.sh diff --git a/tools/sqlfluff/Dockerfile b/tools/sqlfluff/Dockerfile new file mode 100644 index 000000000..1734bc475 --- /dev/null +++ b/tools/sqlfluff/Dockerfile @@ -0,0 +1,14 @@ +FROM sqlfluff/sqlfluff:latest + +COPY sqlfluff_plugin_ref_type/ /plugins/sqlfluff_plugin_ref_type/ + +ENV PYTHONPATH="/plugins/sqlfluff_plugin_ref_type" + +USER root + +RUN cd /plugins/sqlfluff_plugin_ref_type \ + chown -R 5000 . \ + && chmod -R 777 /plugins /sql \ + && pip install -e . + +USER 5000 diff --git a/tools/sqlfluff/sqlfluff_plugin_ref_type/pyproject.toml b/tools/sqlfluff/sqlfluff_plugin_ref_type/pyproject.toml new file mode 100644 index 000000000..14de3c929 --- /dev/null +++ b/tools/sqlfluff/sqlfluff_plugin_ref_type/pyproject.toml @@ -0,0 +1,14 @@ +[build-system] +requires = ["setuptools>=40.8.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "sqlfluff_plugin_ref_type" +version = "1.0.0" +requires-python = ">=3.9" +dependencies = [ + "sqlfluff>=3.1.0" +] + +[project.entry-points.sqlfluff] +sqlfluff_plugin_ref_type = "sqlfluff_plugin_ref_type" diff --git a/tools/sqlfluff/sqlfluff_plugin_ref_type/sqlfluff_plugin_ref_type/__init__.py b/tools/sqlfluff/sqlfluff_plugin_ref_type/sqlfluff_plugin_ref_type/__init__.py new file mode 100644 index 000000000..e8e6b51b2 --- /dev/null +++ b/tools/sqlfluff/sqlfluff_plugin_ref_type/sqlfluff_plugin_ref_type/__init__.py @@ -0,0 +1,32 @@ +"""A plugin that checks for forbidden foreign key types in SQL + +This uses the rules API supported from 0.4.0 onwards. 
+""" + +from typing import Any + +from sqlfluff.core.config import load_config_resource +from sqlfluff.core.plugin import hookimpl +from sqlfluff.core.rules import BaseRule, ConfigInfo + + +@hookimpl +def get_rules() -> list[type[BaseRule]]: + """Get plugin rules. + """ + # i.e. we DO recommend importing here: + from .rules import Rule_LL01 # noqa: F811 + + return [Rule_LL01] + + +@hookimpl +def load_default_config() -> dict[str, Any]: + """Loads the default configuration for the plugin.""" + return {} + + +@hookimpl +def get_configs_info() -> dict[str, dict[str, ConfigInfo]]: + """Get rule config validations and descriptions.""" + return {} \ No newline at end of file diff --git a/tools/sqlfluff/sqlfluff_plugin_ref_type/sqlfluff_plugin_ref_type/rules.py b/tools/sqlfluff/sqlfluff_plugin_ref_type/sqlfluff_plugin_ref_type/rules.py new file mode 100644 index 000000000..49fb847d5 --- /dev/null +++ b/tools/sqlfluff/sqlfluff_plugin_ref_type/sqlfluff_plugin_ref_type/rules.py @@ -0,0 +1,79 @@ +"""A custom rule that checks for forbidden INTEGER foreign keys. + +This uses the rules API supported from 0.4.0 onwards. +""" + +from sqlfluff.core.rules import ( + BaseRule, + LintResult, + RuleContext, +) +from sqlfluff.core.rules.crawlers import SegmentSeekerCrawler +from sqlfluff.utils.functional import FunctionalContext, sp + + +# These two decorators allow plugins +# to be displayed in the sqlfluff docs +class Rule_LL01(BaseRule): + """INTEGER ... REFERENCES is forbidden! Use BIGINT instead. + + **Anti-pattern** + + Using ``INTEGER ... REFERENCES`` in a foreign key definition is creating + a mismatch between the actual data type of the referenced primary key and + the foreign key. See ``scripts/gen_sqlc_docker.sh`` for a long-form + explanation. + + .. code-block:: sql + + transfer_id INTEGER NOT NULL REFERENCES asset_transfers (id), + + **Best practice** + + If the primary key of the referenced table is of type + ``INTEGER PRIMARY KEY``, it will be transformed to ``BIGSERIAL`` in + PostgreSQL. Therefore, the foreign key should also be of type ``BIGINT``. + For SQLite both INTEGER and BIGINT are equivalent, so this rule is for + PostgreSQL. + + .. code-block:: sql + + transfer_id BIGINT NOT NULL REFERENCES asset_transfers (id), + """ + + groups = ("all",) + crawl_behaviour = SegmentSeekerCrawler({"column_definition"}) + is_fix_compatible = True + + def __init__(self, *args, **kwargs): + """Overwrite __init__ to set config.""" + super().__init__(*args, **kwargs) + + def _eval(self, context: RuleContext): + """We should not use INTEGER ... REFERENCES.""" + has_integer = False + has_reference = False + data_type_segment = None + + for child in context.segment.segments: + if child.type == "data_type": + # Check if any part of data_type is INTEGER. + for part in child.segments: + if part.raw.upper() == "INTEGER": + has_integer = True + data_type_segment = part + elif child.type == "column_constraint_segment": + # Look for REFERENCES keyword inside constraints. + for subpart in child.segments: + if "REFERENCES" in subpart.raw.upper(): + has_reference = True + + if has_integer and has_reference: + return LintResult( + anchor=data_type_segment, + description="Must use BIGINT instead of INTEGER as a data " + "type in foreign key references. 
See " + "scripts/gen_sqlc_docker.sh for explanation.", + ) + + return None From ce46461a7f5906cb8d13c2acdcde21fbf41a333a Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Tue, 24 Jun 2025 13:57:42 +0200 Subject: [PATCH 4/5] GitHub+make: lint SQL code --- .github/workflows/main.yaml | 10 ++++++++++ Makefile | 3 +++ 2 files changed, 13 insertions(+) diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 3fc0cd5f0..4d684f90c 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -46,6 +46,16 @@ jobs: - name: Generate sql models run: make sqlc-check + sql-lint: + name: Sqlc lint + runs-on: ubuntu-latest + steps: + - name: git checkout + uses: actions/checkout@v4 + + - name: Run sql lint + run: make sql-lint-ci + rpc-check: name: RPC check runs-on: ubuntu-latest diff --git a/Makefile b/Makefile index 27111396f..6773b8dbf 100644 --- a/Makefile +++ b/Makefile @@ -278,6 +278,9 @@ sqlc-check: sqlc sql-lint: docker-sqlfluff $(DOCKER_SQLFLUFF) lint tapdb/sqlc/migrations/* tapdb/sqlc/queries/* +sql-lint-ci: docker-sqlfluff + $(DOCKER_SQLFLUFF) lint --format github-annotation-native tapdb/sqlc/migrations/* tapdb/sqlc/queries/* + sql-fix: docker-sqlfluff $(DOCKER_SQLFLUFF) fix tapdb/sqlc/migrations/* tapdb/sqlc/queries/* From 4966eb5c2e6e3e6c6e4fe9d43852615c9943f2b4 Mon Sep 17 00:00:00 2001 From: Oliver Gugger Date: Tue, 24 Jun 2025 16:17:07 +0200 Subject: [PATCH 5/5] tapdb: update Postgres replacement rules after linting Because the linter now forces us to always use lowercase function names, we also need to do that to the (case sensitive!) postgres schema replacement map. --- tapdb/postgres.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tapdb/postgres.go b/tapdb/postgres.go index 654c1381a..00e9f06d3 100644 --- a/tapdb/postgres.go +++ b/tapdb/postgres.go @@ -38,7 +38,9 @@ var ( "BLOB": "BYTEA", "INTEGER PRIMARY KEY": "BIGSERIAL PRIMARY KEY", "TIMESTAMP": "TIMESTAMP WITHOUT TIME ZONE", - "UNHEX": "DECODE", + // The SQLFluff linter will force us to use lowercase function + // names consistently, so this needs to be lowercase. + "unhex": "decode", } )
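
The commit message of the last patch explains that the Postgres schema replacement map is applied case-sensitively, so its keys must match the exact (now lowercase) spelling the linter enforces in the SQL sources. As a purely illustrative aid, the following is a minimal, self-contained Go sketch of how a replacement map of this shape could be applied to a SQLite schema string before executing it against Postgres. The applySchemaReplacements helper and the sample schema are hypothetical names invented for this sketch and are not the project's actual implementation.

package main

import (
	"fmt"
	"strings"
)

// applySchemaReplacements rewrites a SQLite schema string using a
// case-sensitive replacement map so the result can be executed against
// Postgres. Hypothetical helper for illustration only.
func applySchemaReplacements(schema string, replacements map[string]string) string {
	for from, to := range replacements {
		// strings.ReplaceAll matches the exact casing of the key,
		// which is why the map must use the lowercase "unhex"
		// spelling that the linter now enforces in the SQL files.
		schema = strings.ReplaceAll(schema, from, to)
	}
	return schema
}

func main() {
	replacements := map[string]string{
		"BLOB":                "BYTEA",
		"INTEGER PRIMARY KEY": "BIGSERIAL PRIMARY KEY",
		"TIMESTAMP":           "TIMESTAMP WITHOUT TIME ZONE",
		"unhex":               "decode",
	}

	sqliteSchema := `CREATE TABLE demo (
    id INTEGER PRIMARY KEY,
    raw_key BLOB NOT NULL,
    created_at TIMESTAMP
);`

	// Prints the schema with Postgres-compatible types substituted in.
	fmt.Println(applySchemaReplacements(sqliteSchema, replacements))
}

Because none of the keys in this example overlap, the randomized map iteration order does not change the result; the point of the sketch is simply that an uppercase "UNHEX" key would no longer match the lowercase function names produced by the lint-clean SQL.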