diff --git a/common/src/api/external/mod.rs b/common/src/api/external/mod.rs
index 59933c5fe2c..651e8629700 100644
--- a/common/src/api/external/mod.rs
+++ b/common/src/api/external/mod.rs
@@ -1419,6 +1419,12 @@ impl SimpleIdentityOrName for AntiAffinityGroupMember {

 // DISKS

+#[derive(Clone, Debug, Deserialize, Serialize, JsonSchema)]
+#[serde(rename_all = "snake_case")]
+pub enum DiskType {
+    Crucible,
+}
+
 /// View of a Disk
 #[derive(ObjectIdentity, Clone, Debug, Deserialize, Serialize, JsonSchema)]
 pub struct Disk {
@@ -1433,6 +1439,7 @@ pub struct Disk {
     pub block_size: ByteCount,
     pub state: DiskState,
     pub device_path: String,
+    pub disk_type: DiskType,
 }

 /// State of a Disk
diff --git a/dev-tools/omdb/src/bin/omdb/db.rs b/dev-tools/omdb/src/bin/omdb/db.rs
index fe9d81cfce2..ef12f882b2a 100644
--- a/dev-tools/omdb/src/bin/omdb/db.rs
+++ b/dev-tools/omdb/src/bin/omdb/db.rs
@@ -65,7 +65,6 @@ use nexus_db_errors::OptionalError;
 use nexus_db_lookup::DataStoreConnection;
 use nexus_db_lookup::LookupPath;
 use nexus_db_model::CrucibleDataset;
-use nexus_db_model::Disk;
 use nexus_db_model::DnsGroup;
 use nexus_db_model::DnsName;
 use nexus_db_model::DnsVersion;
@@ -116,7 +115,9 @@ use nexus_db_model::to_db_typed_uuid;
 use nexus_db_queries::context::OpContext;
 use nexus_db_queries::db;
 use nexus_db_queries::db::DataStore;
+use nexus_db_queries::db::datastore::CrucibleDisk;
 use nexus_db_queries::db::datastore::CrucibleTargets;
+use nexus_db_queries::db::datastore::Disk;
 use nexus_db_queries::db::datastore::InstanceAndActiveVmm;
 use nexus_db_queries::db::datastore::InstanceStateComputer;
 use nexus_db_queries::db::datastore::SQL_BATCH_SIZE;
@@ -1910,7 +1911,7 @@ async fn cmd_db_disk_list(

     let disks = query
         .limit(i64::from(u32::from(fetch_opts.fetch_limit)))
-        .select(Disk::as_select())
+        .select(db::model::Disk::as_select())
         .load_async(&*datastore.pool_connection_for_tests().await?)
         .await
         .context("loading disks")?;
@@ -2096,11 +2097,10 @@ async fn cmd_db_rack_list(
     Ok(())
 }

-/// Run `omdb db disk info <UUID>`.
-async fn cmd_db_disk_info(
+async fn crucible_disk_info(
     opctx: &OpContext,
     datastore: &DataStore,
-    args: &DiskInfoArgs,
+    disk: CrucibleDisk,
 ) -> Result<(), anyhow::Error> {
     // The row describing the instance
     #[derive(Tabled)]
@@ -2125,20 +2125,17 @@ async fn cmd_db_disk_info(
         physical_disk: String,
     }

-    use nexus_db_schema::schema::disk::dsl as disk_dsl;
-
     let conn = datastore.pool_connection_for_tests().await?;

-    let disk = disk_dsl::disk
-        .filter(disk_dsl::id.eq(args.uuid))
-        .limit(1)
-        .select(Disk::as_select())
-        .load_async(&*conn)
-        .await
-        .context("loading requested disk")?;
+    let disk_name = disk.name().to_string();

-    let Some(disk) = disk.into_iter().next() else {
-        bail!("no disk: {} found", args.uuid);
+    let volume_id = disk.volume_id().to_string();
+
+    let disk_state = disk.runtime().disk_state.to_string();
+
+    let import_address = match disk.pantry_address() {
+        Some(ref pa) => pa.clone().to_string(),
+        None => "-".to_string(),
     };

     // For information about where this disk is attached.
@@ -2172,7 +2169,7 @@ async fn cmd_db_disk_info( }; let instance_name = instance.instance().name().to_string(); - let disk_name = disk.name().to_string(); + if instance.vmm().is_some() { let propolis_id = instance.instance().runtime().propolis_id.unwrap(); @@ -2184,48 +2181,36 @@ async fn cmd_db_disk_info( .await .context("failed to look up sled")?; - let import_address = match disk.pantry_address { - Some(ref pa) => pa.clone().to_string(), - None => "-".to_string(), - }; UpstairsRow { host_serial: my_sled.serial_number().to_string(), disk_name, instance_name, propolis_zone: format!("oxz_propolis-server_{}", propolis_id), - volume_id: disk.volume_id().to_string(), - disk_state: disk.runtime_state.disk_state.to_string(), + volume_id, + disk_state, import_address, } } else { - let import_address = match disk.pantry_address { - Some(ref pa) => pa.clone().to_string(), - None => "-".to_string(), - }; UpstairsRow { host_serial: NOT_ON_SLED_MSG.to_string(), disk_name, instance_name, propolis_zone: NO_ACTIVE_PROPOLIS_MSG.to_string(), - volume_id: disk.volume_id().to_string(), - disk_state: disk.runtime_state.disk_state.to_string(), + volume_id, + disk_state, import_address, } } } else { // If the disk is not attached to anything, just print empty // fields. - let import_address = match disk.pantry_address { - Some(ref pa) => pa.clone().to_string(), - None => "-".to_string(), - }; UpstairsRow { host_serial: "-".to_string(), - disk_name: disk.name().to_string(), + disk_name, instance_name: "-".to_string(), propolis_zone: "-".to_string(), - volume_id: disk.volume_id().to_string(), - disk_state: disk.runtime_state.disk_state.to_string(), + volume_id, + disk_state, import_address, } }; @@ -2274,9 +2259,23 @@ async fn cmd_db_disk_info( println!("{}", table); get_and_display_vcr(disk.volume_id(), datastore).await?; + Ok(()) } +/// Run `omdb db disk info `. +async fn cmd_db_disk_info( + opctx: &OpContext, + datastore: &DataStore, + args: &DiskInfoArgs, +) -> Result<(), anyhow::Error> { + match datastore.disk_get(opctx, args.uuid).await? { + Disk::Crucible(disk) => { + crucible_disk_info(opctx, datastore, disk).await + } + } +} + // Given a UUID, search the database for a volume with that ID // If found, attempt to parse the .data field into a VolumeConstructionRequest // and display it if successful. @@ -2397,7 +2396,7 @@ async fn cmd_db_disk_physical( .context("loading region")?; for rs in regions { - volume_ids.insert(rs.volume_id().into_untyped_uuid()); + volume_ids.insert(rs.volume_id()); } } @@ -2405,17 +2404,14 @@ async fn cmd_db_disk_physical( // that is part of a dataset on a pool on our disk. The next step is // to find the virtual disks associated with these volume IDs and // display information about those disks. - use nexus_db_schema::schema::disk::dsl; - let mut query = dsl::disk.into_boxed(); - if !fetch_opts.include_deleted { - query = query.filter(dsl::time_deleted.is_null()); - } - let disks = query - .filter(dsl::volume_id.eq_any(volume_ids)) - .limit(i64::from(u32::from(fetch_opts.fetch_limit))) - .select(Disk::as_select()) - .load_async(&*conn) + let disks: Vec = datastore + .disks_get_matching_volumes( + &conn, + &volume_ids, + fetch_opts.include_deleted, + i64::from(u32::from(fetch_opts.fetch_limit)), + ) .await .context("loading disks")?; @@ -3472,26 +3468,20 @@ async fn volume_used_by( fetch_opts: &DbFetchOptions, volumes: &[Uuid], ) -> Result, anyhow::Error> { - let disks_used: Vec = { - let volumes = volumes.to_vec(); - datastore - .pool_connection_for_tests() - .await? 
- .transaction_async(async move |conn| { - use nexus_db_schema::schema::disk::dsl; - - conn.batch_execute_async(ALLOW_FULL_TABLE_SCAN_SQL).await?; + let disks_used: Vec = { + let conn = datastore.pool_connection_for_tests().await?; + let volumes: HashSet = volumes + .iter() + .map(|id| VolumeUuid::from_untyped_uuid(*id)) + .collect(); - paginated( - dsl::disk, - dsl::id, - &first_page::(fetch_opts.fetch_limit), - ) - .filter(dsl::volume_id.eq_any(volumes)) - .select(Disk::as_select()) - .load_async(&conn) - .await - }) + datastore + .disks_get_matching_volumes( + &conn, + &volumes, + fetch_opts.include_deleted, + i64::from(u32::from(fetch_opts.fetch_limit)), + ) .await? }; @@ -4632,7 +4622,7 @@ async fn cmd_db_instance_info( } let disks = query - .select(Disk::as_select()) + .select(db::model::Disk::as_select()) .load_async(&*datastore.pool_connection_for_tests().await?) .await .with_context(ctx)?; diff --git a/nexus/db-model/src/disk.rs b/nexus/db-model/src/disk.rs index 0be30b3e1c7..5aa592c70fc 100644 --- a/nexus/db-model/src/disk.rs +++ b/nexus/db-model/src/disk.rs @@ -2,24 +2,33 @@ // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use super::{BlockSize, ByteCount, DiskState, Generation}; -use crate::typed_uuid::DbTypedUuid; +use super::BlockSize; +use super::ByteCount; +use super::DiskState; +use super::Generation; +use super::impl_enum_type; use crate::unsigned::SqlU8; use chrono::{DateTime, Utc}; use db_macros::Resource; use nexus_db_schema::schema::disk; use nexus_types::external_api::params; -use nexus_types::identity::Resource; use omicron_common::api::external; use omicron_common::api::internal; -use omicron_uuid_kinds::VolumeKind; -use omicron_uuid_kinds::VolumeUuid; use serde::{Deserialize, Serialize}; use std::convert::TryFrom; -use std::net::SocketAddrV6; use uuid::Uuid; -/// A Disk (network block device). +impl_enum_type!( + DiskTypeEnum: + + #[derive(Copy, Clone, Debug, AsExpression, FromSqlRow, Serialize, Deserialize, PartialEq)] + pub enum DiskType; + + // Enum values + Crucible => b"crucible" +); + +/// A Disk, where how the blocks are stored depend on the disk_type. #[derive( Queryable, Insertable, @@ -41,9 +50,6 @@ pub struct Disk { /// id for the project containing this Disk pub project_id: Uuid, - /// Root volume of the disk - volume_id: DbTypedUuid, - /// runtime state of the Disk #[diesel(embed)] pub runtime_state: DiskRuntimeState, @@ -65,57 +71,35 @@ pub struct Disk { /// size of blocks (512, 2048, or 4096) pub block_size: BlockSize, - /// id for the snapshot from which this Disk was created (None means a blank - /// disk) - #[diesel(column_name = origin_snapshot)] - pub create_snapshot_id: Option, - - /// id for the image from which this Disk was created (None means a blank - /// disk) - #[diesel(column_name = origin_image)] - pub create_image_id: Option, - - /// If this disk is attached to a Pantry for longer than the lifetime of a - /// saga, then this field will contain the serialized SocketAddrV6 of that - /// Pantry. - pub pantry_address: Option, + /// Information unique to each type of disk is stored in a separate table + /// (where rows are matched based on the disk_id field in that table) and + /// combined into a higher level `datastore::Disk` enum. + /// + /// For `Crucible` disks, see the `disk_type_crucible` table. 
+ pub disk_type: DiskType, } impl Disk { pub fn new( disk_id: Uuid, project_id: Uuid, - volume_id: VolumeUuid, - params: params::DiskCreate, + params: ¶ms::DiskCreate, block_size: BlockSize, runtime_initial: DiskRuntimeState, - ) -> Result { - let identity = DiskIdentity::new(disk_id, params.identity); - - let create_snapshot_id = match params.disk_source { - params::DiskSource::Snapshot { snapshot_id } => Some(snapshot_id), - _ => None, - }; - - // XXX further enum here for different image types? - let create_image_id = match params.disk_source { - params::DiskSource::Image { image_id } => Some(image_id), - _ => None, - }; + disk_type: DiskType, + ) -> Self { + let identity = DiskIdentity::new(disk_id, params.identity.clone()); - Ok(Self { + Self { identity, rcgen: external::Generation::new().into(), project_id, - volume_id: volume_id.into(), runtime_state: runtime_initial, slot: None, size: params.size.into(), block_size, - create_snapshot_id, - create_image_id, - pantry_address: None, - }) + disk_type, + } } pub fn state(&self) -> DiskState { @@ -130,29 +114,8 @@ impl Disk { self.identity.id } - pub fn pantry_address(&self) -> Option { - self.pantry_address.as_ref().map(|x| x.parse().unwrap()) - } - - pub fn volume_id(&self) -> VolumeUuid { - self.volume_id.into() - } -} - -/// Conversion to the external API type. -impl Into for Disk { - fn into(self) -> external::Disk { - let device_path = format!("/mnt/{}", self.name().as_str()); - external::Disk { - identity: self.identity(), - project_id: self.project_id, - snapshot_id: self.create_snapshot_id, - image_id: self.create_image_id, - size: self.size.into(), - block_size: self.block_size.into(), - state: self.state().into(), - device_path, - } + pub fn slot(&self) -> Option { + self.slot.map(Into::into) } } @@ -308,10 +271,3 @@ impl Into for DiskRuntimeState { } } } - -#[derive(AsChangeset)] -#[diesel(table_name = disk)] -#[diesel(treat_none_as_null = true)] -pub struct DiskUpdate { - pub pantry_address: Option, -} diff --git a/nexus/db-model/src/disk_type_crucible.rs b/nexus/db-model/src/disk_type_crucible.rs new file mode 100644 index 00000000000..600a1002c8e --- /dev/null +++ b/nexus/db-model/src/disk_type_crucible.rs @@ -0,0 +1,82 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::typed_uuid::DbTypedUuid; +use nexus_db_schema::schema::disk_type_crucible; +use nexus_types::external_api::params; +use omicron_uuid_kinds::VolumeKind; +use omicron_uuid_kinds::VolumeUuid; +use serde::{Deserialize, Serialize}; +use std::net::SocketAddrV6; +use uuid::Uuid; + +/// A Disk can be backed using Crucible, a distributed network-replicated block +/// storage service. 
+#[derive(
+    Queryable, Insertable, Clone, Debug, Selectable, Serialize, Deserialize,
+)]
+#[diesel(table_name = disk_type_crucible)]
+pub struct DiskTypeCrucible {
+    disk_id: Uuid,
+
+    /// Root volume of the disk
+    volume_id: DbTypedUuid<VolumeKind>,
+
+    /// id for the snapshot from which this Disk was created (None means a blank
+    /// disk)
+    #[diesel(column_name = origin_snapshot)]
+    pub create_snapshot_id: Option<Uuid>,
+
+    /// id for the image from which this Disk was created (None means a blank
+    /// disk)
+    #[diesel(column_name = origin_image)]
+    pub create_image_id: Option<Uuid>,
+
+    /// If this disk is attached to a Pantry for longer than the lifetime of a
+    /// saga, then this field will contain the serialized SocketAddrV6 of that
+    /// Pantry.
+    pub pantry_address: Option<String>,
+}
+
+impl DiskTypeCrucible {
+    pub fn new(
+        disk_id: Uuid,
+        volume_id: VolumeUuid,
+        params: &params::DiskCreate,
+    ) -> Self {
+        let create_snapshot_id = match params.disk_source {
+            params::DiskSource::Snapshot { snapshot_id } => Some(snapshot_id),
+            _ => None,
+        };
+
+        // XXX further enum here for different image types?
+        let create_image_id = match params.disk_source {
+            params::DiskSource::Image { image_id } => Some(image_id),
+            _ => None,
+        };
+
+        Self {
+            disk_id,
+            volume_id: volume_id.into(),
+            create_snapshot_id,
+            create_image_id,
+            pantry_address: None,
+        }
+    }
+
+    pub fn pantry_address(&self) -> Option<SocketAddrV6> {
+        self.pantry_address.as_ref().map(|x| x.parse().unwrap())
+    }
+
+    pub fn volume_id(&self) -> VolumeUuid {
+        self.volume_id.into()
+    }
+}
+
+#[derive(AsChangeset)]
+#[diesel(table_name = disk_type_crucible)]
+#[diesel(treat_none_as_null = true)]
+pub struct DiskTypeCrucibleUpdate {
+    pub pantry_address: Option<String>,
+}
diff --git a/nexus/db-model/src/lib.rs b/nexus/db-model/src/lib.rs
index 32e4d4747dd..63d355e3c88 100644
--- a/nexus/db-model/src/lib.rs
+++ b/nexus/db-model/src/lib.rs
@@ -35,6 +35,7 @@ mod device_auth;
 mod digest;
 mod disk;
 mod disk_state;
+mod disk_type_crucible;
 mod dns;
 mod downstairs;
 mod external_ip;
@@ -177,6 +178,7 @@ pub use device_auth::*;
 pub use digest::*;
 pub use disk::*;
 pub use disk_state::*;
+pub use disk_type_crucible::*;
 pub use dns::*;
 pub use downstairs::*;
 pub use ereport::*;
diff --git a/nexus/db-model/src/schema_versions.rs b/nexus/db-model/src/schema_versions.rs
index be8d77012f3..29f73c5079b 100644
--- a/nexus/db-model/src/schema_versions.rs
+++ b/nexus/db-model/src/schema_versions.rs
@@ -16,7 +16,7 @@ use std::{collections::BTreeMap, sync::LazyLock};
 ///
 /// This must be updated when you change the database schema. Refer to
 /// schema/crdb/README.adoc in the root of this repository for details.
-pub const SCHEMA_VERSION: Version = Version::new(201, 0, 0);
+pub const SCHEMA_VERSION: Version = Version::new(202, 0, 0);

 /// List of all past database schema versions, in *reverse* order
 ///
@@ -28,6 +28,7 @@ static KNOWN_VERSIONS: LazyLock<Vec<KnownVersion>> = LazyLock::new(|| {
        // | leaving the first copy as an example for the next person.
// v // KnownVersion::new(next_int, "unique-dirname-with-the-sql-files"), + KnownVersion::new(202, "disk-types"), KnownVersion::new(201, "scim-client-bearer-token"), KnownVersion::new(200, "dual-stack-network-interfaces"), KnownVersion::new(199, "multicast-pool-support"), diff --git a/nexus/db-queries/src/db/datastore/disk.rs b/nexus/db-queries/src/db/datastore/disk.rs index 3f98aef0f23..ba4d8d254c1 100644 --- a/nexus/db-queries/src/db/datastore/disk.rs +++ b/nexus/db-queries/src/db/datastore/disk.rs @@ -15,10 +15,13 @@ use crate::db::collection_detach::DatastoreDetachTarget; use crate::db::collection_detach::DetachError; use crate::db::collection_insert::AsyncInsertError; use crate::db::collection_insert::DatastoreCollection; +use crate::db::datastore::DbConnection; use crate::db::identity::Resource; -use crate::db::model::Disk; +use crate::db::model; use crate::db::model::DiskRuntimeState; -use crate::db::model::DiskUpdate; +use crate::db::model::DiskState; +use crate::db::model::DiskTypeCrucible; +use crate::db::model::DiskTypeCrucibleUpdate; use crate::db::model::Instance; use crate::db::model::Name; use crate::db::model::Project; @@ -34,6 +37,7 @@ use chrono::DateTime; use chrono::Utc; use diesel::prelude::*; use nexus_db_errors::ErrorHandler; +use nexus_db_errors::OptionalError; use nexus_db_errors::public_error_from_diesel; use nexus_db_lookup::LookupPath; use omicron_common::api; @@ -45,13 +49,216 @@ use omicron_common::api::external::LookupType; use omicron_common::api::external::ResourceType; use omicron_common::api::external::UpdateResult; use omicron_common::api::external::http_pagination::PaginatedBy; -use omicron_common::bail_unless; +use omicron_uuid_kinds::GenericUuid; use omicron_uuid_kinds::VolumeUuid; use ref_cast::RefCast; +use serde::Deserialize; +use serde::Serialize; +use std::collections::HashSet; use std::net::SocketAddrV6; use uuid::Uuid; +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum Disk { + Crucible(CrucibleDisk), +} + +impl Disk { + pub fn model(&self) -> &model::Disk { + match &self { + Disk::Crucible(disk) => disk.model(), + } + } + + pub fn id(&self) -> Uuid { + self.model().id() + } + + pub fn name(&self) -> &api::external::Name { + self.model().name() + } + + pub fn time_deleted(&self) -> Option> { + self.model().time_deleted() + } + + pub fn project_id(&self) -> Uuid { + self.model().project_id + } + + pub fn runtime(&self) -> DiskRuntimeState { + self.model().runtime() + } + + pub fn state(&self) -> DiskState { + self.model().state() + } + + pub fn size(&self) -> model::ByteCount { + self.model().size + } + + pub fn slot(&self) -> Option { + self.model().slot() + } + + pub fn block_size(&self) -> model::BlockSize { + self.model().block_size + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct CrucibleDisk { + pub disk: model::Disk, + pub disk_type_crucible: DiskTypeCrucible, +} + +impl CrucibleDisk { + pub fn model(&self) -> &model::Disk { + &self.disk + } + + pub fn id(&self) -> Uuid { + self.model().id() + } + + pub fn name(&self) -> &api::external::Name { + self.model().name() + } + + pub fn time_deleted(&self) -> Option> { + self.model().time_deleted() + } + + pub fn project_id(&self) -> Uuid { + self.model().project_id + } + + pub fn runtime(&self) -> DiskRuntimeState { + self.model().runtime() + } + + pub fn state(&self) -> DiskState { + self.model().state() + } + + pub fn size(&self) -> model::ByteCount { + self.model().size + } + + pub fn slot(&self) -> Option { + self.model().slot() + } + + pub fn volume_id(&self) 
-> VolumeUuid { + self.disk_type_crucible.volume_id() + } + + pub fn pantry_address(&self) -> Option { + self.disk_type_crucible.pantry_address() + } +} + +/// Conversion to the external API type. +impl Into for Disk { + fn into(self) -> api::external::Disk { + match self { + Disk::Crucible(CrucibleDisk { disk, disk_type_crucible }) => { + // XXX can we remove this? + let device_path = format!("/mnt/{}", disk.name().as_str()); + api::external::Disk { + identity: disk.identity(), + project_id: disk.project_id, + snapshot_id: disk_type_crucible.create_snapshot_id, + image_id: disk_type_crucible.create_image_id, + size: disk.size.into(), + block_size: disk.block_size.into(), + state: disk.state().into(), + device_path, + disk_type: api::external::DiskType::Crucible, + } + } + } + } +} + impl DataStore { + pub async fn disk_get( + &self, + opctx: &OpContext, + disk_id: Uuid, + ) -> LookupResult { + let (.., disk) = + LookupPath::new(opctx, self).disk_id(disk_id).fetch().await?; + + let disk = match disk.disk_type { + db::model::DiskType::Crucible => { + let conn = self.pool_connection_authorized(opctx).await?; + + use nexus_db_schema::schema::disk_type_crucible::dsl; + + let disk_type_crucible = dsl::disk_type_crucible + .filter(dsl::disk_id.eq(disk_id)) + .select(DiskTypeCrucible::as_select()) + .first_async(&*conn) + .await + .map_err(|e| { + public_error_from_diesel(e, ErrorHandler::Server) + })?; + + Disk::Crucible(CrucibleDisk { disk, disk_type_crucible }) + } + }; + + Ok(disk) + } + + /// Return all the Crucible Disks matching a list of volume IDs. Currently + /// this is only used by omdb. + pub async fn disks_get_matching_volumes( + &self, + conn: &async_bb8_diesel::Connection, + volume_ids: &HashSet, + include_deleted: bool, + limit: i64, + ) -> ListResultVec { + use nexus_db_schema::schema::disk::dsl; + use nexus_db_schema::schema::disk_type_crucible::dsl as disk_type_crucible_dsl; + + let mut query = dsl::disk.into_boxed(); + if !include_deleted { + query = query.filter(dsl::time_deleted.is_null()); + } + + let volume_ids: Vec = volume_ids + .iter() + .map(|volume_id| volume_id.into_untyped_uuid()) + .collect(); + + let result: Vec = query + .inner_join( + disk_type_crucible_dsl::disk_type_crucible + .on(disk_type_crucible_dsl::disk_id.eq(dsl::id)), + ) + .filter(disk_type_crucible_dsl::volume_id.eq_any(volume_ids)) + .limit(limit) + .select(( + db::model::Disk::as_select(), + db::model::DiskTypeCrucible::as_select(), + )) + .load_async(conn) + .await + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))? + .into_iter() + .map(|(disk, disk_type_crucible)| CrucibleDisk { + disk, + disk_type_crucible, + }) + .collect(); + + Ok(result) + } + /// List disks associated with a given instance by name. 
pub async fn instance_list_disks( &self, @@ -60,10 +267,11 @@ impl DataStore { pagparams: &PaginatedBy<'_>, ) -> ListResultVec { use nexus_db_schema::schema::disk::dsl; + use nexus_db_schema::schema::disk_type_crucible::dsl as disk_type_crucible_dsl; opctx.authorize(authz::Action::ListChildren, authz_instance).await?; - match pagparams { + let results = match pagparams { PaginatedBy::Id(pagparams) => { paginated(dsl::disk, dsl::id, &pagparams) } @@ -73,59 +281,156 @@ impl DataStore { &pagparams.map_name(|n| Name::ref_cast(n)), ), } + .left_join( + disk_type_crucible_dsl::disk_type_crucible + .on(dsl::id.eq(disk_type_crucible_dsl::disk_id)), + ) .filter(dsl::time_deleted.is_null()) .filter(dsl::attach_instance_id.eq(authz_instance.id())) - .select(Disk::as_select()) - .load_async::(&*self.pool_connection_authorized(opctx).await?) + .select(( + model::Disk::as_select(), + Option::::as_select(), + )) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + let mut list = Vec::with_capacity(results.len()); + + for result in results { + match result { + (disk, Some(disk_type_crucible)) => { + list.push(Disk::Crucible(CrucibleDisk { + disk, + disk_type_crucible, + })); + } + + (disk, None) => { + // The above paginated query attempts to get all types of + // disk in one query, instead of matching on the disk type + // of each returned disk row and doing additional queries. + // + // If we're in this branch then that query didn't return the + // type-specific information for a disk. It's possible that + // disk was constructed wrong, or that a new disk type + // hasn't been added to the above query and this match. 
+ return Err(Error::internal_error(&format!( + "disk {} is type {:?}, but no type-specific row found!", + disk.id(), + disk.disk_type, + ))); + } + } + } + + Ok(list) } - pub async fn project_create_disk( - &self, - opctx: &OpContext, + pub(super) async fn project_create_disk_in_txn( + conn: &async_bb8_diesel::Connection, + err: OptionalError, authz_project: &authz::Project, disk: Disk, - ) -> CreateResult { + ) -> Result { use nexus_db_schema::schema::disk::dsl; - - opctx.authorize(authz::Action::CreateChild, authz_project).await?; + use nexus_db_schema::schema::disk_type_crucible::dsl as disk_type_crucible_dsl; let gen = disk.runtime().gen; let name = disk.name().clone(); - let project_id = disk.project_id; + let project_id = disk.project_id(); - let disk: Disk = Project::insert_resource( + let disk_model: model::Disk = Project::insert_resource( project_id, diesel::insert_into(dsl::disk) - .values(disk) + .values(disk.model().clone()) .on_conflict(dsl::id) .do_update() .set(dsl::time_modified.eq(dsl::time_modified)), ) - .insert_and_get_result_async( - &*self.pool_connection_authorized(opctx).await?, - ) + .insert_and_get_result_async(conn) .await - .map_err(|e| match e { - AsyncInsertError::CollectionNotFound => authz_project.not_found(), - AsyncInsertError::DatabaseError(e) => public_error_from_diesel( - e, - ErrorHandler::Conflict(ResourceType::Disk, name.as_str()), - ), + .map_err(|e| { + err.bail(match e { + AsyncInsertError::CollectionNotFound => { + authz_project.not_found() + } + AsyncInsertError::DatabaseError(e) => public_error_from_diesel( + e, + ErrorHandler::Conflict(ResourceType::Disk, name.as_str()), + ), + }) })?; - let runtime = disk.runtime(); - bail_unless!( - runtime.state().state() == &api::external::DiskState::Creating, - "newly-created Disk has unexpected state: {:?}", - runtime.disk_state - ); - bail_unless!( - runtime.gen == gen, - "newly-created Disk has unexpected generation: {:?}", - runtime.gen - ); + match &disk { + Disk::Crucible(CrucibleDisk { disk: _, disk_type_crucible }) => { + diesel::insert_into(disk_type_crucible_dsl::disk_type_crucible) + .values(disk_type_crucible.clone()) + .on_conflict(disk_type_crucible_dsl::disk_id) + .do_nothing() + .execute_async(conn) + .await?; + } + } + + // Perform a few checks in the transaction on the inserted Disk to + // ensure that the newly created Disk is valid (even if there was an + // insertion conflict). 
+ + if disk_model.state().state() != &api::external::DiskState::Creating { + return Err(err.bail(Error::internal_error(&format!( + "newly-created Disk has unexpected state: {:?}", + disk_model.state(), + )))); + } + + let runtime = disk_model.runtime(); + + if runtime.gen != gen { + return Err(err.bail(Error::internal_error(&format!( + "newly-created Disk has unexpected generation: {:?}", + runtime.gen + )))); + } + + Ok(disk) + } + + pub async fn project_create_disk( + &self, + opctx: &OpContext, + authz_project: &authz::Project, + disk: Disk, + ) -> CreateResult { + opctx.authorize(authz::Action::CreateChild, authz_project).await?; + + let err = OptionalError::new(); + let conn = self.pool_connection_authorized(opctx).await?; + + let disk = self + .transaction_retry_wrapper("project_create_disk") + .transaction(&conn, |conn| { + let disk = disk.clone(); + let err = err.clone(); + async move { + Self::project_create_disk_in_txn( + &conn, + err, + authz_project, + disk, + ) + .await + } + }) + .await + .map_err(|e| { + if let Some(err) = err.take() { + err + } else { + public_error_from_diesel(e, ErrorHandler::Server) + } + })?; + Ok(disk) } @@ -138,7 +443,9 @@ impl DataStore { opctx.authorize(authz::Action::ListChildren, authz_project).await?; use nexus_db_schema::schema::disk::dsl; - match pagparams { + use nexus_db_schema::schema::disk_type_crucible::dsl as disk_type_crucible_dsl; + + let results = match pagparams { PaginatedBy::Id(pagparams) => { paginated(dsl::disk, dsl::id, &pagparams) } @@ -148,12 +455,41 @@ impl DataStore { &pagparams.map_name(|n| Name::ref_cast(n)), ), } + .left_join( + disk_type_crucible_dsl::disk_type_crucible + .on(dsl::id.eq(disk_type_crucible_dsl::disk_id)), + ) .filter(dsl::time_deleted.is_null()) .filter(dsl::project_id.eq(authz_project.id())) - .select(Disk::as_select()) - .load_async::(&*self.pool_connection_authorized(opctx).await?) + .select(( + model::Disk::as_select(), + Option::::as_select(), + )) + .get_results_async(&*self.pool_connection_authorized(opctx).await?) .await - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + let mut list = Vec::with_capacity(results.len()); + + for result in results { + match result { + (disk, Some(disk_type_crucible)) => { + list.push(Disk::Crucible(CrucibleDisk { + disk, + disk_type_crucible, + })); + } + + (disk, None) => { + return Err(Error::internal_error(&format!( + "disk {} is invalid!", + disk.id(), + ))); + } + } + } + + Ok(list) } /// Attaches a disk to an instance, if both objects: @@ -206,9 +542,11 @@ impl DataStore { diesel::update(disk::dsl::disk).set(attach_update), ); - let (instance, disk) = query.attach_and_get_result_async(&*self.pool_connection_authorized(opctx).await?) 
+ let conn = self.pool_connection_authorized(opctx).await?; + + let (instance, _db_disk) = query.attach_and_get_result_async(&conn) .await - .or_else(|e: AttachError| { + .or_else(|e: AttachError| { match e { AttachError::CollectionNotFound => { Err(Error::not_found_by_id( @@ -298,6 +636,8 @@ impl DataStore { } })?; + let disk = self.disk_get(&opctx, authz_disk.id()).await?; + Ok((instance, disk)) } @@ -330,7 +670,9 @@ impl DataStore { let detached_label = api::external::DiskState::Detached.label(); - let disk = Instance::detach_resource( + let conn = self.pool_connection_authorized(opctx).await?; + + let _db_disk = Instance::detach_resource( authz_instance.id(), authz_disk.id(), instance::table @@ -352,9 +694,9 @@ impl DataStore { disk::dsl::slot.eq(Option::::None) )) ) - .detach_and_get_result_async(&*self.pool_connection_authorized(opctx).await?) + .detach_and_get_result_async(&conn) .await - .or_else(|e: DetachError| { + .or_else(|e: DetachError| { match e { DetachError::CollectionNotFound => { Err(Error::not_found_by_id( @@ -445,6 +787,8 @@ impl DataStore { } })?; + let disk = self.disk_get(&opctx, authz_disk.id()).await?; + Ok(disk) } @@ -472,7 +816,7 @@ impl DataStore { .filter(dsl::id.eq(disk_id)) .filter(dsl::state_generation.lt(new_runtime.gen)) .set(new_runtime.clone()) - .check_if_exists::(disk_id) + .check_if_exists::(disk_id) .execute_and_check(&*self.pool_connection_authorized(opctx).await?) .await .map(|r| match r.status { @@ -495,15 +839,22 @@ impl DataStore { authz_disk: &authz::Disk, pantry_address: SocketAddrV6, ) -> UpdateResult { + use nexus_db_schema::schema::disk::dsl as disk_dsl; + use nexus_db_schema::schema::disk_type_crucible::dsl; + opctx.authorize(authz::Action::Modify, authz_disk).await?; let disk_id = authz_disk.id(); - use nexus_db_schema::schema::disk::dsl; - let updated = diesel::update(dsl::disk) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(disk_id)) + + let updated = diesel::update(dsl::disk_type_crucible) + .filter(diesel::dsl::exists( + disk_dsl::disk + .filter(disk_dsl::id.eq(disk_id)) + .filter(disk_dsl::time_deleted.is_null()), + )) + .filter(dsl::disk_id.eq(disk_id)) .set(dsl::pantry_address.eq(pantry_address.to_string())) - .check_if_exists::(disk_id) + .check_if_exists::(disk_id) .execute_and_check(&*self.pool_connection_authorized(opctx).await?) .await .map(|r| match r.status { @@ -525,15 +876,22 @@ impl DataStore { opctx: &OpContext, authz_disk: &authz::Disk, ) -> UpdateResult { + use nexus_db_schema::schema::disk::dsl as disk_dsl; + use nexus_db_schema::schema::disk_type_crucible::dsl; + opctx.authorize(authz::Action::Modify, authz_disk).await?; let disk_id = authz_disk.id(); - use nexus_db_schema::schema::disk::dsl; - let updated = diesel::update(dsl::disk) - .filter(dsl::time_deleted.is_null()) - .filter(dsl::id.eq(disk_id)) - .set(&DiskUpdate { pantry_address: None }) - .check_if_exists::(disk_id) + + let updated = diesel::update(dsl::disk_type_crucible) + .filter(diesel::dsl::exists( + disk_dsl::disk + .filter(disk_dsl::id.eq(disk_id)) + .filter(disk_dsl::time_deleted.is_null()), + )) + .filter(dsl::disk_id.eq(disk_id)) + .set(&DiskTypeCrucibleUpdate { pantry_address: None }) + .check_if_exists::(disk_id) .execute_and_check(&*self.pool_connection_authorized(opctx).await?) 
.await .map(|r| match r.status { @@ -550,33 +908,6 @@ impl DataStore { Ok(updated) } - /// Fetches information about a Disk that the caller has previously fetched - /// - /// The only difference between this function and a new fetch by id is that - /// this function preserves the `authz_disk` that you started with -- which - /// keeps track of how you looked it up. So if you looked it up by name, - /// the authz you get back will reflect that, whereas if you did a fresh - /// lookup by id, it wouldn't. - /// TODO-cleanup this could be provided by the Lookup API for any resource - pub async fn disk_refetch( - &self, - opctx: &OpContext, - authz_disk: &authz::Disk, - ) -> LookupResult { - let (.., db_disk) = LookupPath::new(opctx, self) - .disk_id(authz_disk.id()) - .fetch() - .await - .map_err(|e| match e { - // Use the "not found" message of the authz object we were - // given, which will reflect however the caller originally - // looked it up. - Error::ObjectNotFound { .. } => authz_disk.not_found(), - e => e, - })?; - Ok(db_disk) - } - /// Updates a disk record to indicate it has been deleted. /// /// Returns the disk before any modifications are made by this function. @@ -604,7 +935,7 @@ impl DataStore { &self, disk_id: &Uuid, ok_to_delete_states: &[api::external::DiskState], - ) -> Result { + ) -> Result { use nexus_db_schema::schema::disk::dsl; let conn = self.pool_connection_unauthorized().await?; let now = Utc::now(); @@ -619,7 +950,7 @@ impl DataStore { .filter(dsl::disk_state.eq_any(ok_to_delete_state_labels)) .filter(dsl::attach_instance_id.is_null()) .set((dsl::disk_state.eq(destroyed), dsl::time_deleted.eq(now))) - .check_if_exists::(*disk_id) + .check_if_exists::(*disk_id) .execute_and_check(&conn) .await .map_err(|e| { @@ -706,7 +1037,7 @@ impl DataStore { dsl::disk_state.eq(faulted), dsl::name.eq(new_name), )) - .check_if_exists::(*disk_id) + .check_if_exists::(*disk_id) .execute_and_check(&conn) .await .map_err(|e| { @@ -750,26 +1081,31 @@ impl DataStore { /// in customer-support#58) would mean the disk was deleted but the project /// it was in could not be deleted (due to an erroneous number of bytes /// "still provisioned"). - pub async fn find_phantom_disks(&self) -> ListResultVec { - use nexus_db_schema::schema::disk::dsl; + pub async fn find_phantom_disks(&self) -> ListResultVec { + use nexus_db_schema::schema::disk::dsl as disk_dsl; + use nexus_db_schema::schema::disk_type_crucible::dsl; use nexus_db_schema::schema::virtual_provisioning_resource::dsl as resource_dsl; use nexus_db_schema::schema::volume::dsl as volume_dsl; let conn = self.pool_connection_unauthorized().await?; let potential_phantom_disks: Vec<( - Disk, + model::Disk, Option, Option, - )> = dsl::disk - .filter(dsl::time_deleted.is_not_null()) + )> = disk_dsl::disk + // only Crucible disks have volumes + .inner_join( + dsl::disk_type_crucible.on(dsl::disk_id.eq(disk_dsl::id)), + ) .left_join( resource_dsl::virtual_provisioning_resource - .on(resource_dsl::id.eq(dsl::id)), + .on(resource_dsl::id.eq(disk_dsl::id)), ) .left_join(volume_dsl::volume.on(dsl::volume_id.eq(volume_dsl::id))) + .filter(disk_dsl::time_deleted.is_not_null()) .select(( - Disk::as_select(), + model::Disk::as_select(), Option::::as_select(), Option::::as_select(), )) @@ -824,20 +1160,31 @@ impl DataStore { .collect()) } + /// Returns a Some(disk) that has a matching volume ID, None if no disk + /// matches that volume ID, or an error. Only disks of type `Crucible` have + /// volumes, so that is the returned type. 
pub async fn disk_for_volume_id( &self, volume_id: VolumeUuid, - ) -> LookupResult> { + ) -> LookupResult> { let conn = self.pool_connection_unauthorized().await?; - use nexus_db_schema::schema::disk::dsl; - dsl::disk + use nexus_db_schema::schema::disk::dsl as disk_dsl; + use nexus_db_schema::schema::disk_type_crucible::dsl; + + let maybe_tuple = dsl::disk_type_crucible + .inner_join(disk_dsl::disk.on(disk_dsl::id.eq(dsl::disk_id))) .filter(dsl::volume_id.eq(to_db_typed_uuid(volume_id))) - .select(Disk::as_select()) + .select((model::Disk::as_select(), DiskTypeCrucible::as_select())) .first_async(&*conn) .await .optional() - .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server)) + .map_err(|e| public_error_from_diesel(e, ErrorHandler::Server))?; + + Ok(maybe_tuple.map(|(disk, disk_type_crucible)| CrucibleDisk { + disk, + disk_type_crucible, + })) } } @@ -876,29 +1223,41 @@ mod tests { .await .unwrap(); + let disk_id = Uuid::new_v4(); + let create_params = params::DiskCreate { + identity: external::IdentityMetadataCreateParams { + name: "first-post".parse().unwrap(), + description: "just trying things out".to_string(), + }, + disk_source: params::DiskSource::Blank { + block_size: params::BlockSize::try_from(512).unwrap(), + }, + size: external::ByteCount::from(2147483648), + }; + + let disk = db::model::Disk::new( + disk_id, + authz_project.id(), + &create_params, + db::model::BlockSize::Traditional, + DiskRuntimeState::new(), + db::model::DiskType::Crucible, + ); + + let disk_type_crucible = db::model::DiskTypeCrucible::new( + disk_id, + VolumeUuid::new_v4(), + &create_params, + ); + let disk = db_datastore .project_create_disk( &opctx, &authz_project, - Disk::new( - Uuid::new_v4(), - authz_project.id(), - VolumeUuid::new_v4(), - params::DiskCreate { - identity: external::IdentityMetadataCreateParams { - name: "first-post".parse().unwrap(), - description: "just trying things out".to_string(), - }, - disk_source: params::DiskSource::Blank { - block_size: params::BlockSize::try_from(512) - .unwrap(), - }, - size: external::ByteCount::from(2147483648), - }, - db::model::BlockSize::Traditional, - DiskRuntimeState::new(), - ) - .unwrap(), + db::datastore::Disk::Crucible(db::datastore::CrucibleDisk { + disk, + disk_type_crucible, + }), ) .await .unwrap(); diff --git a/nexus/db-queries/src/db/datastore/mod.rs b/nexus/db-queries/src/db/datastore/mod.rs index 3667323d493..d2d61430b9a 100644 --- a/nexus/db-queries/src/db/datastore/mod.rs +++ b/nexus/db-queries/src/db/datastore/mod.rs @@ -129,6 +129,8 @@ pub use address_lot::AddressLotCreateResult; pub use db_metadata::DatastoreSetupAction; pub use db_metadata::ValidatedDatastoreSetupAction; pub use deployment::BlueprintLimitReachedOutput; +pub use disk::CrucibleDisk; +pub use disk::Disk; pub use dns::DataStoreDnsTest; pub use dns::DnsVersionUpdateBuilder; pub use ereport::EreportFilters; diff --git a/nexus/db-queries/src/db/datastore/volume.rs b/nexus/db-queries/src/db/datastore/volume.rs index b7a4ccf8562..7bc50bda275 100644 --- a/nexus/db-queries/src/db/datastore/volume.rs +++ b/nexus/db-queries/src/db/datastore/volume.rs @@ -864,11 +864,19 @@ impl DataStore { let (maybe_disk, maybe_instance) = { use nexus_db_schema::schema::disk::dsl as disk_dsl; + use nexus_db_schema::schema::disk_type_crucible::dsl as disk_type_crucible_dsl; use nexus_db_schema::schema::instance::dsl as instance_dsl; let maybe_disk: Option = disk_dsl::disk + .inner_join( + disk_type_crucible_dsl::disk_type_crucible + 
.on(disk_type_crucible_dsl::disk_id.eq(disk_dsl::id)),
+            )
             .filter(disk_dsl::time_deleted.is_null())
-            .filter(disk_dsl::volume_id.eq(to_db_typed_uuid(volume_id)))
+            .filter(
+                disk_type_crucible_dsl::volume_id
+                    .eq(to_db_typed_uuid(volume_id)),
+            )
             .select(Disk::as_select())
             .get_result_async(conn)
             .await
diff --git a/nexus/db-schema/src/enums.rs b/nexus/db-schema/src/enums.rs
index 5a288abe2fb..849c780c985 100644
--- a/nexus/db-schema/src/enums.rs
+++ b/nexus/db-schema/src/enums.rs
@@ -39,6 +39,7 @@ define_enums! {
     ClickhouseModeEnum => "clickhouse_mode",
     DatasetKindEnum => "dataset_kind",
     DbMetadataNexusStateEnum => "db_metadata_nexus_state",
+    DiskTypeEnum => "disk_type",
     DnsGroupEnum => "dns_group",
     DownstairsClientStopRequestReasonEnum => "downstairs_client_stop_request_reason_type",
     DownstairsClientStoppedReasonEnum => "downstairs_client_stopped_reason_type",
diff --git a/nexus/db-schema/src/schema.rs b/nexus/db-schema/src/schema.rs
index ae62b1a327d..8409127a642 100644
--- a/nexus/db-schema/src/schema.rs
+++ b/nexus/db-schema/src/schema.rs
@@ -18,7 +18,6 @@ table! {
         time_deleted -> Nullable<Timestamptz>,
         rcgen -> Int8,
         project_id -> Uuid,
-        volume_id -> Uuid,
         disk_state -> Text,
         attach_instance_id -> Nullable<Uuid>,
         state_generation -> Int8,
@@ -26,12 +25,27 @@ table! {
         slot -> Nullable<Int2>,
         size_bytes -> Int8,
         block_size -> crate::enums::BlockSizeEnum,
+        disk_type -> crate::enums::DiskTypeEnum,
+    }
+}
+
+table! {
+    disk_type_crucible (disk_id) {
+        disk_id -> Uuid,
+        volume_id -> Uuid,
         origin_snapshot -> Nullable<Uuid>,
         origin_image -> Nullable<Uuid>,
         pantry_address -> Nullable<Text>,
     }
 }

+allow_tables_to_appear_in_same_query!(disk, disk_type_crucible);
+allow_tables_to_appear_in_same_query!(volume, disk_type_crucible);
+allow_tables_to_appear_in_same_query!(
+    disk_type_crucible,
+    virtual_provisioning_resource
+);
+
 table!
{ image (id) { id -> Uuid, diff --git a/nexus/src/app/disk.rs b/nexus/src/app/disk.rs index b922fd2aa0c..c6ed1a3bcac 100644 --- a/nexus/src/app/disk.rs +++ b/nexus/src/app/disk.rs @@ -12,6 +12,8 @@ use nexus_db_queries::authn; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; +use nexus_db_queries::db::datastore; +use omicron_common::api::external; use omicron_common::api::external::ByteCount; use omicron_common::api::external::CreateResult; use omicron_common::api::external::DeleteResult; @@ -24,7 +26,6 @@ use omicron_common::api::external::NameOrId; use omicron_common::api::external::UpdateResult; use omicron_common::api::external::http_pagination::PaginatedBy; use omicron_common::api::internal::nexus::DiskRuntimeState; -use sled_agent_client::Client as SledAgentClient; use std::sync::Arc; use uuid::Uuid; @@ -32,7 +33,6 @@ use super::MAX_DISK_SIZE_BYTES; use super::MIN_DISK_SIZE_BYTES; impl super::Nexus { - // Disks pub fn disk_lookup<'a>( &'a self, opctx: &'a OpContext, @@ -64,6 +64,19 @@ impl super::Nexus { } } + pub async fn disk_get( + &self, + opctx: &OpContext, + disk_selector: params::DiskSelector, + ) -> LookupResult { + let disk_lookup = self.disk_lookup(opctx, disk_selector)?; + + let (.., authz_disk) = + disk_lookup.lookup_for(authz::Action::Read).await?; + + self.db_datastore.disk_get(opctx, authz_disk.id()).await + } + pub(super) async fn validate_disk_create_params( self: &Arc, opctx: &OpContext, @@ -187,7 +200,7 @@ impl super::Nexus { opctx: &OpContext, project_lookup: &lookup::Project<'_>, params: ¶ms::DiskCreate, - ) -> CreateResult { + ) -> CreateResult { let (.., authz_project) = project_lookup.lookup_for(authz::Action::CreateChild).await?; self.validate_disk_create_params(opctx, &authz_project, params).await?; @@ -197,15 +210,18 @@ impl super::Nexus { project_id: authz_project.id(), create_params: params.clone(), }; + let saga_outputs = self .sagas .saga_execute::(saga_params) .await?; + let disk_created = saga_outputs - .lookup_node_output::("created_disk") + .lookup_node_output::("created_disk") .map_err(|e| Error::internal_error(&format!("{:#}", &e))) .internal_context("looking up output from disk create saga")?; - Ok(disk_created) + + Ok(db::datastore::Disk::Crucible(disk_created)) } pub(crate) async fn disk_list( @@ -213,53 +229,14 @@ impl super::Nexus { opctx: &OpContext, project_lookup: &lookup::Project<'_>, pagparams: &PaginatedBy<'_>, - ) -> ListResultVec { + ) -> ListResultVec { let (.., authz_project) = project_lookup.lookup_for(authz::Action::ListChildren).await?; - self.db_datastore.disk_list(opctx, &authz_project, pagparams).await - } - - /// Modifies the runtime state of the Disk as requested. This generally - /// means attaching or detaching the disk. - // TODO(https://github.com/oxidecomputer/omicron/issues/811): - // This will be unused until we implement hot-plug support. - // However, it has been left for reference until then, as it will - // likely be needed once that feature is implemented. - #[allow(dead_code)] - pub(crate) async fn disk_set_runtime( - &self, - opctx: &OpContext, - authz_disk: &authz::Disk, - db_disk: &db::model::Disk, - sa: Arc, - requested: sled_agent_client::types::DiskStateRequested, - ) -> Result<(), Error> { - let runtime: DiskRuntimeState = db_disk.runtime().into(); - - opctx.authorize(authz::Action::Modify, authz_disk).await?; - - // Ask the Sled Agent to begin the state change. Then update the - // database to reflect the new intermediate state. 
- let new_runtime = sa - .disk_put( - &authz_disk.id(), - &sled_agent_client::types::DiskEnsureBody { - initial_runtime: - sled_agent_client::types::DiskRuntimeState::from( - runtime, - ), - target: requested, - }, - ) - .await - .map_err(Error::from)?; - - let new_runtime: DiskRuntimeState = new_runtime.into_inner().into(); - - self.db_datastore - .disk_update_runtime(opctx, authz_disk, &new_runtime.into()) - .await - .map(|_| ()) + let disks = self + .db_datastore + .disk_list(opctx, &authz_project, pagparams) + .await?; + Ok(disks.into_iter().map(Into::into).collect()) } pub(crate) async fn notify_disk_updated( @@ -327,20 +304,18 @@ impl super::Nexus { let (.., project, authz_disk) = disk_lookup.lookup_for(authz::Action::Delete).await?; - let (.., db_disk) = LookupPath::new(opctx, &self.db_datastore) - .disk_id(authz_disk.id()) - .fetch() - .await?; + let disk = self.datastore().disk_get(opctx, authz_disk.id()).await?; let saga_params = sagas::disk_delete::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), project_id: project.id(), - disk_id: authz_disk.id(), - volume_id: db_disk.volume_id(), + disk, }; + self.sagas .saga_execute::(saga_params) .await?; + Ok(()) } @@ -356,13 +331,15 @@ impl super::Nexus { // First get the internal volume ID that is stored in the disk // database entry, once we have that just call the volume method // to remove the read only parent. - let (.., db_disk) = LookupPath::new(opctx, &self.db_datastore) - .disk_id(disk_id) - .fetch() - .await?; - self.volume_remove_read_only_parent(&opctx, db_disk.volume_id()) - .await?; + let disk = self.datastore().disk_get(opctx, disk_id).await?; + + match disk { + datastore::Disk::Crucible(disk) => { + self.volume_remove_read_only_parent(&opctx, disk.volume_id()) + .await?; + } + } Ok(()) } @@ -380,6 +357,12 @@ impl super::Nexus { (.., authz_disk, db_disk) = disk_lookup.fetch_for(authz::Action::Modify).await?; + match db_disk.disk_type { + db::model::DiskType::Crucible => { + // ok + } + } + let disk_state: DiskState = db_disk.state().into(); match disk_state { DiskState::ImportReady => { @@ -405,17 +388,23 @@ impl super::Nexus { .map(|_| ()) } - /// Bulk write some bytes into a disk that's in state ImportingFromBulkWrites + /// Bulk write some bytes into a disk that's in state + /// ImportingFromBulkWrites pub(crate) async fn disk_manual_import( self: &Arc, + opctx: &OpContext, disk_lookup: &lookup::Disk<'_>, param: params::ImportBlocksBulkWrite, ) -> UpdateResult<()> { - let db_disk: db::model::Disk; + let (.., authz_disk) = + disk_lookup.lookup_for(authz::Action::Modify).await?; - (.., db_disk) = disk_lookup.fetch_for(authz::Action::Modify).await?; + let disk = + match self.datastore().disk_get(opctx, authz_disk.id()).await? 
{ + db::datastore::Disk::Crucible(disk) => disk, + }; - let disk_state: DiskState = db_disk.state().into(); + let disk_state: DiskState = disk.state().into(); match disk_state { DiskState::ImportingFromBulkWrites => { // ok @@ -423,13 +412,13 @@ impl super::Nexus { _ => { return Err(Error::invalid_request(&format!( - "cannot import blocks with a bulk write for disk in state {:?}", - disk_state, + "cannot import blocks with a bulk write for disk in state \ + {disk_state:?}", ))); } } - if let Some(endpoint) = db_disk.pantry_address() { + if let Some(endpoint) = disk.pantry_address() { let data: Vec = base64::Engine::decode( &base64::engine::general_purpose::STANDARD, ¶m.base64_encoded_data, @@ -443,11 +432,11 @@ impl super::Nexus { info!( self.log, - "bulk write of {} bytes to offset {} of disk {} using pantry endpoint {:?}", + "bulk write of {} bytes to offset {} of disk {} using pantry \ + endpoint {endpoint:?}", data.len(), param.offset, - db_disk.id(), - endpoint, + disk.id(), ); // The the disk state can change between the check above and here @@ -495,10 +484,8 @@ impl super::Nexus { base64_encoded_data: param.base64_encoded_data, }; - client - .bulk_write(&db_disk.id().to_string(), &request) - .await - .map_err(|e| match e { + client.bulk_write(&disk.id().to_string(), &request).await.map_err( + |e| match e { crucible_pantry_client::Error::ErrorResponse(rv) => { match rv.status() { status if status.is_client_error() => { @@ -513,14 +500,15 @@ impl super::Nexus { "error sending bulk write to pantry: {}", e, )), - })?; + }, + )?; Ok(()) } else { - error!(self.log, "disk {} has no pantry address!", db_disk.id()); + error!(self.log, "disk {} has no pantry address!", disk.id()); Err(Error::internal_error(&format!( "disk {} has no pantry address!", - db_disk.id(), + disk.id(), ))) } } @@ -539,6 +527,12 @@ impl super::Nexus { (.., authz_disk, db_disk) = disk_lookup.fetch_for(authz::Action::Modify).await?; + match db_disk.disk_type { + db::model::DiskType::Crucible => { + // ok + } + } + let disk_state: DiskState = db_disk.state().into(); match disk_state { DiskState::ImportingFromBulkWrites => { @@ -572,14 +566,19 @@ impl super::Nexus { disk_lookup: &lookup::Disk<'_>, finalize_params: ¶ms::FinalizeDisk, ) -> UpdateResult<()> { - let (authz_silo, authz_proj, authz_disk) = - disk_lookup.lookup_for(authz::Action::Modify).await?; + let (authz_silo, authz_proj, authz_disk, _db_disk) = + disk_lookup.fetch_for(authz::Action::Modify).await?; + + let disk: datastore::CrucibleDisk = + match self.datastore().disk_get(&opctx, authz_disk.id()).await? 
{ + datastore::Disk::Crucible(disk) => disk, + }; let saga_params = sagas::finalize_disk::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), silo_id: authz_silo.id(), project_id: authz_proj.id(), - disk_id: authz_disk.id(), + disk, snapshot_name: finalize_params.snapshot_name.clone(), }; diff --git a/nexus/src/app/instance.rs b/nexus/src/app/instance.rs index 38a648472df..0bbd1012b14 100644 --- a/nexus/src/app/instance.rs +++ b/nexus/src/app/instance.rs @@ -1443,7 +1443,7 @@ impl super::Nexus { opctx: &OpContext, instance_lookup: &lookup::Instance<'_>, pagparams: &PaginatedBy<'_>, - ) -> ListResultVec { + ) -> ListResultVec { let (.., authz_instance) = instance_lookup.lookup_for(authz::Action::ListChildren).await?; self.db_datastore @@ -1457,9 +1457,10 @@ impl super::Nexus { opctx: &OpContext, instance_lookup: &lookup::Instance<'_>, disk: NameOrId, - ) -> UpdateResult { + ) -> UpdateResult { let (.., authz_project, authz_instance) = instance_lookup.lookup_for(authz::Action::Modify).await?; + let (.., authz_project_disk, authz_disk) = self .disk_lookup( opctx, @@ -1508,6 +1509,7 @@ impl super::Nexus { MAX_DISKS_PER_INSTANCE, ) .await?; + Ok(disk) } @@ -1517,7 +1519,7 @@ impl super::Nexus { opctx: &OpContext, instance_lookup: &lookup::Instance<'_>, disk: NameOrId, - ) -> UpdateResult { + ) -> UpdateResult { let (.., authz_project, authz_instance) = instance_lookup.lookup_for(authz::Action::Modify).await?; let (.., authz_disk) = self diff --git a/nexus/src/app/instance_platform/mod.rs b/nexus/src/app/instance_platform/mod.rs index 2760e0e5588..ccba6ea09c7 100644 --- a/nexus/src/app/instance_platform/mod.rs +++ b/nexus/src/app/instance_platform/mod.rs @@ -70,21 +70,23 @@ // CPU platforms are broken out only because they're wordy. mod cpu_platform; -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; +use std::collections::BTreeSet; +use std::collections::HashMap; use crate::app::instance::InstanceRegisterReason; use crate::cidata::InstanceCiData; +use crate::db::datastore::Disk; use nexus_db_queries::db; -use nexus_types::identity::Resource; use omicron_common::api::external::Error; use omicron_common::api::internal::shared::NetworkInterface; use sled_agent_client::types::{ BlobStorageBackend, Board, BootOrderEntry, BootSettings, Chipset, - ComponentV0, Cpuid, CpuidVendor, CrucibleStorageBackend, I440Fx, - InstanceSpecV0, NvmeDisk, PciPath, QemuPvpanic, SerialPort, - SerialPortNumber, SpecKey, VirtioDisk, VirtioNetworkBackend, VirtioNic, - VmmSpec, + ComponentV0, Cpuid, CpuidVendor, CrucibleStorageBackend, + FileStorageBackend, I440Fx, InstanceSpecV0, NvmeDisk, PciPath, QemuPvpanic, + SerialPort, SerialPortNumber, SpecKey, VirtioDisk, VirtioNetworkBackend, + VirtioNic, VmmSpec, }; use uuid::Uuid; @@ -174,73 +176,107 @@ pub fn zero_padded_nvme_serial_from_str(s: &str) -> [u8; 20] { sn } -/// Describes a Crucible-backed disk that should be added to an instance -/// specification. -#[derive(Debug)] -struct CrucibleDisk { +/// Describes a Crucible-backed or file-backed disk that should be added to an +/// instance specification. +pub struct DiskComponents { device_name: String, device: ComponentV0, - backend: CrucibleStorageBackend, + backend: ComponentV0, } -/// Stores a mapping from Nexus disk IDs to Crucible disk descriptors. This +/// Stores a mapping from Nexus disk IDs to disk component descriptors. 
This /// allows the platform construction process to quickly determine the *device /// name* for a disk with a given ID so that that name can be inserted into the /// instance spec's boot settings. -struct DisksById(BTreeMap); +struct DisksById(BTreeMap); -impl DisksById { - /// Creates a disk list from an iterator over a set of disk and volume - /// records. - /// - /// The caller must ensure that the supplied `Volume`s have been checked out - /// (i.e., that their Crucible generation numbers are up-to-date) before - /// calling this function. - fn from_disks<'i>( - disks: impl Iterator, - ) -> Result { - let mut map = BTreeMap::new(); - for (disk, volume) in disks { - let slot = match disk.slot { - Some(s) => s.0, - None => { - return Err(Error::internal_error(&format!( - "disk {} is attached but has no PCI slot assignment", - disk.id() - ))); - } - }; - - let pci_path = slot_to_pci_bdf(slot, PciDeviceKind::Disk)?; - let device = ComponentV0::NvmeDisk(NvmeDisk { - backend_id: SpecKey::Uuid(disk.id()), - pci_path, - serial_number: zero_padded_nvme_serial_from_str( - disk.name().as_str(), - ), - }); +struct DisksByIdBuilder { + map: BTreeMap, + slot_usage: BTreeSet, +} + +impl DisksByIdBuilder { + fn new() -> Self { + Self { map: BTreeMap::new(), slot_usage: BTreeSet::new() } + } + + fn add_generic_disk( + &mut self, + disk: &Disk, + backend: ComponentV0, + ) -> Result<(), Error> { + let slot = disk.slot().ok_or(Error::internal_error(&format!( + "disk {} is attached but has no PCI slot assignment", + disk.id() + )))?; + + if self.slot_usage.contains(&slot) { + return Err(Error::internal_error(&format!( + "disk PCI slot {slot} used more than once, cannot attach {}", + disk.id(), + ))); + } + + self.slot_usage.insert(slot); + + let pci_path = slot_to_pci_bdf(slot, PciDeviceKind::Disk)?; - let backend = CrucibleStorageBackend { + let device = ComponentV0::NvmeDisk(NvmeDisk { + backend_id: SpecKey::Uuid(disk.id()), + pci_path, + serial_number: zero_padded_nvme_serial_from_str( + disk.name().as_str(), + ), + }); + + let device_name = component_names::device_name_from_id(&disk.id()); + + let components = DiskComponents { device, device_name, backend }; + + if self.map.insert(disk.id(), components).is_some() { + return Err(Error::internal_error(&format!( + "instance has multiple attached disks with ID {}", + disk.id() + ))); + } + + Ok(()) + } + + fn add_crucible_disk( + &mut self, + disk: &Disk, + volume: &db::model::Volume, + ) -> Result<(), Error> { + let backend = + ComponentV0::CrucibleStorageBackend(CrucibleStorageBackend { readonly: false, request_json: volume.data().to_owned(), - }; - - let device_name = component_names::device_name_from_id(&disk.id()); - if map - .insert( - disk.id(), - CrucibleDisk { device_name, device, backend }, - ) - .is_some() - { - return Err(Error::internal_error(&format!( - "instance has multiple attached disks with ID {}", - disk.id() - ))); - } - } + }); + + self.add_generic_disk(disk, backend) + } + + #[allow(unused)] // for now! 
+ fn add_file_backed_disk( + &mut self, + disk: &Disk, + path: String, + ) -> Result<(), Error> { + let backend = ComponentV0::FileStorageBackend(FileStorageBackend { + path, + readonly: false, + block_size: disk.block_size().to_bytes(), + workers: None, + }); + + self.add_generic_disk(disk, backend) + } +} - Ok(Self(map)) +impl From for DisksById { + fn from(v: DisksByIdBuilder) -> DisksById { + DisksById(v.map) } } @@ -325,14 +361,13 @@ impl Components { // This operation will add a device and a backend for every disk in the // input set. self.0.reserve(disks.0.len() * 2); - for (id, CrucibleDisk { device_name, device, backend }) in - disks.0.into_iter() - { + + for (id, disk_components) in disks.0.into_iter() { + let DiskComponents { device_name, device, backend } = + disk_components; + self.add(device_name, device)?; - self.add( - id.to_string(), - ComponentV0::CrucibleStorageBackend(backend), - )?; + self.add(id.to_string(), backend)?; } Ok(()) @@ -411,7 +446,7 @@ impl super::Nexus { reason: &InstanceRegisterReason, instance: &db::model::Instance, vmm: &db::model::Vmm, - disks: &[db::model::Disk], + disks: &[db::datastore::Disk], nics: &[NetworkInterface], ssh_keys: &[db::model::SshKey], ) -> Result { @@ -422,39 +457,45 @@ impl super::Nexus { ) })?; - let mut components = Components::default(); + let mut builder = DisksByIdBuilder::new(); - // Get the volume information needed to fill in the disks' backends' - // volume construction requests. Calling `volume_checkout` bumps - // the volumes' generation numbers. - let mut volumes = Vec::with_capacity(disks.len()); for disk in disks { - use db::datastore::VolumeCheckoutReason; - let volume = self - .db_datastore - .volume_checkout( - disk.volume_id(), - match reason { - InstanceRegisterReason::Start { vmm_id } => { - VolumeCheckoutReason::InstanceStart { - vmm_id: *vmm_id, - } - } - InstanceRegisterReason::Migrate { - vmm_id, - target_vmm_id, - } => VolumeCheckoutReason::InstanceMigrate { - vmm_id: *vmm_id, - target_vmm_id: *target_vmm_id, - }, - }, - ) - .await?; - - volumes.push(volume); + match &disk { + db::datastore::Disk::Crucible(crucible_disk) => { + // Get the volume information needed to fill in the disks' + // backends' volume construction requests. Calling + // `volume_checkout` bumps the volumes' generation numbers. + + use db::datastore::VolumeCheckoutReason; + + let volume = self + .db_datastore + .volume_checkout( + crucible_disk.volume_id(), + match reason { + InstanceRegisterReason::Start { vmm_id } => { + VolumeCheckoutReason::InstanceStart { + vmm_id: *vmm_id, + } + } + InstanceRegisterReason::Migrate { + vmm_id, + target_vmm_id, + } => VolumeCheckoutReason::InstanceMigrate { + vmm_id: *vmm_id, + target_vmm_id: *target_vmm_id, + }, + }, + ) + .await?; + + builder.add_crucible_disk(disk, &volume)?; + } + } } - let disks = DisksById::from_disks(disks.iter().zip(volumes.iter()))?; + let disks: DisksById = builder.into(); + let mut components = Components::default(); // Add the instance's boot settings. Propolis expects boot order entries // that specify disks to refer to the *device* components of those @@ -462,6 +503,9 @@ impl super::Nexus { // module's selected device name for the appropriate disk. if let Some(boot_disk_id) = instance.boot_disk_id { if let Some(disk) = disks.0.get(&boot_disk_id) { + // XXX here is where we would restrict the type of disk used for + // a boot disk. should we? 
+ let entry = BootOrderEntry { id: SpecKey::Name(disk.device_name.clone()), }; diff --git a/nexus/src/app/sagas/common_storage.rs b/nexus/src/app/sagas/common_storage.rs index 81852cebd97..956ea96a307 100644 --- a/nexus/src/app/sagas/common_storage.rs +++ b/nexus/src/app/sagas/common_storage.rs @@ -14,6 +14,7 @@ use nexus_db_lookup::LookupPath; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; +use nexus_db_queries::db::datastore::CrucibleDisk; use omicron_common::api::external::Error; use omicron_common::progenitor_operation_retry::ProgenitorOperationRetry; use omicron_common::progenitor_operation_retry::ProgenitorOperationRetryError; @@ -74,11 +75,14 @@ pub(crate) async fn call_pantry_attach_for_disk( log: &slog::Logger, opctx: &OpContext, nexus: &Nexus, - disk_id: Uuid, + disk: &CrucibleDisk, pantry_address: SocketAddrV6, ) -> Result<(), ActionError> { - let (.., disk) = LookupPath::new(opctx, nexus.datastore()) - .disk_id(disk_id) + // Perform an authz check but use the argument CrucibleDisk later in the + // function + + let (.., _disk) = LookupPath::new(opctx, nexus.datastore()) + .disk_id(disk.id()) .fetch_for(authz::Action::Modify) .await .map_err(ActionError::action_failed)?; @@ -104,7 +108,7 @@ pub(crate) async fn call_pantry_attach_for_disk( call_pantry_attach_for_volume( log, nexus, - disk_id, + disk.id(), volume_construction_request, pantry_address, ) diff --git a/nexus/src/app/sagas/disk_create.rs b/nexus/src/app/sagas/disk_create.rs index 7483452e0f8..ee9abba070a 100644 --- a/nexus/src/app/sagas/disk_create.rs +++ b/nexus/src/app/sagas/disk_create.rs @@ -129,7 +129,7 @@ impl NexusSaga for SagaDiskCreate { async fn sdc_create_disk_record( sagactx: NexusActionContext, -) -> Result { +) -> Result { let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; @@ -191,14 +191,20 @@ async fn sdc_create_disk_record( let disk = db::model::Disk::new( disk_id, params.project_id, - volume_id, - params.create_params.clone(), + ¶ms.create_params, block_size, db::model::DiskRuntimeState::new(), - ) - .map_err(|e| { - ActionError::action_failed(Error::invalid_request(&e.to_string())) - })?; + db::model::DiskType::Crucible, + ); + + let disk_type_crucible = db::model::DiskTypeCrucible::new( + disk_id, + volume_id, + ¶ms.create_params, + ); + + let crucible_disk = + db::datastore::CrucibleDisk { disk, disk_type_crucible }; let (.., authz_project) = LookupPath::new(&opctx, osagactx.datastore()) .project_id(params.project_id) @@ -206,13 +212,17 @@ async fn sdc_create_disk_record( .await .map_err(ActionError::action_failed)?; - let disk_created = osagactx + osagactx .datastore() - .project_create_disk(&opctx, &authz_project, disk) + .project_create_disk( + &opctx, + &authz_project, + db::datastore::Disk::Crucible(crucible_disk.clone()), + ) .await .map_err(ActionError::action_failed)?; - Ok(disk_created) + Ok(crucible_disk) } async fn sdc_create_disk_record_undo( @@ -294,7 +304,8 @@ async fn sdc_account_space( let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let disk_created = sagactx.lookup::("created_disk")?; + let disk_created = + sagactx.lookup::("created_disk")?; let opctx = crate::context::op_context_for_saga_action( &sagactx, ¶ms.serialized_authn, @@ -305,7 +316,7 @@ async fn sdc_account_space( &opctx, disk_created.id(), params.project_id, - disk_created.size, + disk_created.size(), ) .await .map_err(ActionError::action_failed)?; @@ -318,7 +329,8 @@ async fn sdc_account_space_undo( let osagactx = 
sagactx.user_data(); let params = sagactx.saga_params::()?; - let disk_created = sagactx.lookup::("created_disk")?; + let disk_created = + sagactx.lookup::("created_disk")?; let opctx = crate::context::op_context_for_saga_action( &sagactx, ¶ms.serialized_authn, @@ -329,7 +341,7 @@ async fn sdc_account_space_undo( &opctx, disk_created.id(), params.project_id, - disk_created.size, + disk_created.size(), ) .await .map_err(ActionError::action_failed)?; @@ -644,7 +656,8 @@ async fn sdc_finalize_disk_record( ); let disk_id = sagactx.lookup::("disk_id")?; - let disk_created = sagactx.lookup::("created_disk")?; + let disk_created = + sagactx.lookup::("created_disk")?; let (.., authz_disk) = LookupPath::new(&opctx, datastore) .disk_id(disk_id) .lookup_for(authz::Action::Modify) @@ -735,7 +748,8 @@ async fn sdc_call_pantry_attach_for_disk( &sagactx, ¶ms.serialized_authn, ); - let disk_id = sagactx.lookup::("disk_id")?; + let disk_created = + sagactx.lookup::("created_disk")?; let pantry_address = sagactx.lookup::("pantry_address")?; @@ -743,7 +757,7 @@ async fn sdc_call_pantry_attach_for_disk( &log, &opctx, &osagactx.nexus(), - disk_id, + &disk_created, pantry_address, ) .await?; @@ -827,8 +841,10 @@ pub(crate) mod test { use diesel::{ ExpressionMethods, OptionalExtension, QueryDsl, SelectableHelper, }; + use nexus_db_queries::authn::saga::Serialized; use nexus_db_queries::context::OpContext; - use nexus_db_queries::{authn::saga::Serialized, db::datastore::DataStore}; + use nexus_db_queries::db; + use nexus_db_queries::db::datastore::DataStore; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils_macros::nexus_test; use omicron_common::api::external::ByteCount; @@ -891,11 +907,9 @@ pub(crate) mod test { let output = nexus.sagas.saga_execute::(params).await.unwrap(); let disk = output - .lookup_node_output::( - "created_disk", - ) + .lookup_node_output::("created_disk") .unwrap(); - assert_eq!(disk.project_id, project_id); + assert_eq!(disk.project_id(), project_id); } async fn no_disk_records_exist(datastore: &DataStore) -> bool { diff --git a/nexus/src/app/sagas/disk_delete.rs b/nexus/src/app/sagas/disk_delete.rs index 1af2e53fd3a..f6fe68151fe 100644 --- a/nexus/src/app/sagas/disk_delete.rs +++ b/nexus/src/app/sagas/disk_delete.rs @@ -10,8 +10,8 @@ use crate::app::sagas::declare_saga_actions; use crate::app::sagas::volume_delete; use nexus_db_queries::authn; use nexus_db_queries::db; +use nexus_db_queries::db::datastore; use omicron_common::api::external::DiskState; -use omicron_uuid_kinds::VolumeUuid; use serde::Deserialize; use serde::Serialize; use steno::ActionError; @@ -24,8 +24,7 @@ use uuid::Uuid; pub(crate) struct Params { pub serialized_authn: authn::saga::Serialized, pub project_id: Uuid, - pub disk_id: Uuid, - pub volume_id: VolumeUuid, + pub disk: datastore::Disk, } // disk delete saga: actions @@ -61,36 +60,41 @@ impl NexusSaga for SagaDiskDelete { builder.append(delete_disk_record_action()); builder.append(space_account_action()); - let subsaga_params = volume_delete::Params { - serialized_authn: params.serialized_authn.clone(), - volume_id: params.volume_id, - }; - - let subsaga_dag = { - let subsaga_builder = steno::DagBuilder::new(steno::SagaName::new( - volume_delete::SagaVolumeDelete::NAME, - )); - volume_delete::SagaVolumeDelete::make_saga_dag( - &subsaga_params, - subsaga_builder, - )? 
- }; - - builder.append(Node::constant( - "params_for_volume_delete_subsaga", - serde_json::to_value(&subsaga_params).map_err(|e| { - SagaInitError::SerializeError( - "params_for_volume_delete_subsaga".to_string(), - e, - ) - })?, - )); - - builder.append(Node::subsaga( - "volume_delete_subsaga_no_result", - subsaga_dag, - "params_for_volume_delete_subsaga", - )); + match ¶ms.disk { + datastore::Disk::Crucible(disk) => { + let subsaga_params = volume_delete::Params { + serialized_authn: params.serialized_authn.clone(), + volume_id: disk.volume_id(), + }; + + let subsaga_dag = { + let subsaga_builder = + steno::DagBuilder::new(steno::SagaName::new( + volume_delete::SagaVolumeDelete::NAME, + )); + volume_delete::SagaVolumeDelete::make_saga_dag( + &subsaga_params, + subsaga_builder, + )? + }; + + builder.append(Node::constant( + "params_for_volume_delete_subsaga", + serde_json::to_value(&subsaga_params).map_err(|e| { + SagaInitError::SerializeError( + "params_for_volume_delete_subsaga".to_string(), + e, + ) + })?, + )); + + builder.append(Node::subsaga( + "volume_delete_subsaga_no_result", + subsaga_dag, + "params_for_volume_delete_subsaga", + )); + } + } Ok(builder.build()?) } @@ -107,7 +111,7 @@ async fn sdd_delete_disk_record( let disk = osagactx .datastore() .project_delete_disk_no_auth( - ¶ms.disk_id, + ¶ms.disk.id(), &[DiskState::Detached, DiskState::Faulted], ) .await @@ -124,7 +128,7 @@ async fn sdd_delete_disk_record_undo( osagactx .datastore() - .project_undelete_disk_set_faulted_no_auth(¶ms.disk_id) + .project_undelete_disk_set_faulted_no_auth(¶ms.disk.id()) .await .map_err(ActionError::action_failed)?; @@ -185,9 +189,9 @@ pub(crate) mod test { app::saga::create_saga_dag, app::sagas::disk_delete::Params, app::sagas::disk_delete::SagaDiskDelete, }; - use nexus_db_model::Disk; use nexus_db_queries::authn::saga::Serialized; use nexus_db_queries::context::OpContext; + use nexus_db_queries::db::datastore::Disk; use nexus_test_utils::resource_helpers::DiskTest; use nexus_test_utils::resource_helpers::create_project; use nexus_test_utils_macros::nexus_test; @@ -213,6 +217,7 @@ pub(crate) mod test { let project_selector = params::ProjectSelector { project: Name::try_from(PROJECT_NAME.to_string()).unwrap().into(), }; + let project_lookup = nexus.project_lookup(&opctx, project_selector).unwrap(); @@ -242,9 +247,9 @@ pub(crate) mod test { let params = Params { serialized_authn: Serialized::for_opctx(&opctx), project_id, - disk_id: disk.id(), - volume_id: disk.volume_id(), + disk, }; + nexus.sagas.saga_execute::(params).await.unwrap(); } @@ -264,9 +269,9 @@ pub(crate) mod test { let params = Params { serialized_authn: Serialized::for_opctx(&opctx), project_id, - disk_id: disk.id(), - volume_id: disk.volume_id(), + disk, }; + let dag = create_saga_dag::(params).unwrap(); crate::app::sagas::test_helpers::actions_succeed_idempotently( nexus, dag, diff --git a/nexus/src/app/sagas/finalize_disk.rs b/nexus/src/app/sagas/finalize_disk.rs index ae418903d3b..98e85c3faa5 100644 --- a/nexus/src/app/sagas/finalize_disk.rs +++ b/nexus/src/app/sagas/finalize_disk.rs @@ -15,7 +15,7 @@ use crate::app::sagas::snapshot_create; use crate::external_api::params; use nexus_db_lookup::LookupPath; use nexus_db_model::Generation; -use nexus_db_queries::{authn, authz}; +use nexus_db_queries::{authn, authz, db::datastore}; use omicron_common::api::external; use omicron_common::api::external::Error; use omicron_common::api::external::Name; @@ -33,7 +33,7 @@ pub(crate) struct Params { pub serialized_authn: 
authn::saga::Serialized, pub silo_id: Uuid, pub project_id: Uuid, - pub disk_id: Uuid, + pub disk: datastore::CrucibleDisk, pub snapshot_name: Option, } @@ -80,7 +80,7 @@ impl NexusSaga for SagaFinalizeDisk { serialized_authn: params.serialized_authn.clone(), silo_id: params.silo_id, project_id: params.project_id, - disk_id: params.disk_id, + disk: params.disk.clone(), attach_instance_id: None, use_the_pantry: true, create_params: params::SnapshotCreate { @@ -88,10 +88,10 @@ impl NexusSaga for SagaFinalizeDisk { name: snapshot_name.clone(), description: format!( "snapshot of finalized disk {}", - params.disk_id + params.disk.id() ), }, - disk: params.disk_id.into(), + disk: params.disk.id().into(), }, }; @@ -146,7 +146,7 @@ async fn sfd_set_finalizing_state( let (.., authz_disk, db_disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .fetch_for(authz::Action::Modify) .await .map_err(ActionError::action_failed)?; @@ -169,7 +169,7 @@ async fn sfd_set_finalizing_state( // will be important later to *only* transition this disk out of finalizing // if the generation number matches what *this* saga is doing. let (.., db_disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .fetch_for(authz::Action::Read) .await .map_err(ActionError::action_failed)?; @@ -197,7 +197,7 @@ async fn sfd_set_finalizing_state_undo( let (.., authz_disk, db_disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .fetch_for(authz::Action::Modify) .await .map_err(ActionError::action_failed)?; @@ -215,7 +215,7 @@ async fn sfd_set_finalizing_state_undo( info!( log, "undo: setting disk {} state from finalizing to import_ready", - params.disk_id + params.disk.id() ); osagactx @@ -231,7 +231,7 @@ async fn sfd_set_finalizing_state_undo( info!( log, "disk {} has generation number {:?}, which doesn't match the expected {:?}: skip setting to import_ready", - params.disk_id, + params.disk.id(), db_disk.runtime().gen, expected_disk_generation_number, ); @@ -239,7 +239,7 @@ async fn sfd_set_finalizing_state_undo( } external::DiskState::ImportReady => { - info!(log, "disk {} already import_ready", params.disk_id); + info!(log, "disk {} already import_ready", params.disk.id()); } _ => { @@ -261,18 +261,29 @@ async fn sfd_get_pantry_address( ¶ms.serialized_authn, ); + // Re-fetch the disk, and use the lookup first to check for modify + // permission. let (.., db_disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .fetch_for(authz::Action::Modify) .await .map_err(ActionError::action_failed)?; + let disk = match osagactx + .datastore() + .disk_get(&opctx, params.disk.id()) + .await + .map_err(ActionError::action_failed)? + { + datastore::Disk::Crucible(disk) => disk, + }; + // At any stage of executing this saga, if the disk moves from state // importing to detached, it will be detached from the corresponding Pantry. // Any subsequent saga nodes will fail because the pantry address is stored // as part of the saga state, and requests sent to that Pantry with the // disk's id will fail. 
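// `pantry_address` now lives on the Crucible-specific record rather than on the
// generic disk model, so it is read from the `CrucibleDisk` returned by
// `disk_get` above; the `LookupPath` fetch before it is kept for the Modify
// authz check, as the new comment in this function notes.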
- let pantry_address = db_disk.pantry_address().ok_or_else(|| { + let pantry_address = disk.pantry_address().ok_or_else(|| { ActionError::action_failed(String::from("disk not attached to pantry!")) })?; @@ -291,7 +302,7 @@ async fn sfd_call_pantry_detach_for_disk( match call_pantry_detach( sagactx.user_data().nexus(), &log, - params.disk_id, + params.disk.id(), pantry_address, ) .await @@ -323,10 +334,10 @@ async fn sfd_clear_pantry_address( ¶ms.serialized_authn, ); - info!(log, "setting disk {} pantry to None", params.disk_id); + info!(log, "setting disk {} pantry to None", params.disk.id()); let (.., authz_disk) = LookupPath::new(&opctx, datastore) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .lookup_for(authz::Action::Modify) .await .map_err(ActionError::action_failed)?; @@ -352,7 +363,7 @@ async fn sfd_set_detached_state( let (.., authz_disk, db_disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .fetch_for(authz::Action::Modify) .await .map_err(ActionError::action_failed)?; @@ -366,7 +377,7 @@ async fn sfd_set_detached_state( info!( log, "setting disk {} state from finalizing to detached", - params.disk_id + params.disk.id() ); osagactx @@ -382,7 +393,7 @@ async fn sfd_set_detached_state( info!( log, "disk {} has generation number {:?}, which doesn't match the expected {:?}: skip setting to detached", - params.disk_id, + params.disk.id(), db_disk.runtime().gen, expected_disk_generation_number, ); @@ -390,7 +401,7 @@ async fn sfd_set_detached_state( } external::DiskState::Detached => { - info!(log, "disk {} already detached", params.disk_id); + info!(log, "disk {} already detached", params.disk.id()); } _ => { diff --git a/nexus/src/app/sagas/region_replacement_drive.rs b/nexus/src/app/sagas/region_replacement_drive.rs index b0989bbf75f..44a371bd9cf 100644 --- a/nexus/src/app/sagas/region_replacement_drive.rs +++ b/nexus/src/app/sagas/region_replacement_drive.rs @@ -140,6 +140,7 @@ use super::{ ACTION_GENERATE_ID, ActionRegistry, NexusActionContext, NexusSaga, SagaInitError, }; +use crate::app::db::datastore::CrucibleDisk; use crate::app::db::datastore::InstanceAndActiveVmm; use crate::app::sagas::common_storage::get_pantry_address; use crate::app::sagas::declare_saga_actions; @@ -818,7 +819,7 @@ enum DriveAction { /// If the Volume is currently running in a Propolis server, then send the /// volume replacement request there. - Propolis { step: db::model::RegionReplacementStep, disk: db::model::Disk }, + Propolis { step: db::model::RegionReplacementStep, disk: CrucibleDisk }, } async fn srrd_drive_region_replacement_prepare( @@ -1020,7 +1021,7 @@ async fn srrd_drive_region_replacement_prepare( // The disk is not attached to an instance. Is it attached to a // Pantry right now (aka performing bulk import)? 
- if let Some(address) = &disk.pantry_address { + if let Some(address) = &disk.pantry_address() { // TODO currently unsupported return Err(ActionError::action_failed(format!( "disk {} attached to {address}, not supported", @@ -1480,7 +1481,7 @@ async fn execute_propolis_drive_action( step_vmm_id: Uuid, vmm: db::model::Vmm, client: propolis_client::Client, - disk: db::model::Disk, + disk: CrucibleDisk, disk_new_volume_vcr: String, ) -> Result { // This client could be for a different VMM than the step was diff --git a/nexus/src/app/sagas/region_replacement_start.rs b/nexus/src/app/sagas/region_replacement_start.rs index eb7701c96e5..0230abdbc01 100644 --- a/nexus/src/app/sagas/region_replacement_start.rs +++ b/nexus/src/app/sagas/region_replacement_start.rs @@ -764,14 +764,13 @@ async fn srrs_update_request_record( pub(crate) mod test { use crate::{ app::RegionAllocationStrategy, app::db::DataStore, - app::saga::create_saga_dag, + app::db::datastore::Disk, app::saga::create_saga_dag, app::sagas::region_replacement_start::Params, app::sagas::region_replacement_start::SagaRegionReplacementStart, app::sagas::region_replacement_start::find_only_new_region, app::sagas::test_helpers::test_opctx, }; use chrono::Utc; - use nexus_db_lookup::LookupPath; use nexus_db_model::CrucibleDataset; use nexus_db_model::Region; use nexus_db_model::RegionReplacement; @@ -820,14 +819,12 @@ pub(crate) mod test { // Assert disk has three allocated regions let disk_id = disk.identity.id; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk_id) - .fetch() - .await - .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + + let Disk::Crucible(disk) = + datastore.disk_get(&opctx, disk_id).await.unwrap(); let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); + datastore.get_allocated_regions(disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), 3); // Replace one of the disk's regions @@ -876,7 +873,7 @@ pub(crate) mod test { // Validate number of regions for disk didn't change let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); + datastore.get_allocated_regions(disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), 3); // Validate that one of the regions for the disk is the new one @@ -1137,14 +1134,12 @@ pub(crate) mod test { let disk = create_disk(&client, PROJECT_NAME, DISK_NAME).await; let disk_id = disk.identity.id; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk_id) - .fetch() - .await - .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + + let Disk::Crucible(disk) = + datastore.disk_get(&opctx, disk_id).await.unwrap(); let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); + datastore.get_allocated_regions(disk.volume_id()).await.unwrap(); assert_eq!(allocated_regions.len(), 3); let region_to_replace: &Region = &allocated_regions[0].1; @@ -1212,14 +1207,12 @@ pub(crate) mod test { let disk = create_disk(&client, PROJECT_NAME, DISK_NAME).await; let disk_id = disk.identity.id; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk_id) - .fetch() - .await - .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + + let Disk::Crucible(disk) = + datastore.disk_get(&opctx, disk_id).await.unwrap(); let allocated_regions = - datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); + datastore.get_allocated_regions(disk.volume_id()).await.unwrap(); 
assert_eq!(allocated_regions.len(), 3); let region_to_replace: &Region = &allocated_regions[0].1; diff --git a/nexus/src/app/sagas/region_snapshot_replacement_start.rs b/nexus/src/app/sagas/region_snapshot_replacement_start.rs index d3cf05af796..e0429eee846 100644 --- a/nexus/src/app/sagas/region_snapshot_replacement_start.rs +++ b/nexus/src/app/sagas/region_snapshot_replacement_start.rs @@ -1219,7 +1219,7 @@ async fn rsrss_update_request_record( pub(crate) mod test { use crate::{ app::RegionAllocationStrategy, app::db::DataStore, - app::saga::create_saga_dag, + app::db::datastore::Disk, app::saga::create_saga_dag, app::sagas::region_snapshot_replacement_start::*, app::sagas::test_helpers::test_opctx, }; @@ -1275,11 +1275,9 @@ pub(crate) mod test { assert_eq!(region_allocations(&datastore).await, 3); let disk_id = disk.identity.id; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk_id) - .fetch() - .await - .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk_id).await.unwrap(); // Create a snapshot let snapshot = @@ -1301,7 +1299,7 @@ pub(crate) mod test { } struct PrepareResult<'a> { - db_disk: nexus_db_model::Disk, + db_disk: db::datastore::CrucibleDisk, snapshot: views::Snapshot, db_snapshot: nexus_db_model::Snapshot, disk_test: DiskTest<'a, crate::Server>, @@ -1859,11 +1857,8 @@ pub(crate) mod test { create_snapshot(&client, PROJECT_NAME, "disk", "snap").await; // Before expunging any physical disk, save some DB models - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); let (.., db_snapshot) = LookupPath::new(&opctx, datastore) .snapshot_id(snapshot.identity.id) @@ -2017,11 +2012,8 @@ pub(crate) mod test { create_snapshot(&client, PROJECT_NAME, "disk", "snap").await; // Before expunging any physical disk, save some DB models - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); let (.., db_snapshot) = LookupPath::new(&opctx, datastore) .snapshot_id(snapshot.identity.id) diff --git a/nexus/src/app/sagas/snapshot_create.rs b/nexus/src/app/sagas/snapshot_create.rs index b889eb43940..3b476e5a59b 100644 --- a/nexus/src/app/sagas/snapshot_create.rs +++ b/nexus/src/app/sagas/snapshot_create.rs @@ -134,7 +134,7 @@ pub(crate) struct Params { pub serialized_authn: authn::saga::Serialized, pub silo_id: Uuid, pub project_id: Uuid, - pub disk_id: Uuid, + pub disk: db::datastore::CrucibleDisk, pub attach_instance_id: Option, pub use_the_pantry: bool, pub create_params: params::SnapshotCreate, @@ -384,15 +384,9 @@ async fn ssc_take_volume_lock( // is blocked by a snapshot being created, and snapshot creation is blocked // by region replacement. 
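// Because `Params` now carries the whole `CrucibleDisk`, the volume ID needed
// for the repair lock/unlock comes directly from `params.disk.volume_id()`;
// the per-node `LookupPath` fetch of the disk is no longer required here or in
// the corresponding undo action.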
- let (.., disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) - .fetch() - .await - .map_err(ActionError::action_failed)?; - osagactx .datastore() - .volume_repair_lock(&opctx, disk.volume_id(), lock_id) + .volume_repair_lock(&opctx, params.disk.volume_id(), lock_id) .await .map_err(ActionError::action_failed)?; @@ -411,14 +405,9 @@ async fn ssc_take_volume_lock_undo( ¶ms.serialized_authn, ); - let (.., disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) - .fetch() - .await?; - osagactx .datastore() - .volume_repair_unlock(&opctx, disk.volume_id(), lock_id) + .volume_repair_unlock(&opctx, params.disk.volume_id(), lock_id) .await?; Ok(()) @@ -449,7 +438,7 @@ async fn ssc_alloc_regions( ); let (.., disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .fetch() .await .map_err(ActionError::action_failed)?; @@ -661,7 +650,7 @@ async fn ssc_create_snapshot_record( info!(log, "grabbing disk by name {}", params.create_params.disk); let (.., disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .fetch() .await .map_err(ActionError::action_failed)?; @@ -840,7 +829,7 @@ async fn ssc_send_snapshot_request_to_sled_agent( }; info!(log, "asking for disk snapshot from Propolis via sled agent"; - "disk_id" => %params.disk_id, + "disk_id" => %params.disk.id(), "instance_id" => %attach_instance_id, "propolis_id" => %propolis_id, "sled_id" => %sled_id); @@ -855,7 +844,7 @@ async fn ssc_send_snapshot_request_to_sled_agent( sled_agent_client .vmm_issue_disk_snapshot_request( &PropolisUuid::from_untyped_uuid(propolis_id), - ¶ms.disk_id, + ¶ms.disk.id(), &VmmIssueDiskSnapshotRequestBody { snapshot_id }, ) .await @@ -886,21 +875,15 @@ async fn ssc_send_snapshot_request_to_sled_agent_undo( let log = sagactx.user_data().log(); let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); let snapshot_id = sagactx.lookup::("snapshot_id")?; info!(log, "Undoing snapshot request for {snapshot_id}"); // Lookup the regions used by the source disk... - let (.., disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) - .fetch() + let datasets_and_regions = osagactx + .datastore() + .get_allocated_regions(params.disk.volume_id()) .await?; - let datasets_and_regions = - osagactx.datastore().get_allocated_regions(disk.volume_id()).await?; // ... and instruct each of those regions to delete the snapshot. for (dataset, region) in datasets_and_regions { @@ -925,13 +908,20 @@ async fn ssc_get_pantry_address( ); // If the disk is already attached to a Pantry, use that, otherwise get a - // random one. Return boolean indicating if additional saga nodes need to - // attach this disk to that random pantry. - let (.., disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) - .fetch() + // random one. The address that was passed in the disk param might be stale + // so look it up again here. + // + // Return a boolean indicating if additional saga nodes need to attach this + // disk to that random pantry. + + let disk = match osagactx + .datastore() + .disk_get(&opctx, params.disk.id()) .await - .map_err(ActionError::action_failed)?; + .map_err(ActionError::action_failed)? 
+ { + db::datastore::Disk::Crucible(disk) => disk, + }; let pantry_address = if let Some(pantry_address) = disk.pantry_address() { pantry_address @@ -968,7 +958,7 @@ async fn ssc_attach_disk_to_pantry( let (.., authz_disk, db_disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .fetch_for(authz::Action::Modify) .await .map_err(ActionError::action_failed)?; @@ -982,7 +972,7 @@ async fn ssc_attach_disk_to_pantry( // generation number is too low. match db_disk.state().into() { external::DiskState::Detached => { - info!(log, "setting state of {} to maintenance", params.disk_id); + info!(log, "setting state of {} to maintenance", params.disk.id()); osagactx .datastore() @@ -999,7 +989,7 @@ async fn ssc_attach_disk_to_pantry( // This saga is a sub-saga of the finalize saga if the user has // specified an optional snapshot should be taken. No state change // is required. - info!(log, "disk {} in state finalizing", params.disk_id); + info!(log, "disk {} in state finalizing", params.disk.id()); } external::DiskState::Attached(attach_instance_id) => { @@ -1007,7 +997,7 @@ async fn ssc_attach_disk_to_pantry( info!( log, "disk {} in state attached to instance id {}", - params.disk_id, + params.disk.id(), attach_instance_id ); } @@ -1029,7 +1019,7 @@ async fn ssc_attach_disk_to_pantry( // will be important later to *only* transition this disk out of maintenance // if the generation number matches what *this* saga is doing. let (.., db_disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .fetch_for(authz::Action::Read) .await .map_err(ActionError::action_failed)?; @@ -1050,7 +1040,7 @@ async fn ssc_attach_disk_to_pantry_undo( let (.., authz_disk, db_disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .fetch_for(authz::Action::Modify) .await .map_err(ActionError::action_failed)?; @@ -1060,7 +1050,7 @@ async fn ssc_attach_disk_to_pantry_undo( info!( log, "undo: setting disk {} state from maintenance to detached", - params.disk_id + params.disk.id() ); osagactx @@ -1077,14 +1067,16 @@ async fn ssc_attach_disk_to_pantry_undo( external::DiskState::Detached => { info!( log, - "undo: disk {} already in state detached", params.disk_id + "undo: disk {} already in state detached", + params.disk.id() ); } external::DiskState::Finalizing => { info!( log, - "undo: disk {} already in state finalizing", params.disk_id + "undo: disk {} already in state finalizing", + params.disk.id() ); } @@ -1114,7 +1106,7 @@ async fn ssc_call_pantry_attach_for_disk( info!( log, "attaching disk {:?} to pantry at {:?}", - params.disk_id, + params.disk.id(), pantry_address ); @@ -1122,12 +1114,12 @@ async fn ssc_call_pantry_attach_for_disk( &log, &opctx, &osagactx.nexus(), - params.disk_id, + ¶ms.disk, pantry_address, ) .await?; } else { - info!(log, "disk {} already attached to a pantry", params.disk_id); + info!(log, "disk {} already attached to a pantry", params.disk.id()); } Ok(()) @@ -1147,14 +1139,14 @@ async fn ssc_call_pantry_attach_for_disk_undo( info!( log, "undo: detaching disk {:?} from pantry at {:?}", - params.disk_id, + params.disk.id(), pantry_address ); match call_pantry_detach( sagactx.user_data().nexus(), &log, - params.disk_id, + params.disk.id(), pantry_address, ) .await @@ -1164,7 +1156,7 @@ async fn ssc_call_pantry_attach_for_disk_undo( Err(err) => { return Err(anyhow!( "failed to detach disk {} from pantry at {}: {}", - params.disk_id, + 
params.disk.id(), pantry_address, InlineErrorChain::new(&err) )); @@ -1174,7 +1166,7 @@ async fn ssc_call_pantry_attach_for_disk_undo( info!( log, "undo: not detaching disk {}, was already attached to a pantry", - params.disk_id + params.disk.id() ); } @@ -1198,7 +1190,7 @@ async fn ssc_call_pantry_snapshot_for_disk( log, "sending snapshot request with id {} for disk {} to pantry endpoint {}", snapshot_id, - params.disk_id, + params.disk.id(), endpoint, ); @@ -1207,7 +1199,7 @@ async fn ssc_call_pantry_snapshot_for_disk( let snapshot_operation = || async { client .snapshot( - ¶ms.disk_id.to_string(), + ¶ms.disk.id().to_string(), &crucible_pantry_client::types::SnapshotRequest { snapshot_id: snapshot_id.to_string(), }, @@ -1233,22 +1225,15 @@ async fn ssc_call_pantry_snapshot_for_disk_undo( let log = sagactx.user_data().log(); let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); let snapshot_id = sagactx.lookup::("snapshot_id")?; - let params = sagactx.saga_params::()?; info!(log, "Undoing pantry snapshot request for {snapshot_id}"); // Lookup the regions used by the source disk... - let (.., disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) - .fetch() + let datasets_and_regions = osagactx + .datastore() + .get_allocated_regions(params.disk.volume_id()) .await?; - let datasets_and_regions = - osagactx.datastore().get_allocated_regions(disk.volume_id()).await?; // ... and instruct each of those regions to delete the snapshot. for (dataset, region) in datasets_and_regions { @@ -1274,13 +1259,13 @@ async fn ssc_call_pantry_detach_for_disk( info!( log, "detaching disk {:?} from pantry at {:?}", - params.disk_id, + params.disk.id(), pantry_address ); call_pantry_detach( sagactx.user_data().nexus(), &log, - params.disk_id, + params.disk.id(), pantry_address, ) .await @@ -1308,7 +1293,7 @@ async fn ssc_detach_disk_from_pantry( let (.., authz_disk, db_disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) + .disk_id(params.disk.id()) .fetch_for(authz::Action::Modify) .await .map_err(ActionError::action_failed)?; @@ -1331,7 +1316,7 @@ async fn ssc_detach_disk_from_pantry( info!( log, "setting disk {} state from maintenance to detached", - params.disk_id + params.disk.id() ); osagactx @@ -1346,8 +1331,9 @@ async fn ssc_detach_disk_from_pantry( } else { info!( log, - "disk {} has generation number {:?}, which doesn't match the expected {:?}: skip setting to detach", - params.disk_id, + "disk {} has generation number {:?}, which doesn't match \ + the expected {:?}: skip setting to detach", + params.disk.id(), db_disk.runtime().gen, expected_disk_generation_number, ); @@ -1355,11 +1341,11 @@ async fn ssc_detach_disk_from_pantry( } external::DiskState::Detached => { - info!(log, "disk {} already in state detached", params.disk_id); + info!(log, "disk {} already in state detached", params.disk.id()); } external::DiskState::Finalizing => { - info!(log, "disk {} already in state finalizing", params.disk_id); + info!(log, "disk {} already in state finalizing", params.disk.id()); } _ => { @@ -1376,25 +1362,15 @@ async fn ssc_start_running_snapshot( let log = sagactx.user_data().log(); let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); let snapshot_id = sagactx.lookup::("snapshot_id")?; info!(log, "starting running 
snapshot"; "snapshot_id" => %snapshot_id); - let (.., disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) - .fetch() - .await - .map_err(ActionError::action_failed)?; - // For each dataset and region that makes up the disk, create a map from the // region information to the new running snapshot information. let datasets_and_regions = osagactx .datastore() - .get_allocated_regions(disk.volume_id()) + .get_allocated_regions(params.disk.volume_id()) .await .map_err(ActionError::action_failed)?; @@ -1466,23 +1442,16 @@ async fn ssc_start_running_snapshot_undo( let log = sagactx.user_data().log(); let osagactx = sagactx.user_data(); let params = sagactx.saga_params::()?; - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); let snapshot_id = sagactx.lookup::("snapshot_id")?; info!(log, "Undoing snapshot start running request for {snapshot_id}"); // Lookup the regions used by the source disk... - let (.., disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) - .fetch() + let datasets_and_regions = osagactx + .datastore() + .get_allocated_regions(params.disk.volume_id()) .await?; - let datasets_and_regions = - osagactx.datastore().get_allocated_regions(disk.volume_id()).await?; - // ... and instruct each of those regions to delete the running snapshot. for (dataset, region) in datasets_and_regions { osagactx @@ -1514,21 +1483,11 @@ async fn ssc_create_volume_record( // For a snapshot, copy the volume construction request at the time the // snapshot was taken. - let opctx = crate::context::op_context_for_saga_action( - &sagactx, - ¶ms.serialized_authn, - ); - - let (.., disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) - .fetch() - .await - .map_err(ActionError::action_failed)?; let disk_volume = osagactx .datastore() .volume_checkout( - disk.volume_id(), + params.disk.volume_id(), db::datastore::VolumeCheckoutReason::CopyAndModify, ) .await @@ -1540,7 +1499,7 @@ async fn ssc_create_volume_record( serde_json::from_str(&disk_volume.data()).map_err(|e| { ActionError::action_failed(Error::internal_error(&format!( "failed to deserialize disk {} volume data: {}", - disk.id(), + params.disk.id(), e, ))) })?; @@ -1649,15 +1608,9 @@ async fn ssc_release_volume_lock( ¶ms.serialized_authn, ); - let (.., disk) = LookupPath::new(&opctx, osagactx.datastore()) - .disk_id(params.disk_id) - .fetch() - .await - .map_err(ActionError::action_failed)?; - osagactx .datastore() - .volume_repair_unlock(&opctx, disk.volume_id(), lock_id) + .volume_repair_unlock(&opctx, params.disk.volume_id(), lock_id) .await .map_err(ActionError::action_failed)?; @@ -1771,6 +1724,7 @@ mod test { use dropshot::test_util::ClientTestContext; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; + use nexus_db_queries::db::datastore::Disk; use nexus_db_queries::db::datastore::InstanceAndActiveVmm; use nexus_test_utils::resource_helpers::create_default_ip_pool; use nexus_test_utils::resource_helpers::create_disk; @@ -2007,7 +1961,7 @@ mod test { opctx: &OpContext, silo_id: Uuid, project_id: Uuid, - disk_id: Uuid, + db_disk: db::datastore::CrucibleDisk, disk: NameOrId, attach_instance_id: Option, use_the_pantry: bool, @@ -2016,7 +1970,7 @@ mod test { serialized_authn: authn::saga::Serialized::for_opctx(opctx), silo_id, project_id, - disk_id, + disk: db_disk, attach_instance_id, use_the_pantry, create_params: params::SnapshotCreate { @@ -2058,6 +2012,9 @@ mod test { .await .expect("Failed 
to look up created disk"); + let Disk::Crucible(disk) = + nexus.datastore().disk_get(&opctx, disk_id).await.unwrap(); + let silo_id = authz_silo.id(); let project_id = authz_project.id(); @@ -2065,7 +2022,7 @@ mod test { &opctx, silo_id, project_id, - disk_id, + disk, Name::from_str(DISK_NAME).unwrap().into(), None, // not attached to an instance true, // use the pantry @@ -2279,6 +2236,12 @@ mod test { .identity .id; + let Disk::Crucible(disk) = nexus + .datastore() + .disk_get(&opctx, disk_id) + .await + .unwrap(); + // If the pantry isn't being used, make sure the disk is // attached. Note that under normal circumstances, a // disk can only be attached to a stopped instance, but @@ -2307,7 +2270,7 @@ mod test { &opctx, silo_id, project_id, - disk_id, + disk, Name::from_str(DISK_NAME).unwrap().into(), attach_instance_id, use_the_pantry, @@ -2413,6 +2376,9 @@ mod test { .await .expect("Failed to look up created disk"); + let Disk::Crucible(disk) = + nexus.datastore().disk_get(&opctx, disk_id).await.unwrap(); + let silo_id = authz_silo.id(); let project_id = authz_project.id(); @@ -2420,7 +2386,7 @@ mod test { &opctx, silo_id, project_id, - disk_id, + disk, Name::from_str(DISK_NAME).unwrap().into(), // The disk isn't attached at this time, so don't supply a sled. None, @@ -2461,12 +2427,14 @@ mod test { } // Detach the disk, then rerun the saga - let (.., authz_disk, db_disk) = - LookupPath::new(&opctx, nexus.datastore()) - .disk_id(disk_id) - .fetch_for(authz::Action::Read) - .await - .expect("Failed to look up created disk"); + let (.., authz_disk) = LookupPath::new(&opctx, nexus.datastore()) + .disk_id(disk_id) + .lookup_for(authz::Action::Read) + .await + .expect("Failed to look up created disk"); + + let Disk::Crucible(disk) = + nexus.datastore().disk_get(&opctx, disk_id).await.unwrap(); assert!( nexus @@ -2474,7 +2442,7 @@ mod test { .disk_update_runtime( &opctx, &authz_disk, - &db_disk.runtime().detach(), + &disk.runtime().detach(), ) .await .expect("failed to detach disk") @@ -2485,7 +2453,7 @@ mod test { &opctx, silo_id, project_id, - disk_id, + disk, Name::from_str(DISK_NAME).unwrap().into(), // The disk isn't attached at this time, so don't supply a sled. None, @@ -2516,13 +2484,16 @@ mod test { // Build the saga DAG with the provided test parameters let opctx = test_opctx(cptestctx); - let (authz_silo, authz_project, _authz_disk) = + let (authz_silo, authz_project, authz_disk) = LookupPath::new(&opctx, nexus.datastore()) .disk_id(disk_id) .lookup_for(authz::Action::Read) .await .expect("Failed to look up created disk"); + let Disk::Crucible(disk) = + nexus.datastore().disk_get(&opctx, disk_id).await.unwrap(); + let silo_id = authz_silo.id(); let project_id = authz_project.id(); @@ -2538,7 +2509,7 @@ mod test { &opctx, silo_id, project_id, - disk_id, + disk.clone(), Name::from_str(DISK_NAME).unwrap().into(), Some(fake_instance_id), false, // use the pantry @@ -2548,12 +2519,6 @@ mod test { let runnable_saga = nexus.sagas.saga_prepare(dag).await.unwrap(); // Before running the saga, detach the disk! 
- let (.., authz_disk, db_disk) = - LookupPath::new(&opctx, nexus.datastore()) - .disk_id(disk_id) - .fetch_for(authz::Action::Modify) - .await - .expect("Failed to look up created disk"); assert!( nexus @@ -2561,7 +2526,7 @@ mod test { .disk_update_runtime( &opctx, &authz_disk, - &db_disk.runtime().detach(), + &disk.runtime().detach(), ) .await .expect("failed to detach disk") @@ -2593,7 +2558,7 @@ mod test { &opctx, silo_id, project_id, - disk_id, + disk, Name::from_str(DISK_NAME).unwrap().into(), Some(instance_state.instance().id()), false, // use the pantry diff --git a/nexus/src/app/snapshot.rs b/nexus/src/app/snapshot.rs index 6461366b85c..355e5290aec 100644 --- a/nexus/src/app/snapshot.rs +++ b/nexus/src/app/snapshot.rs @@ -12,6 +12,7 @@ use nexus_db_queries::authn; use nexus_db_queries::authz; use nexus_db_queries::context::OpContext; use nexus_db_queries::db; +use nexus_db_queries::db::datastore; use nexus_types::external_api::params; use nexus_types::external_api::params::DiskSelector; use omicron_common::api::external::CreateResult; @@ -96,6 +97,11 @@ impl super::Nexus { )); } + let disk: datastore::CrucibleDisk = + match self.datastore().disk_get(&opctx, authz_disk.id()).await? { + datastore::Disk::Crucible(disk) => disk, + }; + // If there isn't a running propolis, Nexus needs to use the Crucible // Pantry to make this snapshot let use_the_pantry = if let Some(attach_instance_id) = @@ -120,12 +126,14 @@ impl super::Nexus { true }; + let attach_instance_id = disk.runtime().attach_instance_id; + let saga_params = sagas::snapshot_create::Params { serialized_authn: authn::saga::Serialized::for_opctx(opctx), silo_id: authz_silo.id(), project_id: authz_project.id(), - disk_id: authz_disk.id(), - attach_instance_id: db_disk.runtime_state.attach_instance_id, + disk, + attach_instance_id, use_the_pantry, create_params: params.clone(), }; diff --git a/nexus/src/external_api/http_entrypoints.rs b/nexus/src/external_api/http_entrypoints.rs index c6aeabc60a5..a13a83d7ba9 100644 --- a/nexus/src/external_api/http_entrypoints.rs +++ b/nexus/src/external_api/http_entrypoints.rs @@ -2376,7 +2376,6 @@ impl NexusExternalApi for NexusExternalApiImpl { .disk_list(&opctx, &project_lookup, &paginated_by) .await? 
.into_iter() - .map(|disk| disk.into()) .collect(); Ok(HttpResponseOk(ScanByNameOrId::results_page( &query, @@ -2442,8 +2441,7 @@ impl NexusExternalApi for NexusExternalApiImpl { disk: path.disk, project: query.project, }; - let (.., disk) = - nexus.disk_lookup(&opctx, disk_selector)?.fetch().await?; + let disk = nexus.disk_get(&opctx, disk_selector).await?; Ok(HttpResponseOk(disk.into())) }; apictx @@ -2540,7 +2538,7 @@ impl NexusExternalApi for NexusExternalApiImpl { }; let disk_lookup = nexus.disk_lookup(&opctx, disk_selector)?; - nexus.disk_manual_import(&disk_lookup, params).await?; + nexus.disk_manual_import(&opctx, &disk_lookup, params).await?; Ok(HttpResponseUpdatedNoContent()) }; diff --git a/nexus/test-utils/src/sql.rs b/nexus/test-utils/src/sql.rs index 5d1b845f757..a37de4bea40 100644 --- a/nexus/test-utils/src/sql.rs +++ b/nexus/test-utils/src/sql.rs @@ -38,6 +38,8 @@ pub enum AnySqlType { DateTime, Enum(SqlEnum), Float4(f32), + Int1(i8), + Int2(i16), Int4(i32), Int8(i64), Json(serde_json::value::Value), @@ -69,6 +71,18 @@ impl From for AnySqlType { } } +impl From for AnySqlType { + fn from(value: i8) -> Self { + Self::Int1(value) + } +} + +impl From for AnySqlType { + fn from(value: i16) -> Self { + Self::Int2(value) + } +} + impl From for AnySqlType { fn from(value: i32) -> Self { Self::Int4(value) @@ -134,6 +148,12 @@ impl<'a> tokio_postgres::types::FromSql<'a> for AnySqlType { if Uuid::accepts(ty) { return Ok(AnySqlType::Uuid(Uuid::from_sql(ty, raw)?)); } + if i8::accepts(ty) { + return Ok(AnySqlType::Int1(i8::from_sql(ty, raw)?)); + } + if i16::accepts(ty) { + return Ok(AnySqlType::Int2(i16::from_sql(ty, raw)?)); + } if i32::accepts(ty) { return Ok(AnySqlType::Int4(i32::from_sql(ty, raw)?)); } diff --git a/nexus/tests/integration_tests/crucible_replacements.rs b/nexus/tests/integration_tests/crucible_replacements.rs index 657c5644cbe..d166c1b5834 100644 --- a/nexus/tests/integration_tests/crucible_replacements.rs +++ b/nexus/tests/integration_tests/crucible_replacements.rs @@ -15,6 +15,7 @@ use nexus_db_model::RegionReplacementState; use nexus_db_model::RegionSnapshotReplacementState; use nexus_db_queries::context::OpContext; use nexus_db_queries::db::DataStore; +use nexus_db_queries::db::datastore::Disk; use nexus_db_queries::db::datastore::region_snapshot_replacement::*; use nexus_lockstep_client::types::LastResult; use nexus_test_utils::background::*; @@ -235,11 +236,8 @@ async fn test_region_replacement_does_not_create_freed_region( let disk = create_disk(&client, PROJECT_NAME, "disk").await; // Before expunging the physical disk, save the DB model - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); assert_eq!(db_disk.id(), disk.identity.id); @@ -331,11 +329,8 @@ mod region_replacement { // Manually create the region replacement request for the first // allocated region of that disk - let (.., db_disk) = LookupPath::new(&opctx, &datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); assert_eq!(db_disk.id(), disk.identity.id); @@ -791,11 +786,8 @@ async fn test_racing_replacements_for_soft_deleted_disk_volume( .await; // Before deleting the disk, save the DB model - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let 
Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); assert_eq!(db_disk.id(), disk.identity.id); @@ -1364,11 +1356,8 @@ mod region_snapshot_replacement { // Manually create the region snapshot replacement request for the // first allocated region of that disk - let (.., db_disk) = LookupPath::new(&opctx, &datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); assert_eq!(db_disk.id(), disk.identity.id); @@ -1437,12 +1426,10 @@ mod region_snapshot_replacement { .await .unwrap(); - let (.., db_disk_from_snapshot) = - LookupPath::new(&opctx, &datastore) - .disk_id(disk_from_snapshot.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk_from_snapshot) = datastore + .disk_get(&opctx, disk_from_snapshot.identity.id) + .await + .unwrap(); assert!(volumes_set.contains(&db_snapshot.volume_id())); assert!(volumes_set.contains(&db_disk_from_snapshot.volume_id())); @@ -1679,12 +1666,11 @@ mod region_snapshot_replacement { .parsed_body() .unwrap(); - let (.., db_disk_from_snapshot) = - LookupPath::new(&self.opctx(), &self.datastore) - .disk_id(disk_from_snapshot.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk_from_snapshot) = self + .datastore + .disk_get(&self.opctx(), disk_from_snapshot.identity.id) + .await + .unwrap(); let result = self .datastore @@ -2077,11 +2063,8 @@ async fn test_replacement_sanity(cptestctx: &ControlPlaneTestContext) { .await; // Before expunging the physical disk, save the DB model - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); assert_eq!(db_disk.id(), disk.identity.id); @@ -2188,11 +2171,8 @@ async fn test_region_replacement_triple_sanity( .await; // Before expunging any physical disk, save some DB models - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); let (.., db_snapshot) = LookupPath::new(&opctx, datastore) .snapshot_id(snapshot.identity.id) @@ -2314,11 +2294,8 @@ async fn test_region_replacement_triple_sanity_2( .await; // Before expunging any physical disk, save some DB models - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); let (.., db_snapshot) = LookupPath::new(&opctx, datastore) .snapshot_id(snapshot.identity.id) @@ -2476,11 +2453,8 @@ async fn test_replacement_sanity_twice(cptestctx: &ControlPlaneTestContext) { // Manually create region snapshot replacement requests for each region // snapshot. - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); assert_eq!(db_disk.id(), disk.identity.id); @@ -2583,11 +2557,8 @@ async fn test_read_only_replacement_sanity( // Manually create region snapshot replacement requests for each region // snapshot. 
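// Note: `Disk` currently has exactly one variant, so the
// `let Disk::Crucible(db_disk) = ...` bindings used throughout these tests are
// irrefutable and compile without a `match`. If a second variant is added
// later, each of these sites becomes a compile error and would need something
// like the following hypothetical form:
//
//     let db_disk = match datastore.disk_get(&opctx, disk.identity.id).await.unwrap() {
//         Disk::Crucible(d) => d,
//         _ => panic!("test expected a Crucible disk"),
//     };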
- let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); assert_eq!(db_disk.id(), disk.identity.id); @@ -2751,11 +2722,8 @@ async fn test_replacement_sanity_twice_after_snapshot_delete( // Manually create region snapshot replacement requests for each region // snapshot. - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let Disk::Crucible(db_disk) = + datastore.disk_get(&opctx, disk.identity.id).await.unwrap(); assert_eq!(db_disk.id(), disk.identity.id); diff --git a/nexus/tests/integration_tests/disks.rs b/nexus/tests/integration_tests/disks.rs index ae12c3b37f4..de2d32e52c4 100644 --- a/nexus/tests/integration_tests/disks.rs +++ b/nexus/tests/integration_tests/disks.rs @@ -13,6 +13,7 @@ use nexus_config::RegionAllocationStrategy; use nexus_db_lookup::LookupPath; use nexus_db_model::PhysicalDiskPolicy; use nexus_db_queries::context::OpContext; +use nexus_db_queries::db::datastore; use nexus_db_queries::db::datastore::REGION_REDUNDANCY_THRESHOLD; use nexus_db_queries::db::datastore::RegionAllocationFor; use nexus_db_queries::db::datastore::RegionAllocationParameters; @@ -115,6 +116,21 @@ async fn create_project_and_pool(client: &ClientTestContext) -> Uuid { project.identity.id } +async fn get_crucible_disk( + datastore: &Arc, + opctx: &OpContext, + disk_id: Uuid, +) -> datastore::CrucibleDisk { + let disk = datastore + .disk_get(opctx, disk_id) + .await + .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + + match disk { + datastore::Disk::Crucible(disk) => disk, + } +} + #[nexus_test] async fn test_disk_not_found_before_creation( cptestctx: &ControlPlaneTestContext, @@ -1955,11 +1971,8 @@ async fn test_region_allocation_strategy_random_is_idempotent( // Assert disk has three allocated regions let disk_id = disk.identity.id; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk_id) - .fetch() - .await - .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + + let db_disk = get_crucible_disk(datastore, &opctx, disk_id).await; let allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -2085,11 +2098,8 @@ async fn test_single_region_allocate_for_replace( // Assert disk has three allocated regions let disk_id = disk.identity.id; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk_id) - .fetch() - .await - .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + + let db_disk = get_crucible_disk(datastore, &opctx, disk_id).await; let allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -2169,11 +2179,7 @@ async fn test_single_region_allocate_for_replace_not_enough_zpools( // Assert disk has three allocated regions let disk_id = disk.identity.id; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk_id) - .fetch() - .await - .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + let db_disk = get_crucible_disk(datastore, &opctx, disk_id).await; let allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -2257,11 +2263,8 @@ async fn test_no_halt_disk_delete_one_region_on_expunged_agent( let disk = create_disk(&client, PROJECT_NAME, DISK_NAME).await; // Grab the db record now, before the delete - let (.., db_disk) = LookupPath::new(&opctx, 
datastore) - .disk_id(disk.identity.id) - .fetch() - .await - .unwrap(); + let disk_id = disk.identity.id; + let db_disk = get_crucible_disk(datastore, &opctx, disk_id).await; // Choose one of the datasets, and drop the simulated Crucible agent let zpool = disk_test.zpools().next().expect("Expected at least one zpool"); @@ -2337,11 +2340,7 @@ async fn test_disk_expunge(cptestctx: &ControlPlaneTestContext) { // Assert disk has three allocated regions let disk_id = disk.identity.id; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk_id) - .fetch() - .await - .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + let db_disk = get_crucible_disk(datastore, &opctx, disk_id).await; let allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); @@ -2400,11 +2399,7 @@ async fn test_do_not_provision_on_dataset(cptestctx: &ControlPlaneTestContext) { // Assert no region was allocated to the marked dataset let disk_id = disk.identity.id; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk_id) - .fetch() - .await - .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); + let db_disk = get_crucible_disk(datastore, &opctx, disk_id).await; let allocated_regions = datastore.get_allocated_regions(db_disk.volume_id()).await.unwrap(); diff --git a/nexus/tests/integration_tests/schema.rs b/nexus/tests/integration_tests/schema.rs index e85a02b9a77..582ac5f4baf 100644 --- a/nexus/tests/integration_tests/schema.rs +++ b/nexus/tests/integration_tests/schema.rs @@ -3127,6 +3127,354 @@ fn after_188_0_0<'a>(ctx: &'a MigrationContext<'a>) -> BoxFuture<'a, ()> { }) } +fn before_202_0_0<'a>(ctx: &'a MigrationContext<'a>) -> BoxFuture<'a, ()> { + Box::pin(async move { + ctx.client + .execute( + "INSERT INTO omicron.public.disk ( + id, + name, + description, + time_created, + time_modified, + time_deleted, + + rcgen, + project_id, + + volume_id, + + disk_state, + attach_instance_id, + state_generation, + slot, + time_state_updated, + + size_bytes, + block_size, + origin_snapshot, + origin_image, + + pantry_address + ) + VALUES + ( + 'd8b7ba02-4bd5-417d-84d3-67b4e65bec70', + 'regular', + 'just a disk', + '2025-10-21T20:26:25+0000', + '2025-10-21T20:26:25+0000', + NULL, + + 0, + 'd904d22e-cc45-4b2e-b37f-5864d8eba323', + + '1e17286f-107d-494c-8b2d-904b70d5b706', + + 'attached', + '9c1f6ee5-478f-4e6a-b481-be8b69f1daab', + 75, + 0, + '2025-10-21T20:27:11+0000', + + 1073741824, + '512', + 'b017fe60-9a4a-4fe6-a7d3-4a7c3ab23a98', + NULL, + + '[fd00:1122:3344:101::7]:4567' + ), + ( + '336ed8ca-9bbf-4db9-9f2d-627f8d156f91', + 'deleted', + 'should migrate deleted disks too, even if they are old', + '2024-10-21T20:26:25+0000', + '2024-10-21T20:26:25+0000', + '2025-10-15T21:38:05+0000', + + 2, + 'c9cfb288-a39e-4ebb-ac27-b26c9248a46a', + + '0a1d0a38-f927-4260-ad23-7f9c04277a23', + + 'detached', + NULL, + 75786, + 1, + '2025-10-14T20:27:11+0000', + + 1073741824, + '4096', + NULL, + '971395a7-4206-4dc4-ba58-e2402724270a', + + NULL + );", + &[], + ) + .await + .expect("inserted disks"); + }) +} + +fn after_202_0_0<'a>(ctx: &'a MigrationContext<'a>) -> BoxFuture<'a, ()> { + Box::pin(async move { + // first disk + + let rows = ctx + .client + .query( + "SELECT + id, + name, + description, + time_created, + time_modified, + time_deleted, + + rcgen, + project_id, + + disk_state, + attach_instance_id, + state_generation, + slot, + time_state_updated, + + size_bytes, + block_size + FROM + omicron.public.disk + WHERE + id = 
'd8b7ba02-4bd5-417d-84d3-67b4e65bec70' + ;", + &[], + ) + .await + .expect("queried disk"); + + let rows = process_rows(&rows); + assert_eq!(rows.len(), 1); + let row = &rows[0]; + + assert!(row.values[5].expect("time_deleted").is_none()); + + assert_eq!( + vec![ + row.values[0].expect("id").unwrap(), + row.values[1].expect("name").unwrap(), + row.values[2].expect("description").unwrap(), + row.values[3].expect("time_created").unwrap(), + row.values[4].expect("time_modified").unwrap(), + row.values[6].expect("rcgen").unwrap(), + row.values[7].expect("project_id").unwrap(), + row.values[8].expect("disk_state").unwrap(), + row.values[9].expect("attach_instance_id").unwrap(), + row.values[10].expect("state_generation").unwrap(), + row.values[11].expect("slot").unwrap(), + row.values[12].expect("time_state_updated").unwrap(), + row.values[13].expect("size_bytes").unwrap(), + row.values[14].expect("block_size").unwrap(), + ], + vec![ + &AnySqlType::Uuid( + "d8b7ba02-4bd5-417d-84d3-67b4e65bec70".parse().unwrap() + ), + &AnySqlType::String("regular".to_string()), + &AnySqlType::String("just a disk".to_string()), + &AnySqlType::DateTime, + &AnySqlType::DateTime, + &AnySqlType::Int8(0), + &AnySqlType::Uuid( + "d904d22e-cc45-4b2e-b37f-5864d8eba323".parse().unwrap() + ), + &AnySqlType::String("attached".to_string()), + &AnySqlType::Uuid( + "9c1f6ee5-478f-4e6a-b481-be8b69f1daab".parse().unwrap() + ), + &AnySqlType::Int8(75), + &AnySqlType::Int2(0), + &AnySqlType::DateTime, + &AnySqlType::Int8(1073741824), + &AnySqlType::Enum(SqlEnum::from(("block_size", "512"))), + ], + ); + + let rows = ctx + .client + .query( + "SELECT + disk_id, + volume_id, + origin_snapshot, + origin_image, + pantry_address + FROM + omicron.public.disk_type_crucible + WHERE + disk_id = 'd8b7ba02-4bd5-417d-84d3-67b4e65bec70' + ;", + &[], + ) + .await + .expect("queried disk_type_crucible"); + + let rows = process_rows(&rows); + assert_eq!(rows.len(), 1); + let row = &rows[0]; + + assert!(row.values[3].expect("origin_image").is_none()); + + assert_eq!( + vec![ + row.values[0].expect("disk_id").unwrap(), + row.values[1].expect("volume_id").unwrap(), + row.values[2].expect("origin_snapshot").unwrap(), + row.values[4].expect("pantry_address").unwrap(), + ], + vec![ + &AnySqlType::Uuid( + "d8b7ba02-4bd5-417d-84d3-67b4e65bec70".parse().unwrap() + ), + &AnySqlType::Uuid( + "1e17286f-107d-494c-8b2d-904b70d5b706".parse().unwrap() + ), + &AnySqlType::Uuid( + "b017fe60-9a4a-4fe6-a7d3-4a7c3ab23a98".parse().unwrap() + ), + &AnySqlType::String("[fd00:1122:3344:101::7]:4567".to_string()), + ] + ); + + // second disk + + let rows = ctx + .client + .query( + "SELECT + id, + name, + description, + time_created, + time_modified, + time_deleted, + + rcgen, + project_id, + + disk_state, + attach_instance_id, + state_generation, + slot, + time_state_updated, + + size_bytes, + block_size + FROM + omicron.public.disk + WHERE + id = '336ed8ca-9bbf-4db9-9f2d-627f8d156f91' + ;", + &[], + ) + .await + .expect("queried disk"); + + let rows = process_rows(&rows); + assert_eq!(rows.len(), 1); + let row = &rows[0]; + + assert!(row.values[9].expect("attach_instance_id").is_none()); + + assert_eq!( + vec![ + row.values[0].expect("id").unwrap(), + row.values[1].expect("name").unwrap(), + row.values[2].expect("description").unwrap(), + row.values[3].expect("time_created").unwrap(), + row.values[4].expect("time_modified").unwrap(), + row.values[5].expect("time_deleted").unwrap(), + row.values[6].expect("rcgen").unwrap(), + row.values[7].expect("project_id").unwrap(), 
+ row.values[8].expect("disk_state").unwrap(), + row.values[10].expect("state_generation").unwrap(), + row.values[11].expect("slot").unwrap(), + row.values[12].expect("time_state_updated").unwrap(), + row.values[13].expect("size_bytes").unwrap(), + row.values[14].expect("block_size").unwrap(), + ], + vec![ + &AnySqlType::Uuid( + "336ed8ca-9bbf-4db9-9f2d-627f8d156f91".parse().unwrap() + ), + &AnySqlType::String("deleted".to_string()), + &AnySqlType::String( + "should migrate deleted disks too, even if they are old" + .to_string() + ), + &AnySqlType::DateTime, + &AnySqlType::DateTime, + &AnySqlType::DateTime, + &AnySqlType::Int8(2), + &AnySqlType::Uuid( + "c9cfb288-a39e-4ebb-ac27-b26c9248a46a".parse().unwrap() + ), + &AnySqlType::String("detached".to_string()), + &AnySqlType::Int8(75786), + &AnySqlType::Int2(1), + &AnySqlType::DateTime, + &AnySqlType::Int8(1073741824), + &AnySqlType::Enum(SqlEnum::from(("block_size", "4096"))), + ], + ); + + let rows = ctx + .client + .query( + "SELECT + disk_id, + volume_id, + origin_snapshot, + origin_image, + pantry_address + FROM + omicron.public.disk_type_crucible + WHERE + disk_id = '336ed8ca-9bbf-4db9-9f2d-627f8d156f91' + ;", + &[], + ) + .await + .expect("queried disk_type_crucible"); + + let rows = process_rows(&rows); + assert_eq!(rows.len(), 1); + let row = &rows[0]; + + assert!(row.values[2].expect("origin_snapshot").is_none()); + assert!(row.values[4].expect("pantry_address").is_none()); + + assert_eq!( + vec![ + row.values[0].expect("disk_id").unwrap(), + row.values[1].expect("volume_id").unwrap(), + row.values[3].expect("origin_image").unwrap(), + ], + vec![ + &AnySqlType::Uuid( + "336ed8ca-9bbf-4db9-9f2d-627f8d156f91".parse().unwrap() + ), + &AnySqlType::Uuid( + "0a1d0a38-f927-4260-ad23-7f9c04277a23".parse().unwrap() + ), + &AnySqlType::Uuid( + "971395a7-4206-4dc4-ba58-e2402724270a".parse().unwrap() + ), + ] + ); + }) +} + // Lazily initializes all migration checks. The combination of Rust function // pointers and async makes defining a static table fairly painful, so we're // using lazy initialization instead. 
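The trailing comment is worth unpacking: an `async fn` cannot sit directly in a `static` table because every `async fn` has its own anonymous, unnameable future type. Purely as an illustration — none of the names below (`CheckFn`, `before_boxed`, `checks`) come from the Omicron sources — a lazily built map of boxed-future function pointers might look like this:

```rust
use std::collections::BTreeMap;
use std::future::Future;
use std::pin::Pin;

// Each `async fn` has a distinct anonymous future type, so a table holding
// many of them needs a common signature: a plain function pointer returning
// a boxed, type-erased future.
type BoxFut = Pin<Box<dyn Future<Output = ()> + Send>>;
type CheckFn = fn() -> BoxFut;

async fn before_check() { /* seed pre-migration rows here */ }
async fn after_check() { /* assert on the migrated rows here */ }

fn before_boxed() -> BoxFut {
    Box::pin(before_check())
}

fn after_boxed() -> BoxFut {
    Box::pin(after_check())
}

// Building the map inside a function, on demand, avoids having to express
// all of the above in a `static` initializer.
fn checks() -> BTreeMap<&'static str, (CheckFn, CheckFn)> {
    let mut map: BTreeMap<&'static str, (CheckFn, CheckFn)> = BTreeMap::new();
    map.insert("202.0.0", (before_boxed, after_boxed));
    map
}

fn main() {
    // A test harness would call and await these hooks around the migration.
    let map = checks();
    assert!(map.contains_key("202.0.0"));
}
```

The `DataMigrationFns::new().before(..).after(..)` builder registered in the hunk just below plays the same role, keyed by schema `Version` rather than a string.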
@@ -3233,6 +3581,10 @@ fn get_migration_checks() -> BTreeMap<Version, DataMigrationFns> {
         Version::new(188, 0, 0),
         DataMigrationFns::new().before(before_188_0_0).after(after_188_0_0),
     );
+    map.insert(
+        Version::new(202, 0, 0),
+        DataMigrationFns::new().before(before_202_0_0).after(after_202_0_0),
+    );
     map
 }
diff --git a/nexus/tests/integration_tests/snapshots.rs b/nexus/tests/integration_tests/snapshots.rs
index 80807a78eb3..bbae7053abf 100644
--- a/nexus/tests/integration_tests/snapshots.rs
+++ b/nexus/tests/integration_tests/snapshots.rs
@@ -15,6 +15,7 @@ use nexus_db_model::to_db_typed_uuid;
 use nexus_db_queries::authz;
 use nexus_db_queries::context::OpContext;
 use nexus_db_queries::db;
+use nexus_db_queries::db::datastore::Disk;
 use nexus_db_queries::db::datastore::REGION_REDUNDANCY_THRESHOLD;
 use nexus_db_queries::db::datastore::RegionAllocationFor;
 use nexus_db_queries::db::datastore::RegionAllocationParameters;
@@ -32,7 +33,6 @@ use nexus_types::external_api::params;
 use nexus_types::external_api::views;
 use omicron_common::api::external;
 use omicron_common::api::external::ByteCount;
-use omicron_common::api::external::Disk;
 use omicron_common::api::external::DiskState;
 use omicron_common::api::external::IdentityMetadataCreateParams;
 use omicron_common::api::external::Instance;
@@ -109,7 +109,7 @@ async fn test_snapshot_basic(cptestctx: &ControlPlaneTestContext) {
         size: disk_size,
     };
 
-    let base_disk: Disk = NexusRequest::new(
+    let base_disk: external::Disk = NexusRequest::new(
         RequestBuilder::new(client, Method::POST, &disks_url)
             .body(Some(&base_disk))
             .expect_status(Some(StatusCode::CREATED)),
@@ -219,7 +219,7 @@ async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) {
         size: disk_size,
     };
 
-    let base_disk: Disk = NexusRequest::new(
+    let base_disk: external::Disk = NexusRequest::new(
         RequestBuilder::new(client, Method::POST, &disks_url)
             .body(Some(&base_disk))
             .expect_status(Some(StatusCode::CREATED)),
@@ -234,7 +234,7 @@ async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) {
     // Assert disk is detached
     let disk_url =
         format!("/v1/disks/{}?project={}", base_disk_name, PROJECT_NAME);
-    let disk: Disk = NexusRequest::object_get(client, &disk_url)
+    let disk: external::Disk = NexusRequest::object_get(client, &disk_url)
         .authn_as(AuthnMode::PrivilegedUser)
         .execute()
         .await
@@ -266,7 +266,7 @@ async fn test_snapshot_without_instance(cptestctx: &ControlPlaneTestContext) {
     // Assert disk is still detached
     let disk_url =
         format!("/v1/disks/{}?project={}", base_disk_name, PROJECT_NAME);
-    let disk: Disk = NexusRequest::object_get(client, &disk_url)
+    let disk: external::Disk = NexusRequest::object_get(client, &disk_url)
         .authn_as(AuthnMode::PrivilegedUser)
         .execute()
         .await
@@ -316,7 +316,7 @@ async fn test_snapshot_stopped_instance(cptestctx: &ControlPlaneTestContext) {
         size: disk_size,
     };
 
-    let base_disk: Disk = NexusRequest::new(
+    let base_disk: external::Disk = NexusRequest::new(
         RequestBuilder::new(client, Method::POST, &disks_url)
             .body(Some(&base_disk))
             .expect_status(Some(StatusCode::CREATED)),
@@ -407,7 +407,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) {
         size: disk_size,
     };
 
-    let base_disk: Disk = NexusRequest::new(
+    let base_disk: external::Disk = NexusRequest::new(
         RequestBuilder::new(client, Method::POST, &disks_url)
             .body(Some(&base_disk))
             .expect_status(Some(StatusCode::CREATED)),
@@ -468,7 +468,7 @@ async fn test_delete_snapshot(cptestctx: &ControlPlaneTestContext) {
         size: disk_size,
     };
 
-    let _snap_disk: Disk =
NexusRequest::new( + let _snap_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&snap_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -966,7 +966,7 @@ async fn test_snapshot_unwind(cptestctx: &ControlPlaneTestContext) { size: disk_size, }; - let _base_disk: Disk = NexusRequest::new( + let _base_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&base_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -1261,7 +1261,7 @@ async fn test_multiple_deletes_not_sent(cptestctx: &ControlPlaneTestContext) { size: disk_size, }; - let base_disk: Disk = NexusRequest::new( + let base_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&base_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -1482,9 +1482,9 @@ async fn test_region_allocation_for_snapshot( // Assert disk has three allocated regions let disk_id = disk.identity.id; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk_id) - .fetch() + + let Disk::Crucible(db_disk) = datastore + .disk_get(&opctx, disk_id) .await .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); diff --git a/nexus/tests/integration_tests/volume_management.rs b/nexus/tests/integration_tests/volume_management.rs index 5975a51d7f7..5dbc1a0af2f 100644 --- a/nexus/tests/integration_tests/volume_management.rs +++ b/nexus/tests/integration_tests/volume_management.rs @@ -29,6 +29,7 @@ use nexus_db_queries::db; use nexus_db_queries::db::DataStore; use nexus_db_queries::db::datastore::CrucibleResources; use nexus_db_queries::db::datastore::DestVolume; +use nexus_db_queries::db::datastore::Disk; use nexus_db_queries::db::datastore::ExistingTarget; use nexus_db_queries::db::datastore::RegionAllocationFor; use nexus_db_queries::db::datastore::RegionAllocationParameters; @@ -54,8 +55,8 @@ use nexus_types::external_api::params; use nexus_types::external_api::views; use nexus_types::identity::Asset; use nexus_types::identity::Resource; +use omicron_common::api::external; use omicron_common::api::external::ByteCount; -use omicron_common::api::external::Disk; use omicron_common::api::external::IdentityMetadataCreateParams; use omicron_common::api::external::Name; use omicron_common::api::internal; @@ -142,7 +143,7 @@ async fn create_base_disk( image: &views::Image, disks_url: &String, base_disk_name: &Name, -) -> Disk { +) -> external::Disk { let disk_size = ByteCount::from_gibibytes_u32(2); let base_disk = params::DiskCreate { identity: IdentityMetadataCreateParams { @@ -434,7 +435,7 @@ async fn test_snapshot_prevents_other_disk( assert!(disk_test.crucible_resources_deleted().await); // Disk allocation will work now - let _next_disk: Disk = NexusRequest::new( + let _next_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&next_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -481,7 +482,7 @@ async fn test_multiple_disks_multiple_snapshots_order_1( size: disk_size, }; - let first_disk: Disk = NexusRequest::new( + let first_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&first_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -523,7 +524,7 @@ async fn test_multiple_disks_multiple_snapshots_order_1( size: disk_size, }; - let second_disk: Disk = NexusRequest::new( + let second_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, 
&disks_url) .body(Some(&second_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -616,7 +617,7 @@ async fn test_multiple_disks_multiple_snapshots_order_2( size: disk_size, }; - let first_disk: Disk = NexusRequest::new( + let first_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&first_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -658,7 +659,7 @@ async fn test_multiple_disks_multiple_snapshots_order_2( size: disk_size, }; - let second_disk: Disk = NexusRequest::new( + let second_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&second_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -746,7 +747,7 @@ async fn prepare_for_test_multiple_layers_of_snapshots( size: disk_size, }; - let layer_1_disk: Disk = NexusRequest::new( + let layer_1_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&layer_1_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -788,7 +789,7 @@ async fn prepare_for_test_multiple_layers_of_snapshots( size: disk_size, }; - let layer_2_disk: Disk = NexusRequest::new( + let layer_2_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&layer_2_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -830,7 +831,7 @@ async fn prepare_for_test_multiple_layers_of_snapshots( size: disk_size, }; - let layer_3_disk: Disk = NexusRequest::new( + let layer_3_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&layer_3_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -1183,7 +1184,7 @@ async fn delete_image_test( size: disk_size, }; - let _base_disk: Disk = NexusRequest::new( + let _base_disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&base_disk)) .expect_status(Some(StatusCode::CREATED)), @@ -2570,7 +2571,8 @@ async fn test_snapshot_create_saga_unwinds_correctly( size: disk_size, }; - let _disk: Disk = object_create(client, &disks_url, &base_disk).await; + let _disk: external::Disk = + object_create(client, &disks_url, &base_disk).await; // Set the third agent to fail creating the region for the snapshot let zpool = @@ -3489,7 +3491,7 @@ async fn test_cte_returns_regions(cptestctx: &ControlPlaneTestContext) { size: ByteCount::from_gibibytes_u32(2), }; - let disk: Disk = NexusRequest::new( + let disk: external::Disk = NexusRequest::new( RequestBuilder::new(client, Method::POST, &disks_url) .body(Some(&disk)) .expect_status(Some(StatusCode::CREATED)), @@ -3509,9 +3511,8 @@ async fn test_cte_returns_regions(cptestctx: &ControlPlaneTestContext) { let disk_id = disk.identity.id; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk_id) - .fetch() + let Disk::Crucible(db_disk) = datastore + .disk_get(&opctx, disk_id) .await .unwrap_or_else(|_| panic!("test disk {:?} should exist", disk_id)); @@ -4095,9 +4096,8 @@ async fn test_read_only_region_reference_counting( // Perform region snapshot replacement for one of the snapshot's regions, // causing a read-only region to be created. 
- let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() + let Disk::Crucible(db_disk) = datastore + .disk_get(&opctx, disk.identity.id) .await .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); @@ -4145,9 +4145,8 @@ async fn test_read_only_region_reference_counting( // The disk-from-snap VCR should also reference that read-only region - let (.., db_disk_from_snapshot) = LookupPath::new(&opctx, datastore) - .disk_id(disk_from_snapshot.identity.id) - .fetch() + let Disk::Crucible(db_disk_from_snapshot) = datastore + .disk_get(&opctx, disk_from_snapshot.identity.id) .await .unwrap_or_else(|_| { panic!( @@ -4363,9 +4362,8 @@ async fn test_read_only_region_reference_counting_layers( // Perform region snapshot replacement for one of the snapshot's regions, // causing a read-only region to be created. - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() + let Disk::Crucible(db_disk) = datastore + .disk_get(&opctx, disk.identity.id) .await .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); @@ -4408,9 +4406,8 @@ async fn test_read_only_region_reference_counting_layers( // The disk-from-snap VCR should also reference that read-only region - let (.., db_disk_from_snapshot) = LookupPath::new(&opctx, datastore) - .disk_id(disk_from_snapshot.identity.id) - .fetch() + let Disk::Crucible(db_disk_from_snapshot) = datastore + .disk_get(&opctx, disk_from_snapshot.identity.id) .await .unwrap_or_else(|_| { panic!( @@ -4596,9 +4593,8 @@ async fn test_volume_replace_snapshot_respects_accounting( let disk = create_disk(&client, PROJECT_NAME, "disk").await; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() + let Disk::Crucible(db_disk) = datastore + .disk_get(&opctx, disk.identity.id) .await .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); @@ -4802,9 +4798,8 @@ async fn test_volume_remove_rop_respects_accounting( let disk = create_disk(&client, PROJECT_NAME, "disk").await; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() + let Disk::Crucible(db_disk) = datastore + .disk_get(&opctx, disk.identity.id) .await .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); @@ -4832,9 +4827,8 @@ async fn test_volume_remove_rop_respects_accounting( ) .await; - let (.., db_disk_from_snapshot) = LookupPath::new(&opctx, datastore) - .disk_id(disk_from_snapshot.identity.id) - .fetch() + let Disk::Crucible(db_disk_from_snapshot) = datastore + .disk_get(&opctx, disk_from_snapshot.identity.id) .await .unwrap_or_else(|_| { panic!( @@ -4965,9 +4959,8 @@ async fn test_volume_remove_rop_respects_accounting_no_modify_others( let disk = create_disk(&client, PROJECT_NAME, "disk").await; - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() + let Disk::Crucible(db_disk) = datastore + .disk_get(&opctx, disk.identity.id) .await .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); @@ -4995,9 +4988,8 @@ async fn test_volume_remove_rop_respects_accounting_no_modify_others( ) .await; - let (.., db_disk_from_snapshot) = LookupPath::new(&opctx, datastore) - .disk_id(disk_from_snapshot.identity.id) - .fetch() + let Disk::Crucible(db_disk_from_snapshot) = datastore + .disk_get(&opctx, disk_from_snapshot.identity.id) .await .unwrap_or_else(|_| { panic!( @@ -5014,17 +5006,15 @@ async fn 
test_volume_remove_rop_respects_accounting_no_modify_others( ) .await; - let (.., db_another_disk_from_snapshot) = - LookupPath::new(&opctx, datastore) - .disk_id(another_disk_from_snapshot.identity.id) - .fetch() - .await - .unwrap_or_else(|_| { - panic!( - "another_disk_from_snapshot {:?} should exist", - another_disk_from_snapshot.identity.id - ) - }); + let Disk::Crucible(db_another_disk_from_snapshot) = datastore + .disk_get(&opctx, another_disk_from_snapshot.identity.id) + .await + .unwrap_or_else(|_| { + panic!( + "another_disk_from_snapshot {:?} should exist", + another_disk_from_snapshot.identity.id + ) + }); // Assert the correct volume resource usage records before the removal: the // snapshot volume, disk_from_snapshot volume, and @@ -5622,9 +5612,8 @@ async fn test_double_layer_with_read_only_region_delete( // Perform region snapshot replacement for one of the snapshot's targets, // causing a read-only region to be created. - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() + let Disk::Crucible(db_disk) = datastore + .disk_get(&opctx, disk.identity.id) .await .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); @@ -5731,9 +5720,8 @@ async fn test_double_layer_snapshot_with_read_only_region_delete_2( // Perform region snapshot replacement for two of the snapshot's targets, // causing two read-only regions to be created. - let (.., db_disk) = LookupPath::new(&opctx, datastore) - .disk_id(disk.identity.id) - .fetch() + let Disk::Crucible(db_disk) = datastore + .disk_get(&opctx, disk.identity.id) .await .unwrap_or_else(|_| panic!("disk {:?} should exist", disk.identity.id)); diff --git a/openapi/nexus.json b/openapi/nexus.json index 1428001ec44..e782ca6ae68 100644 --- a/openapi/nexus.json +++ b/openapi/nexus.json @@ -17777,6 +17777,9 @@ "device_path": { "type": "string" }, + "disk_type": { + "$ref": "#/components/schemas/DiskType" + }, "id": { "description": "unique, immutable, system-controlled identifier for each resource", "type": "string", @@ -17827,6 +17830,7 @@ "block_size", "description", "device_path", + "disk_type", "id", "name", "project_id", @@ -18195,6 +18199,12 @@ } ] }, + "DiskType": { + "type": "string", + "enum": [ + "crucible" + ] + }, "Distributiondouble": { "description": "A distribution is a sequence of bins and counts in those bins, and some statistical information tracked to compute the mean, standard deviation, and quantile estimates.\n\nMin, max, and the p-* quantiles are treated as optional due to the possibility of distribution operations, like subtraction.", "type": "object", diff --git a/schema/crdb/dbinit.sql b/schema/crdb/dbinit.sql index ca22be8fd9d..d6ce1e88e78 100644 --- a/schema/crdb/dbinit.sql +++ b/schema/crdb/dbinit.sql @@ -1474,6 +1474,10 @@ CREATE TYPE IF NOT EXISTS omicron.public.block_size AS ENUM ( '4096' ); +CREATE TYPE IF NOT EXISTS omicron.public.disk_type AS ENUM ( + 'crucible' +); + CREATE TABLE IF NOT EXISTS omicron.public.disk ( /* Identity metadata (resource) */ id UUID PRIMARY KEY, @@ -1491,13 +1495,6 @@ CREATE TABLE IF NOT EXISTS omicron.public.disk ( /* Every Disk is in exactly one Project at a time. */ project_id UUID NOT NULL, - /* Every disk consists of a root volume */ - volume_id UUID NOT NULL, - - /* - * TODO Would it make sense for the runtime state to live in a separate - * table? 
-     */
 
     /* Runtime state */
     -- disk_state omicron.public.DiskState NOT NULL, /* TODO see above */
     disk_state STRING(32) NOT NULL,
@@ -1513,10 +1510,8 @@ CREATE TABLE IF NOT EXISTS omicron.public.disk (
     /* Disk configuration */
     size_bytes INT NOT NULL,
     block_size omicron.public.block_size NOT NULL,
-    origin_snapshot UUID,
-    origin_image UUID,
-    pantry_address TEXT
+    disk_type omicron.public.disk_type NOT NULL
 );
 
 CREATE UNIQUE INDEX IF NOT EXISTS lookup_disk_by_project ON omicron.public.disk (
@@ -1536,10 +1531,22 @@ CREATE UNIQUE INDEX IF NOT EXISTS lookup_deleted_disk ON omicron.public.disk (
 ) WHERE
     time_deleted IS NOT NULL;
 
-CREATE UNIQUE INDEX IF NOT EXISTS lookup_disk_by_volume_id ON omicron.public.disk (
+CREATE TABLE IF NOT EXISTS omicron.public.disk_type_crucible (
+    disk_id UUID PRIMARY KEY,
+
+    /* Every Crucible disk consists of a root volume */
+    volume_id UUID NOT NULL,
+
+    origin_snapshot UUID,
+    origin_image UUID,
+
+    pantry_address TEXT
+);
+
+/* Multiple disks cannot share volumes */
+CREATE UNIQUE INDEX IF NOT EXISTS lookup_disk_by_volume_id ON omicron.public.disk_type_crucible (
     volume_id
-) WHERE
-    time_deleted IS NULL;
+);
 
 CREATE TABLE IF NOT EXISTS omicron.public.image (
     /* Identity metadata (resource) */
@@ -5783,10 +5790,6 @@ CREATE INDEX IF NOT EXISTS step_time_order on omicron.public.region_replacement_
 CREATE INDEX IF NOT EXISTS search_for_repair_notifications ON omicron.public.upstairs_repair_notification (region_id, notification_type);
 
-CREATE INDEX IF NOT EXISTS lookup_any_disk_by_volume_id ON omicron.public.disk (
-    volume_id
-);
-
 CREATE TYPE IF NOT EXISTS omicron.public.region_snapshot_replacement_state AS ENUM (
     'requested',
     'allocating',
@@ -6854,7 +6857,7 @@ INSERT INTO omicron.public.db_metadata (
     version,
     target_version
 ) VALUES
-    (TRUE, NOW(), NOW(), '201.0.0', NULL)
+    (TRUE, NOW(), NOW(), '202.0.0', NULL)
 ON CONFLICT DO NOTHING;
 
 COMMIT;
diff --git a/schema/crdb/disk-types/up01.sql b/schema/crdb/disk-types/up01.sql
new file mode 100644
index 00000000000..05ed9262d32
--- /dev/null
+++ b/schema/crdb/disk-types/up01.sql
@@ -0,0 +1,3 @@
+CREATE TYPE IF NOT EXISTS omicron.public.disk_type AS ENUM (
+    'crucible'
+);
diff --git a/schema/crdb/disk-types/up02.sql b/schema/crdb/disk-types/up02.sql
new file mode 100644
index 00000000000..4a03f976445
--- /dev/null
+++ b/schema/crdb/disk-types/up02.sql
@@ -0,0 +1,7 @@
+CREATE TABLE IF NOT EXISTS omicron.public.disk_type_crucible (
+    disk_id UUID PRIMARY KEY,
+    volume_id UUID NOT NULL,
+    origin_snapshot UUID,
+    origin_image UUID,
+    pantry_address TEXT
+);
diff --git a/schema/crdb/disk-types/up03.sql b/schema/crdb/disk-types/up03.sql
new file mode 100644
index 00000000000..e919643f973
--- /dev/null
+++ b/schema/crdb/disk-types/up03.sql
@@ -0,0 +1,11 @@
+SET LOCAL disallow_full_table_scans = 'off';
+INSERT INTO omicron.public.disk_type_crucible
+    SELECT
+        id as disk_id,
+        volume_id,
+        origin_snapshot,
+        origin_image,
+        pantry_address
+    FROM
+        omicron.public.disk
+;
diff --git a/schema/crdb/disk-types/up04.sql b/schema/crdb/disk-types/up04.sql
new file mode 100644
index 00000000000..4ce8ff8a6c1
--- /dev/null
+++ b/schema/crdb/disk-types/up04.sql
@@ -0,0 +1 @@
+ALTER TABLE omicron.public.disk DROP COLUMN IF EXISTS volume_id;
diff --git a/schema/crdb/disk-types/up05.sql b/schema/crdb/disk-types/up05.sql
new file mode 100644
index 00000000000..f82161a95d3
--- /dev/null
+++ b/schema/crdb/disk-types/up05.sql
@@ -0,0 +1 @@
+ALTER TABLE omicron.public.disk DROP COLUMN IF EXISTS origin_snapshot;
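up03 copies each existing disk row's Crucible-specific columns into disk_type_crucible before up04 and the following files drop them from disk. As a sanity check — not part of the migration, shown only as a sketch, and depending on cluster settings it may need the same disallow_full_table_scans relaxation that up03 uses — an operator could confirm that the backfill left no disk behind:

```sql
-- Disks with no companion row in disk_type_crucible would indicate an
-- incomplete backfill; this should return zero rows once up03 has run.
SELECT d.id
FROM omicron.public.disk AS d
LEFT JOIN omicron.public.disk_type_crucible AS dtc
    ON dtc.disk_id = d.id
WHERE dtc.disk_id IS NULL;
```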
diff --git a/schema/crdb/disk-types/up06.sql b/schema/crdb/disk-types/up06.sql
new file mode 100644
index 00000000000..4a276cffc12
--- /dev/null
+++ b/schema/crdb/disk-types/up06.sql
@@ -0,0 +1 @@
+ALTER TABLE omicron.public.disk DROP COLUMN IF EXISTS origin_image;
diff --git a/schema/crdb/disk-types/up07.sql b/schema/crdb/disk-types/up07.sql
new file mode 100644
index 00000000000..3ece8bbd7c8
--- /dev/null
+++ b/schema/crdb/disk-types/up07.sql
@@ -0,0 +1 @@
+ALTER TABLE omicron.public.disk DROP COLUMN IF EXISTS pantry_address;
diff --git a/schema/crdb/disk-types/up08.sql b/schema/crdb/disk-types/up08.sql
new file mode 100644
index 00000000000..3a554d343a2
--- /dev/null
+++ b/schema/crdb/disk-types/up08.sql
@@ -0,0 +1,6 @@
+ALTER TABLE
+    omicron.public.disk
+ADD COLUMN IF NOT EXISTS
+    disk_type omicron.public.disk_type NOT NULL
+DEFAULT
+    'crucible'
diff --git a/schema/crdb/disk-types/up09.sql b/schema/crdb/disk-types/up09.sql
new file mode 100644
index 00000000000..343254b18d8
--- /dev/null
+++ b/schema/crdb/disk-types/up09.sql
@@ -0,0 +1,5 @@
+ALTER TABLE
+    omicron.public.disk
+ALTER COLUMN
+    disk_type
+DROP DEFAULT;
diff --git a/schema/crdb/disk-types/up10.sql b/schema/crdb/disk-types/up10.sql
new file mode 100644
index 00000000000..026c2b1b882
--- /dev/null
+++ b/schema/crdb/disk-types/up10.sql
@@ -0,0 +1 @@
+DROP INDEX IF EXISTS lookup_disk_by_volume_id;
diff --git a/schema/crdb/disk-types/up11.sql b/schema/crdb/disk-types/up11.sql
new file mode 100644
index 00000000000..fe6705b566e
--- /dev/null
+++ b/schema/crdb/disk-types/up11.sql
@@ -0,0 +1 @@
+DROP INDEX IF EXISTS lookup_any_disk_by_volume_id;
diff --git a/schema/crdb/disk-types/up12.sql b/schema/crdb/disk-types/up12.sql
new file mode 100644
index 00000000000..1d7d56e84f3
--- /dev/null
+++ b/schema/crdb/disk-types/up12.sql
@@ -0,0 +1,3 @@
+CREATE UNIQUE INDEX IF NOT EXISTS lookup_disk_by_volume_id ON omicron.public.disk_type_crucible (
+    volume_id
+);
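Taken together, up01 through up12 move volume_id, origin_snapshot, origin_image, and pantry_address out of omicron.public.disk into the per-type table, add the NOT NULL disk_type discriminator, and rebuild the volume_id uniqueness index on disk_type_crucible. Any ad-hoc query that previously read those columns straight off disk now joins through disk_type_crucible; a sketch (illustrative only, column list taken from the schema above):

```sql
-- Reassemble the pre-migration shape of a Crucible disk row by joining the
-- common disk table to its type-specific companion table.
SELECT
    d.id,
    d.name,
    d.disk_state,
    dtc.volume_id,
    dtc.origin_snapshot,
    dtc.origin_image,
    dtc.pantry_address
FROM omicron.public.disk AS d
JOIN omicron.public.disk_type_crucible AS dtc
    ON dtc.disk_id = d.id
WHERE d.time_deleted IS NULL
  AND d.disk_type = 'crucible';
```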