-
Notifications
You must be signed in to change notification settings - Fork 176
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
NVMe and resource disc testcase fixes #3432
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -16,6 +16,7 @@ | |
from lisa.tools import Ls, Lspci, Nvmecli | ||
from lisa.tools.lspci import PciDevice | ||
from lisa.util import field_metadata, get_matched_str | ||
from lisa.util.constants import DEVICE_TYPE_NVME | ||
|
||
|
||
class Nvme(Feature): | ||
|
@@ -42,6 +43,9 @@ class Nvme(Feature): | |
# /dev/nvme0n1p15 -> /dev/nvme0n1 | ||
NVME_NAMESPACE_PATTERN = re.compile(r"/dev/nvme[0-9]+n[0-9]+", re.M) | ||
|
||
# /dev/nvme0n1p15 -> /dev/nvme0n1 | ||
NVME_DEVICE_PATTERN = re.compile(r"/dev/nvme[0-9]+", re.M) | ||
|
||
_pci_device_name = "Non-Volatile memory controller" | ||
_ls_devices: str = "" | ||
|
||
|
@@ -63,6 +67,17 @@ def get_devices(self) -> List[str]: | |
matched_result = self._device_pattern.match(row) | ||
if matched_result: | ||
devices_list.append(matched_result.group("device_name")) | ||
node_disk = self._node.features[Disk] | ||
# With disk controller type NVMe, all remote managed disks attached to the VM | ||
# (including the OS disk) appear as NVMe devices. | ||
# All the remote managed disks (including the OS disk) use the same NVMe | ||
# controller in the VM. | ||
# Excluding the OS NVMe device from the list of NVMe devices will remove | ||
# all the remote managed disks. | ||
if node_disk.get_os_disk_controller_type() == schema.DiskControllerType.NVME: | ||
os_disk_nvme_device = self._get_os_disk_nvme_device() | ||
# Removing OS disk/device from the list. | ||
devices_list.remove(os_disk_nvme_device) | ||
return devices_list | ||
|
||
def get_namespaces(self) -> List[str]: | ||
|
@@ -78,7 +93,17 @@ def get_namespaces(self) -> List[str]: | |
return namespaces | ||
|
||
def get_namespaces_from_cli(self) -> List[str]: | ||
return self._node.tools[Nvmecli].get_namespaces() | ||
namespaces_list = self._node.tools[Nvmecli].get_namespaces() | ||
node_disk = self._node.features[Disk] | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This logic is repeated multiple times; it should be extracted into a new method, such as |
||
# With disk controller type NVMe, OS disk along with all remote iSCSI devices | ||
# appears as NVMe. | ||
# Removing OS disk from the list of NVMe devices will remove all the | ||
# remote non-NVME disks. | ||
if node_disk.get_os_disk_controller_type() == schema.DiskControllerType.NVME: | ||
os_disk_nvme_namespace = self.get_os_disk_nvme_namespace() | ||
# Removing OS disk/device from the list. | ||
namespaces_list.remove(os_disk_nvme_namespace) | ||
return namespaces_list | ||
|
||
def get_os_disk_nvme_namespace(self) -> str: | ||
node_disk = self._node.features[Disk] | ||
|
@@ -93,10 +118,22 @@ def get_os_disk_nvme_namespace(self) -> str: | |
) | ||
return os_partition_namespace | ||
|
||
# This method returns NVMe device name of the OS disk. | ||
def _get_os_disk_nvme_device(self) -> str: | ||
os_disk_nvme_namespace = self.get_os_disk_nvme_namespace() | ||
# Sample os_boot_partition when disk controller type is NVMe: | ||
# name: /dev/nvme0n1p15, disk: nvme, mount_point: /boot/efi, type: vfat | ||
if os_disk_nvme_namespace: | ||
os_disk_nvme_device = get_matched_str( | ||
os_disk_nvme_namespace, | ||
self.NVME_DEVICE_PATTERN, | ||
) | ||
return os_disk_nvme_device | ||
|
||
def get_devices_from_lspci(self) -> List[PciDevice]: | ||
devices_from_lspci = [] | ||
lspci_tool = self._node.tools[Lspci] | ||
device_list = lspci_tool.get_devices() | ||
device_list = lspci_tool.get_devices_by_type(DEVICE_TYPE_NVME) | ||
devices_from_lspci = [ | ||
x for x in device_list if self._pci_device_name == x.device_class | ||
] | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -1897,6 +1897,20 @@ def remove_data_disk(self, names: Optional[List[str]] = None) -> None: | |
self._node.capability.disk.data_disk_count -= len(names) | ||
self._node.close() | ||
|
||
def get_resource_disks(self) -> List[str]: | ||
resource_disk_mount_point = self.get_resource_disk_mount_point() | ||
resource_disks = [] | ||
try: | ||
resourcedisk = self._node.features[Disk].get_partition_with_mount_point( | ||
resource_disk_mount_point | ||
) | ||
if resourcedisk: | ||
resource_disks = [resourcedisk.name] | ||
except AssertionError: | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is there a way to avoid checking by the error type? The error can happen for other reasons, and swallowing it here may hide other problems. |
||
nvme = self._node.features[Nvme] | ||
resource_disks = nvme.get_raw_nvme_disks() | ||
return resource_disks | ||
|
||
def get_resource_disk_mount_point(self) -> str: | ||
# get customize mount point from cloud-init configuration file from /etc/cloud/ | ||
# if not found, use default mount point /mnt for cloud-init | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -1115,32 +1115,51 @@ def verify_no_pre_exist_users(self, node: Node) -> None: | |
), | ||
) | ||
def verify_resource_disk_readme_file(self, node: RemoteNode) -> None: | ||
resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point() | ||
|
||
# verify that resource disk is mounted | ||
# function returns successfully if disk matching mount point is present | ||
node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point) | ||
|
||
# Verify lost+found folder exists | ||
# Skip this step for BSD as it does not have lost+found folder | ||
# since it uses UFS file system | ||
if not isinstance(node.os, BSD): | ||
fold_path = f"{resource_disk_mount_point}/lost+found" | ||
folder_exists = node.tools[Ls].path_exists(fold_path, sudo=True) | ||
assert_that(folder_exists, f"{fold_path} should be present").is_true() | ||
|
||
# verify DATALOSS_WARNING_README.txt file exists | ||
file_path = f"{resource_disk_mount_point}/DATALOSS_WARNING_README.txt" | ||
file_exists = node.tools[Ls].path_exists(file_path, sudo=True) | ||
assert_that(file_exists, f"{file_path} should be present").is_true() | ||
|
||
# verify 'WARNING: THIS IS A TEMPORARY DISK' contained in | ||
# DATALOSS_WARNING_README.txt file. | ||
read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True) | ||
assert_that( | ||
read_text, | ||
f"'WARNING: THIS IS A TEMPORARY DISK' should be present in {file_path}", | ||
).contains("WARNING: THIS IS A TEMPORARY DISK") | ||
node_disc = node.features[Disk] | ||
resource_disks = node_disc.get_resource_disks() | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Add a method in |
||
if not resource_disks: | ||
raise LisaException("Resource disk not found") | ||
if "nvme" in resource_disks[0]: | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. As above comments, use the type enum, instead of checking by string. |
||
raise SkippedException( | ||
f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks" # noqa: E501 | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This error message is unclear, why the tests should be skipped, if the resource disk is nvme. Please check if it's valid to skip for nvme, if so, update the description. |
||
) | ||
else: | ||
resource_disk_mount_point = node.features[ | ||
Disk | ||
].get_resource_disk_mount_point() | ||
|
||
# verify that resource disk is mounted | ||
# function returns successfully if disk matching mount point is present | ||
node.features[Disk].get_partition_with_mount_point( | ||
resource_disk_mount_point | ||
) | ||
# Verify lost+found folder exists | ||
# Skip this step for BSD as it does not have lost+found folder | ||
# since it uses UFS file system | ||
if not isinstance(node.os, BSD): | ||
fold_path = f"{resource_disk_mount_point}/lost+found" | ||
folder_exists = node.tools[Ls].path_exists(fold_path, sudo=True) | ||
assert_that(folder_exists, f"{fold_path} should be present").is_true() | ||
|
||
# Verify lost+found folder exists | ||
# Skip this step for BSD as it does not have lost+found folder | ||
# since it uses UFS file system | ||
if not isinstance(node.os, BSD): | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is this mis-copied from the lines above? It looks the same as the earlier block. |
||
fold_path = f"{resource_disk_mount_point}/lost+found" | ||
folder_exists = node.tools[Ls].path_exists(fold_path, sudo=True) | ||
assert_that(folder_exists, f"{fold_path} should be present").is_true() | ||
|
||
# verify DATALOSS_WARNING_README.txt file exists | ||
file_path = f"{resource_disk_mount_point}/DATALOSS_WARNING_README.txt" | ||
file_exists = node.tools[Ls].path_exists(file_path, sudo=True) | ||
assert_that(file_exists, f"{file_path} should be present").is_true() | ||
# verify 'WARNING: THIS IS A TEMPORARY DISK' contained in | ||
# DATALOSS_WARNING_README.txt file. | ||
read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True) | ||
assert_that( | ||
read_text, | ||
f"'WARNING: THIS IS A TEMPORARY DISK' should be present in {file_path}", | ||
).contains("WARNING: THIS IS A TEMPORARY DISK") | ||
|
||
@TestCaseMetadata( | ||
description=""" | ||
|
@@ -1159,14 +1178,29 @@ def verify_resource_disk_readme_file(self, node: RemoteNode) -> None: | |
), | ||
) | ||
def verify_resource_disk_file_system(self, node: RemoteNode) -> None: | ||
resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point() | ||
node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point) | ||
disk_info = node.tools[Lsblk].find_disk_by_mountpoint(resource_disk_mount_point) | ||
for partition in disk_info.partitions: | ||
# by default, resource disk comes with ntfs type | ||
# waagent or cloud-init will format it unless there are some commands hung | ||
# or interrupt | ||
assert_that( | ||
partition.fstype, | ||
"Resource disk file system type should not equal to ntfs", | ||
).is_not_equal_to("ntfs") | ||
node_disc = node.features[Disk] | ||
resource_disks = node_disc.get_resource_disks() | ||
if not resource_disks: | ||
raise LisaException("Resource disk not found") | ||
if "nvme" in resource_disks[0]: | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Wrap the repeat logic to reuse. |
||
raise SkippedException( | ||
f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks" # noqa: E501 | ||
) | ||
else: | ||
resource_disk_mount_point = node.features[ | ||
Disk | ||
].get_resource_disk_mount_point() | ||
node.features[Disk].get_partition_with_mount_point( | ||
resource_disk_mount_point | ||
) | ||
disk_info = node.tools[Lsblk].find_disk_by_mountpoint( | ||
resource_disk_mount_point | ||
) | ||
for partition in disk_info.partitions: | ||
# by default, resource disk comes with ntfs type | ||
# waagent or cloud-init will format it unless there are some commands | ||
# hung or interrupt | ||
assert_that( | ||
partition.fstype, | ||
"Resource disk file system type should not equal to ntfs", | ||
).is_not_equal_to("ntfs") |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -160,32 +160,43 @@ def verify_disks_device_timeout_setting( | |
), | ||
) | ||
def verify_resource_disk_mounted(self, node: RemoteNode) -> None: | ||
resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point() | ||
# os disk(root disk) is the entry with mount point `/' in the output | ||
# of `mount` command | ||
os_disk = ( | ||
node.features[Disk] | ||
.get_partition_with_mount_point(self.os_disk_mount_point) | ||
.disk | ||
) | ||
if isinstance(node.os, BSD): | ||
partition_info = node.tools[Mount].get_partition_info() | ||
resource_disk_from_mtab = [ | ||
entry | ||
for entry in partition_info | ||
if entry.mount_point == resource_disk_mount_point | ||
][0].mount_point | ||
node_disc = node.features[Disk] | ||
resource_disks = node_disc.get_resource_disks() | ||
if not resource_disks: | ||
raise LisaException("Resource disk not found") | ||
if "nvme" in resource_disks[0]: | ||
raise SkippedException( | ||
f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks" # noqa: E501 | ||
) | ||
else: | ||
mtab = node.tools[Cat].run("/etc/mtab").stdout | ||
resource_disk_from_mtab = get_matched_str( | ||
mtab, self._get_mtab_mount_point_regex(resource_disk_mount_point) | ||
resource_disk_mount_point = node.features[ | ||
Disk | ||
].get_resource_disk_mount_point() | ||
# os disk(root disk) is the entry with mount point `/' in the output | ||
# of `mount` command | ||
os_disk = ( | ||
node.features[Disk] | ||
.get_partition_with_mount_point(self.os_disk_mount_point) | ||
.disk | ||
) | ||
assert ( | ||
resource_disk_from_mtab | ||
), f"resource disk mountpoint not found {resource_disk_mount_point}" | ||
assert_that( | ||
resource_disk_from_mtab, "Resource disk should not be equal to os disk" | ||
).is_not_equal_to(os_disk) | ||
if isinstance(node.os, BSD): | ||
partition_info = node.tools[Mount].get_partition_info() | ||
resource_disk_from_mtab = [ | ||
entry | ||
for entry in partition_info | ||
if entry.mount_point == resource_disk_mount_point | ||
][0].mount_point | ||
else: | ||
mtab = node.tools[Cat].run("/etc/mtab").stdout | ||
resource_disk_from_mtab = get_matched_str( | ||
mtab, self._get_mtab_mount_point_regex(resource_disk_mount_point) | ||
) | ||
assert ( | ||
resource_disk_from_mtab | ||
), f"resource disk mountpoint not found {resource_disk_mount_point}" | ||
assert_that( | ||
resource_disk_from_mtab, "Resource disk should not be equal to os disk" | ||
).is_not_equal_to(os_disk) | ||
|
||
@TestCaseMetadata( | ||
description=""" | ||
|
@@ -199,7 +210,7 @@ def verify_resource_disk_mounted(self, node: RemoteNode) -> None: | |
priority=1, | ||
requirement=simple_requirement( | ||
supported_platform_type=[AZURE], | ||
unsupported_os=[BSD, Windows] | ||
unsupported_os=[BSD, Windows], | ||
# This test is skipped as waagent does not support freebsd fully | ||
), | ||
) | ||
|
@@ -229,27 +240,40 @@ def verify_swap(self, node: RemoteNode) -> None: | |
), | ||
) | ||
def verify_resource_disk_io(self, node: RemoteNode) -> None: | ||
resource_disk_mount_point = node.features[Disk].get_resource_disk_mount_point() | ||
|
||
# verify that resource disk is mounted | ||
# function returns successfully if disk matching mount point is present | ||
node.features[Disk].get_partition_with_mount_point(resource_disk_mount_point) | ||
node_disc = node.features[Disk] | ||
resource_disks = node_disc.get_resource_disks() | ||
if not resource_disks: | ||
raise LisaException("Resource disk not found") | ||
if "nvme" in resource_disks[0]: | ||
raise SkippedException( | ||
f"Resource disk type is NVMe and the VM has {len(resource_disks)} NVMe disks" # noqa: E501 | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The skip message doesn't explain why nvme disk should be skipped. |
||
) | ||
else: | ||
resource_disk_mount_point = node.features[ | ||
Disk | ||
].get_resource_disk_mount_point() | ||
|
||
# verify that resource disk is mounted | ||
# function returns successfully if disk matching mount point is present | ||
node.features[Disk].get_partition_with_mount_point( | ||
resource_disk_mount_point | ||
) | ||
|
||
file_path = f"{resource_disk_mount_point}/sample.txt" | ||
original_text = "Writing to resource disk!!!" | ||
file_path = f"{resource_disk_mount_point}/sample.txt" | ||
original_text = "Writing to resource disk!!!" | ||
|
||
# write content to the file | ||
node.tools[Echo].write_to_file( | ||
original_text, node.get_pure_path(file_path), sudo=True | ||
) | ||
# write content to the file | ||
node.tools[Echo].write_to_file( | ||
original_text, node.get_pure_path(file_path), sudo=True | ||
) | ||
|
||
# read content from the file | ||
read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True) | ||
# read content from the file | ||
read_text = node.tools[Cat].read(file_path, force_run=True, sudo=True) | ||
|
||
assert_that( | ||
read_text, | ||
"content read from file should be equal to content written to file", | ||
).is_equal_to(original_text) | ||
assert_that( | ||
read_text, | ||
"content read from file should be equal to content written to file", | ||
).is_equal_to(original_text) | ||
|
||
@TestCaseMetadata( | ||
description=""" | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The default implementation should return an empty list, because "resource disk" is a concept on cloud or Azure, not all platforms. All test cases, which used the resource disk, should handle the empty list.