From 161dfbcb2f22e1061be15cdbe1f9fdf08266e4a1 Mon Sep 17 00:00:00 2001
From: Adam Dyess
Date: Mon, 3 Feb 2025 08:17:22 -0600
Subject: [PATCH] Support alternate names for ceph-fs charm and associated storage class (#33)

* Support alternate names for ceph-fs charm and associated storage class

* Pythonic error handling
---
 config.yaml                                   |  22 ++-
 src/charm.py                                  |  36 ++---
 src/manifests_base.py                         |  49 +++----
 src/manifests_cephfs.py                       | 137 +++++++++++++-----
 src/manifests_rbd.py                          |   6 +-
 tests/unit/test_charm.py                      |  79 +++++-----
 tests/unit/test_manifests_cephfs.py           | 122 ++++++++++++----
 upstream/rbd/manifests/v3.10.0/csidriver.yaml |  17 +++
 upstream/rbd/manifests/v3.10.1/csidriver.yaml |  17 +++
 upstream/rbd/manifests/v3.10.2/csidriver.yaml |  17 +++
 upstream/rbd/manifests/v3.11.0/csidriver.yaml |  17 +++
 upstream/rbd/manifests/v3.7.2/csidriver.yaml  |  18 +++
 upstream/rbd/manifests/v3.8.0/csidriver.yaml  |  16 ++
 upstream/rbd/manifests/v3.8.1/csidriver.yaml  |  16 ++
 upstream/rbd/manifests/v3.9.0/csidriver.yaml  |  17 +++
 15 files changed, 425 insertions(+), 161 deletions(-)
 create mode 100644 upstream/rbd/manifests/v3.10.0/csidriver.yaml
 create mode 100644 upstream/rbd/manifests/v3.10.1/csidriver.yaml
 create mode 100644 upstream/rbd/manifests/v3.10.2/csidriver.yaml
 create mode 100644 upstream/rbd/manifests/v3.11.0/csidriver.yaml
 create mode 100644 upstream/rbd/manifests/v3.7.2/csidriver.yaml
 create mode 100644 upstream/rbd/manifests/v3.8.0/csidriver.yaml
 create mode 100644 upstream/rbd/manifests/v3.8.1/csidriver.yaml
 create mode 100644 upstream/rbd/manifests/v3.9.0/csidriver.yaml

diff --git a/config.yaml b/config.yaml
index 3c51dd5..dd3a529 100644
--- a/config.yaml
+++ b/config.yaml
@@ -19,6 +19,24 @@ options:
     description: "Whether or not csi-*plugin-provisioner deployments use host-networking"
     type: boolean
 
+  cephfs-storage-class-name-formatter:
+    default: "cephfs"
+    description: |
+      Formatter for the cephfs storage class name
+
+      The formatter is a string that can contain the following placeholders:
+      - {app} - the name of the juju application
+      - {namespace} - the charm configured namespace
+      - {name} - the name of the filesystem
+      - {pool} - the name of the data-pool
+      - {pool-id} - the id of the data-pool
+
+      Example:
+        juju config ceph-csi cephfs-storage-class-name-formatter="{app}-{namespace}-{name}"
+
+      The default produces the storage class name "cephfs"
+    type: string
+
   cephfs-enable:
     default: false
     description: |
@@ -38,8 +56,8 @@ options:
   cephfs-storage-class-parameters:
     default: ""
     description: |
-      Parameters to be used when creating the cephfs storage class.
-      Changes are only applied to the storage class if it does not exist.
+      Parameters to be used when creating the cephfs storage classes.
+      Changes are only applied to the storage classes if they do not exist.
       Declare additional/replacement parameters in key=value format, separated by spaces.
       Declare removed parameters in the key- format, separated by spaces.
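A minimal sketch of how the formatter expands, assuming a juju application named
"ceph-csi" and a single filesystem "ceph-fs" with one data pool (all values below are
illustrative; the placeholder names match the config description above and the context
built in CephStorageClass.parameter_list):

    # One storage class name is generated per (filesystem, data pool) pair.
    formatter = "{app}-{name}-{pool}"   # hypothetical config value
    context = {
        "app": "ceph-csi",              # juju application name
        "namespace": "default",         # charm configured namespace
        "name": "ceph-fs",              # filesystem name
        "pool": "ceph-fs_data",         # data pool name
        "pool-id": 2,                   # data pool id
    }
    print(formatter.format(**context))  # -> ceph-csi-ceph-fs-ceph-fs_data

The generated names must be unique across every data pool of every filesystem; otherwise
the charm refuses to model the storage classes.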
diff --git a/src/charm.py b/src/charm.py
index 924affb..b1580c5 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -23,7 +23,7 @@
 from ops.manifests import Collector, ManifestClientError
 
 from manifests_base import Manifests, SafeManifest
-from manifests_cephfs import CephFSManifests, CephStorageClass
+from manifests_cephfs import CephFilesystem, CephFSManifests, CephStorageClass
 from manifests_config import ConfigManifests
 from manifests_rbd import RBDManifests
 
@@ -228,20 +228,17 @@ def get_ceph_fsid(self) -> str:
         return ""
 
     @lru_cache(maxsize=None)
-    def get_ceph_fsname(self) -> Optional[str]:
-        """Get the Ceph FS Name."""
+    def get_ceph_fs_list(self) -> List[CephFilesystem]:
+        """Get the list of CephFS filesystems and their associated pools."""
         try:
             data = json.loads(self.ceph_cli("fs", "ls", "-f", "json"))
         except (subprocess.SubprocessError, ValueError) as e:
             logger.error(
-                "get_ceph_fsname: Failed to get CephFS name, reporting as None, error: %s", e
+                "get_ceph_fs_list: Failed to list CephFS filesystems, returning an empty list, error: %s", e
             )
-            return None
-        for fs in data:
-            if CephStorageClass.POOL in fs["data_pools"]:
-                logger.error("get_ceph_fsname: got cephfs name: %s", fs["name"])
-                return fs["name"]
-        return None
+            return []
+
+        return [CephFilesystem(**fs) for fs in data]
 
     @property
     def provisioner_replicas(self) -> int:
@@ -285,7 +282,7 @@ def ceph_context(self) -> Dict[str, Any]:
             "user": self.app.name,
             "provisioner_replicas": self.provisioner_replicas,
             "enable_host_network": json.dumps(self.enable_host_network),
-            "fsname": self.get_ceph_fsname(),
+            CephStorageClass.FILESYSTEM_LISTING: self.get_ceph_fs_list(),
         }
 
     @status.on_error(ops.WaitingStatus("Waiting for kubeconfig"))
@@ -306,20 +303,11 @@ def check_namespace(self) -> None:
             status.add(ops.WaitingStatus("Waiting for Kubernetes API"))
             raise status.ReconcilerError("Waiting for Kubernetes API")
 
-    @status.on_error(ops.BlockedStatus("CephFS is not usable; set 'cephfs-enable=False'"))
-    def check_cephfs(self) -> None:
-        self.unit.status = ops.MaintenanceStatus("Evaluating CephFS capability")
+    def enforce_cephfs_enabled(self) -> None:
+        """Purge the CephFS manifests when the feature is disabled."""
         disabled = self.config.get("cephfs-enable") is False
-        if disabled:
-            # not enabled, not a problem
-            logger.info("CephFS is disabled")
-        elif not self.ceph_context.get("fsname", None):
-            logger.error(
-                "Ceph CLI failed to find a CephFS fsname. Run 'juju config cephfs-enable=False' until ceph-fs is usable."
- ) - raise status.ReconcilerError("CephFS is not usable; set 'cephfs-enable=False'") - if disabled and self.unit.is_leader(): + self.unit.status = ops.MaintenanceStatus("Disabling CephFS") self._purge_manifest_by_name("cephfs") def evaluate_manifests(self) -> int: @@ -390,7 +378,7 @@ def reconcile(self, event: ops.EventBase) -> None: self.check_namespace() self.check_ceph_client() self.configure_ceph_cli() - self.check_cephfs() + self.enforce_cephfs_enabled() hash = self.evaluate_manifests() self.install_manifests(config_hash=hash) self._update_status() diff --git a/src/manifests_base.py b/src/manifests_base.py index 76ad913..19a12f3 100644 --- a/src/manifests_base.py +++ b/src/manifests_base.py @@ -1,13 +1,12 @@ import logging import pickle -from abc import ABCMeta, abstractmethod from hashlib import md5 from typing import Any, Dict, Generator, List, Optional from lightkube.codecs import AnyResource from lightkube.core.resource import NamespacedResource from lightkube.models.core_v1 import Toleration -from ops.manifests import Addition, Manifests, Patch +from ops.manifests import Manifests, Patch log = logging.getLogger(__name__) @@ -92,35 +91,6 @@ def filter_containers(self, containers: list) -> Generator: yield container -class StorageClassAddition(Addition): - """Base class for storage class additions.""" - - __metaclass__ = ABCMeta - - @property - @abstractmethod - def name(self) -> str: - """Name of the storage class.""" - raise NotImplementedError - - def update_parameters(self, parameters: Dict[str, str]) -> None: - """Adjust parameters for storage class.""" - config = f"{self.name}-storage-class-parameters" - adjustments = self.manifests.config.get(config) - if not adjustments: - log.info(f"No adjustments for {self.name} storage-class parameters") - return - - for adjustment in adjustments.split(" "): - key_value = adjustment.split("=", 1) - if len(key_value) == 2: - parameters[key_value[0]] = key_value[1] - elif adjustment.endswith("-"): - parameters.pop(adjustment[:-1], None) - else: - log.warning("Invalid parameter: %s in %s", adjustment, config) - - class CephToleration(Toleration): @classmethod def _from_string(cls, toleration_str: str) -> "CephToleration": @@ -160,3 +130,20 @@ def from_space_separated(cls, tolerations: str) -> List["CephToleration"]: return [cls._from_string(toleration) for toleration in tolerations.split()] except ValueError as e: raise ValueError(f"Invalid tolerations: {e}") from e + + +def update_storage_params(ceph_type: str, config: Dict[str, Any], params: Dict[str, str]) -> None: + """Adjust parameters for storage class.""" + cfg_name = f"{ceph_type}-storage-class-parameters" + if not (adjustments := config.get(cfg_name)): + log.info(f"No adjustments for {ceph_type} storage-class parameters") + return + + for adjustment in adjustments.split(" "): + key_value = adjustment.split("=", 1) + if len(key_value) == 2: + params[key_value[0]] = key_value[1] + elif adjustment.endswith("-"): + params.pop(adjustment[:-1], None) + else: + log.warning("Invalid parameter: %s in %s", adjustment, cfg_name) diff --git a/src/manifests_cephfs.py b/src/manifests_cephfs.py index 4a5be4f..0c56626 100644 --- a/src/manifests_cephfs.py +++ b/src/manifests_cephfs.py @@ -3,6 +3,7 @@ """Implementation of rbd specific details of the kubernetes manifests.""" import logging +from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from lightkube.codecs import AnyResource @@ -16,7 +17,7 @@ CephToleration, ConfigureLivenessPrometheus, 
SafeManifest, - StorageClassAddition, + update_storage_params, ) if TYPE_CHECKING: @@ -25,6 +26,27 @@ log = logging.getLogger(__name__) +@dataclass +class CephFilesystem: + """Ceph Filesystem details.""" + + name: str + metadata_pool: str + metadata_pool_id: int + data_pool_ids: List[int] + data_pools: List[str] + + +@dataclass +class CephStorageClassParameters: + """Ceph Storage class parameters.""" + + cluster_id: str + storage_class_name: str + filesystem_name: str + data_pool: str + + class StorageSecret(Addition): """Create secret for the deployment.""" @@ -56,56 +78,40 @@ def __call__(self) -> Optional[AnyResource]: ) -class CephStorageClass(StorageClassAddition): +class CephStorageClass(Addition): """Create ceph storage classes.""" STORAGE_NAME = "cephfs" - REQUIRED_CONFIG = {"fsid", "fsname"} + STORAGE_NAME_FORMATTER = "cephfs-storage-class-name-formatter" + FILESYSTEM_LISTING = "fs_list" + REQUIRED_CONFIG = {STORAGE_NAME_FORMATTER, "fsid", FILESYSTEM_LISTING} PROVISIONER = "cephfs.csi.ceph.com" - POOL = "ceph-fs_data" - - @property - def name(self) -> str: - return self.STORAGE_NAME - def __call__(self) -> Optional[AnyResource]: - """Craft the storage class object.""" - if not self.manifests.config["enabled"]: - log.info("Ignore CephFS Storage Class") - return None - - clusterID = self.manifests.config.get("fsid") - if not clusterID: - log.error("CephFS is missing required storage item: 'clusterID'") - return None - - fsname = self.manifests.config.get("fsname") - if not fsname: - log.error("CephFS is missing required storage item: 'fsname'") - return None + def create(self, param: CephStorageClassParameters) -> AnyResource: + """Create a storage class object.""" ns = self.manifests.config["namespace"] - metadata: Dict[str, Any] = dict(name=self.name) - if self.manifests.config.get("default-storage") == metadata["name"]: + metadata: Dict[str, Any] = dict(name=param.storage_class_name) + if self.manifests.config.get("default-storage") == param.storage_class_name: metadata["annotations"] = {"storageclass.kubernetes.io/is-default-class": "true"} - log.info(f"Modelling storage class {metadata['name']}") + log.info(f"Modelling storage class sc='{param.storage_class_name}'") parameters = { - "clusterID": clusterID, - "fsName": fsname, + "clusterID": param.cluster_id, + "fsName": param.filesystem_name, "csi.storage.k8s.io/controller-expand-secret-name": StorageSecret.SECRET_NAME, "csi.storage.k8s.io/controller-expand-secret-namespace": ns, "csi.storage.k8s.io/provisioner-secret-name": StorageSecret.SECRET_NAME, "csi.storage.k8s.io/provisioner-secret-namespace": ns, "csi.storage.k8s.io/node-stage-secret-name": StorageSecret.SECRET_NAME, "csi.storage.k8s.io/node-stage-secret-namespace": ns, - "pool": self.POOL, + "pool": param.data_pool, } mounter = self.manifests.config.get("cephfs-mounter") if mounter in ["kernel", "fuse"]: parameters["mounter"] = mounter - self.update_parameters(parameters) + update_storage_params(self.STORAGE_NAME, self.manifests.config, parameters) return StorageClass.from_dict( dict( @@ -117,10 +123,66 @@ def __call__(self) -> Optional[AnyResource]: ) ) + def parameter_list(self) -> List[CephStorageClassParameters]: + """Accumulate names and settings of the storage classes.""" + enabled = self.manifests.config.get("enabled") + fsid = self.manifests.config.get("fsid") + fs_data: List[CephFilesystem] = self.manifests.config.get(self.FILESYSTEM_LISTING) or [] + formatter = str(self.manifests.config.get(self.STORAGE_NAME_FORMATTER) or "") + + if not enabled: + 
log.info("Ignore CephFS Storage Class") + return [] + + if not fsid: + log.error("CephFS is missing a filesystem: 'fsid'") + raise ValueError("missing fsid") + + if not fs_data: + log.error("CephFS is missing a filesystem listing: '%s'", self.FILESYSTEM_LISTING) + raise ValueError("missing filesystem listing") + + if not formatter: + log.error("CephFS is missing '%s'", self.STORAGE_NAME_FORMATTER) + raise ValueError(f"empty {self.STORAGE_NAME_FORMATTER}") + + sc_names: List[CephStorageClassParameters] = [] + for fs in fs_data: + for idx, data_pool in enumerate(fs.data_pools): + context = { + "app": self.manifests.model.app.name, + "namespace": self.manifests.config["namespace"], + "name": fs.name, + "pool": data_pool, + "pool-id": fs.data_pool_ids[idx], + } + sc_name = formatter.format(**context) + sc_names += [CephStorageClassParameters(fsid, sc_name, fs.name, data_pool)] + + if len(set(n.storage_class_name for n in sc_names)) != len(sc_names): + log.error( + "The formatter cannot generate unique storage class names for all the available pools. " + "Consider improving the config '%s' to expand to meet the number of pools." + "\n\tfile systems = %s" + "\n\tstorage_classes = %s", + self.STORAGE_NAME_FORMATTER, + fs_data, + sc_names, + ) + raise ValueError(f"{self.STORAGE_NAME_FORMATTER} does not generate unique names") + return sc_names + + def __call__(self) -> List[AnyResource]: + """Craft the storage class object.""" + return [self.create(class_param) for class_param in self.parameter_list()] + class ProvisionerAdjustments(Patch): """Update Cephfs provisioner.""" + PROVISIONER_NAME = "csi-cephfsplugin-provisioner" + PLUGIN_NAME = "csi-cephfsplugin" + def tolerations(self) -> Tuple[List[CephToleration], bool]: cfg = self.manifests.config.get("cephfs-tolerations") or "" if cfg == "$csi-cephfsplugin-legacy$": @@ -133,7 +195,7 @@ def __call__(self, obj: AnyResource) -> None: if ( obj.kind == "Deployment" and obj.metadata - and obj.metadata.name == "csi-cephfsplugin-provisioner" + and obj.metadata.name == self.PROVISIONER_NAME ): obj.spec.replicas = replica = self.manifests.config.get("provisioner-replicas") log.info(f"Updating deployment replicas to {replica}") @@ -145,7 +207,7 @@ def __call__(self, obj: AnyResource) -> None: "enable-host-networking" ) log.info(f"Updating deployment hostNetwork to {host_network}") - if obj.kind == "DaemonSet" and obj.metadata and obj.metadata.name == "csi-cephfsplugin": + if obj.kind == "DaemonSet" and obj.metadata and obj.metadata.name == self.PLUGIN_NAME: obj.spec.template.spec.tolerations = ( tolerations if not legacy else [CephToleration(operator="Exists")] ) @@ -160,7 +222,7 @@ def __call__(self, obj: AnyResource) -> None: for v in obj.spec.template.spec.volumes: if v.hostPath: v.hostPath.path = v.hostPath.path.replace("/var/lib/kubelet", kubelet_dir) - log.info(f"Updating CephFS daemonset kubeletDir to {kubelet_dir}") + log.info(f"Updating daemonset kubeletDir to {kubelet_dir}") class RbacAdjustments(Patch): @@ -234,11 +296,10 @@ def config(self) -> Dict: def evaluate(self) -> Optional[str]: """Determine if manifest_config can be applied to manifests.""" if not self.config.get("enabled"): - # not enabled, not a problem log.info("Skipping CephFS evaluation since it's disabled") return None - props = StorageSecret.REQUIRED_CONFIG.keys() | CephStorageClass.REQUIRED_CONFIG + props = StorageSecret.REQUIRED_CONFIG.keys() for prop in sorted(props): value = self.config.get(prop) if not value: @@ -252,4 +313,10 @@ def evaluate(self) -> Optional[str]: except 
ValueError as err: return f"Cannot adjust CephFS Pods: {err}" + sc_manipulator = next(m for m in self.manipulations if isinstance(m, CephStorageClass)) + try: + sc_manipulator.parameter_list() + except ValueError as err: + return f"CephFS manifests failed to create storage classes: {err}" + return None diff --git a/src/manifests_rbd.py b/src/manifests_rbd.py index 8db33aa..5c791d4 100644 --- a/src/manifests_rbd.py +++ b/src/manifests_rbd.py @@ -15,7 +15,7 @@ CephToleration, ConfigureLivenessPrometheus, SafeManifest, - StorageClassAddition, + update_storage_params, ) if TYPE_CHECKING: @@ -50,7 +50,7 @@ def __call__(self) -> Optional[AnyResource]: ) -class CephStorageClass(StorageClassAddition): +class CephStorageClass(Addition): """Create ceph storage classes.""" REQUIRED_CONFIG = {"fsid"} @@ -89,7 +89,7 @@ def __call__(self) -> Optional[AnyResource]: "pool": f"{self.fs_type}-pool", } - self.update_parameters(parameters) + update_storage_params(self.name, self.manifests.config, parameters) return StorageClass.from_dict( dict( diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index be77a89..d996691 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -16,7 +16,7 @@ from ops.manifests import ManifestClientError from ops.testing import Harness -from charm import CephCsiCharm +from charm import CephCsiCharm, CephFilesystem @pytest.fixture @@ -128,7 +128,7 @@ def test_ceph_context_getter(ceph_data, check_output, harness, ceph_conf_directo "user": "ceph-csi", "provisioner_replicas": 3, "enable_host_network": "false", - "fsname": None, + "fs_list": [], } assert harness.charm.ceph_context == expected_context @@ -159,7 +159,7 @@ def mocked_handlers(): "check_namespace", "check_ceph_client", "configure_ceph_cli", - "check_cephfs", + "enforce_cephfs_enabled", "evaluate_manifests", "install_manifests", "_update_status", @@ -181,7 +181,7 @@ def test_set_leader(harness): harness.charm.reconciler.stored.reconciled = False # Pretended to not be reconciled with mocked_handlers() as handlers: handlers["_destroying"].return_value = False - handlers["check_cephfs"].return_value = False + handlers["enforce_cephfs_enabled"].return_value = False harness.set_leader(True) assert harness.charm.unit.status.name == "active" assert harness.charm.unit.status.message == "Ready" @@ -426,7 +426,7 @@ def test_action_list_versions(harness): @mock.patch("charm.CephCsiCharm.configure_ceph_cli", mock.MagicMock()) @mock.patch("charm.CephCsiCharm.get_ceph_fsid", mock.MagicMock(return_value="12345")) -@mock.patch("charm.CephCsiCharm.get_ceph_fsname", mock.MagicMock(return_value=None)) +@mock.patch("charm.CephCsiCharm.get_ceph_fs_list", mock.MagicMock(return_value={})) def test_action_manifest_resources(harness): harness.begin() @@ -436,6 +436,7 @@ def test_action_manifest_resources(harness): "config-missing": "ConfigMap/default/ceph-csi-encryption-kms-config", "rbd-missing": "\n".join( [ + "CSIDriver/rbd.csi.ceph.com", "ClusterRole/rbd-csi-nodeplugin", "ClusterRole/rbd-external-provisioner-runner", "ClusterRoleBinding/rbd-csi-nodeplugin", @@ -466,7 +467,7 @@ def test_action_manifest_resources(harness): @mock.patch("charm.CephCsiCharm.configure_ceph_cli", mock.MagicMock()) @mock.patch("charm.CephCsiCharm.get_ceph_fsid", mock.MagicMock(return_value="12345")) -@mock.patch("charm.CephCsiCharm.get_ceph_fsname", mock.MagicMock(return_value=None)) +@mock.patch("charm.CephCsiCharm.get_ceph_fs_list", mock.MagicMock(return_value={})) def test_action_sync_resources_install_failure(harness, lk_client): 
harness.begin() @@ -518,17 +519,27 @@ def test_failed_cli_fsid(harness): @pytest.mark.parametrize("failure", [SubprocessError, lambda *_: "invalid"]) -def test_failed_cli_fsname(harness, failure): +def test_failed_cli_fs_list(harness, failure): harness.begin() with mock.patch("charm.CephCsiCharm.ceph_cli", side_effect=failure): - assert harness.charm.get_ceph_fsname() is None + assert harness.charm.get_ceph_fs_list() == [] -def test_cli_fsname(request, harness): +def test_cli_fsdata(request, harness): harness.begin() - pools = json.dumps([{"data_pools": ["ceph-fs_data"], "name": request.node.name}]) + fs_name = request.node.name + fs_data = [ + { + "name": f"{fs_name}-alt", + "metadata_pool": f"{fs_name}-alt_metadata", + "metadata_pool_id": 5, + "data_pool_ids": [4], + "data_pools": [f"{fs_name}-alt_data"], + } + ] + pools = json.dumps(fs_data) with mock.patch("charm.CephCsiCharm.ceph_cli", return_value=pools): - assert harness.charm.get_ceph_fsname() == request.node.name + assert harness.charm.get_ceph_fs_list() == [CephFilesystem(**fs) for fs in fs_data] def test_kubelet_dir(harness): @@ -542,31 +553,31 @@ def test_kubelet_dir(harness): assert harness.charm.kubelet_dir == "/var/snap/k8s/common/var/lib/kubelet" -def test_check_cephfs(harness): +@mock.patch("charm.CephCsiCharm._purge_manifest_by_name") +def test_enforce_cephfs_enabled(mock_purge, harness): harness.begin() - # no enable, no fsname -> no problem - with mock.patch("charm.CephCsiCharm.ceph_context", new_callable=mock.PropertyMock) as mock_ctx: - mock_ctx.return_value = {"fsname": None} - with reconcile_this(harness, lambda _: harness.charm.check_cephfs()): - harness.set_leader(True) - harness.update_config({"cephfs-enable": False}) - assert harness.charm.unit.status.name == "active" - - # enable, fsname -> no problem - with mock.patch("charm.CephCsiCharm.ceph_context", new_callable=mock.PropertyMock) as mock_ctx: - mock_ctx.return_value = {"fsname": "abcd"} - with reconcile_this(harness, lambda _: harness.charm.check_cephfs()): - harness.update_config({"cephfs-enable": True}) - assert harness.charm.unit.status.name == "active" - - # enable, no fsname -> problem - with mock.patch("charm.CephCsiCharm.ceph_context", new_callable=mock.PropertyMock) as mock_ctx: - mock_ctx.return_value = {"fsname": None} - with reconcile_this(harness, lambda _: harness.charm.check_cephfs()): - harness.update_config({"cephfs-enable": True}) - assert harness.charm.unit.status.name == "blocked" - assert harness.charm.unit.status.message == "CephFS is not usable; set 'cephfs-enable=False'" + with reconcile_this(harness, lambda _: harness.charm.enforce_cephfs_enabled()): + # disabled on leader, purge + harness.disable_hooks() + harness.set_leader(True) + harness.enable_hooks() + harness.update_config({"cephfs-enable": False}) + assert harness.charm.unit.status.name == "active" + mock_purge.assert_called_once_with("cephfs") + mock_purge.reset_mock() + + # enabled on leader, no purge + harness.update_config({"cephfs-enable": True}) + mock_purge.assert_not_called() + + # disabled on follower, no purge + harness.disable_hooks() + harness.set_leader(False) + harness.enable_hooks() + harness.update_config({"cephfs-enable": False}) + mock_purge.assert_not_called() + assert harness.charm.unit.status.name == "active" @mock.patch("charm.CephCsiCharm.ceph_context", new_callable=mock.PropertyMock) diff --git a/tests/unit/test_manifests_cephfs.py b/tests/unit/test_manifests_cephfs.py index 50ed43f..98d1b1e 100644 --- a/tests/unit/test_manifests_cephfs.py +++ 
b/tests/unit/test_manifests_cephfs.py @@ -1,11 +1,42 @@ import logging import unittest.mock as mock +from pathlib import Path +import yaml +from lightkube.models.apps_v1 import DaemonSet, Deployment from lightkube.models.meta_v1 import ObjectMeta from lightkube.resources.core_v1 import Secret from lightkube.resources.storage_v1 import StorageClass -from manifests_cephfs import CephFSManifests, CephStorageClass, StorageSecret +from manifests_cephfs import ( + CephFilesystem, + CephFSManifests, + CephStorageClass, + ProvisionerAdjustments, + StorageSecret, +) + +TEST_CEPH_FS = CephFilesystem( + name="ceph-fs", + metadata_pool="ceph-fs_metadata", + metadata_pool_id=1, + data_pool_ids=[2], + data_pools=["ceph-fs_data"], +) + +TEST_CEPH_FS_ALT = CephFilesystem( + name="ceph-fs-alt", + metadata_pool="ceph-fs-alt_metadata", + metadata_pool_id=1, + data_pool_ids=[2], + data_pools=["ceph-fs-alt_data"], +) + +upstream_path = Path(__file__).parent.parent.parent / "upstream" +cephfs_path = upstream_path / "cephfs" +current_path = cephfs_path / "manifests" / (cephfs_path / "version").read_text().strip() +provisioner_path = current_path / (ProvisionerAdjustments.PROVISIONER_NAME + ".yaml") +plugin_path = current_path / (ProvisionerAdjustments.PLUGIN_NAME + ".yaml") def test_storage_secret_modelled(caplog): @@ -41,34 +72,52 @@ def test_storage_secret_modelled(caplog): assert "Modelling secret data for cephfs storage." in caplog.text -def test_ceph_storage_class_modelled(caplog): +def test_ceph_provisioner_adjustment_modelled(caplog): caplog.set_level(logging.INFO) manifest = mock.MagicMock() - csc = CephStorageClass(manifest) + alt_path = "/var/lib/kubelet-alt" + manifest.config = { + "provisioner-replicas": 3, + "enable-host-networking": False, + "kubelet_dir": alt_path, + } + cpa = ProvisionerAdjustments(manifest) + resources = list(yaml.safe_load_all(provisioner_path.read_text())) + resource = Deployment.from_dict(resources[1]) + assert cpa(resource) is None + assert "Updating deployment replicas to 3" in caplog.text + assert "Updating deployment hostNetwork to False" in caplog.text + caplog.clear() - manifest.config = {"enabled": False} - assert csc() is None - assert "Ignore CephFS Storage Class" in caplog.text + resources = list(yaml.safe_load_all(plugin_path.read_text())) + resource = DaemonSet.from_dict(resources[0]) + assert cpa(resource) is None + assert alt_path in resource.spec.template.spec.containers[1].args[2] + assert alt_path in resource.spec.template.spec.containers[0].volumeMounts[1].mountPath + assert "Updating daemonset tolerations" in caplog.text + assert "Updating daemonset kubeletDir to /var/lib/kubelet-alt" in caplog.text - caplog.clear() - manifest.config = {"enabled": True} - assert csc() is None - assert "CephFS is missing required storage item: 'clusterID'" in caplog.text - caplog.clear() +def test_ceph_storage_class_modelled(caplog): + caplog.set_level(logging.INFO) + manifest = mock.MagicMock() + csc = CephStorageClass(manifest) alt_ns = "diff-ns" + manifest.config = { "enabled": True, "fsid": "abcd", - "fsname": "abcd", + "fs_list": [TEST_CEPH_FS_ALT], "namespace": alt_ns, - "default-storage": CephStorageClass.STORAGE_NAME, + "default-storage": TEST_CEPH_FS_ALT.name, "cephfs-mounter": "fuse", + "cephfs-storage-class-name-formatter": "{name}", } + caplog.clear() expected = StorageClass( metadata=ObjectMeta( - name=CephStorageClass.STORAGE_NAME, + name=TEST_CEPH_FS_ALT.name, annotations={"storageclass.kubernetes.io/is-default-class": "true"}, ), 
provisioner=CephStorageClass.PROVISIONER, @@ -80,15 +129,15 @@ def test_ceph_storage_class_modelled(caplog): "csi.storage.k8s.io/provisioner-secret-namespace": alt_ns, "csi.storage.k8s.io/node-stage-secret-name": StorageSecret.SECRET_NAME, "csi.storage.k8s.io/node-stage-secret-namespace": alt_ns, - "fsName": "abcd", + "fsName": "ceph-fs-alt", "mounter": "fuse", - "pool": "ceph-fs_data", + "pool": "ceph-fs-alt_data", }, allowVolumeExpansion=True, reclaimPolicy="Delete", ) - assert csc() == expected - assert f"Modelling storage class {CephStorageClass.STORAGE_NAME}" in caplog.text + assert csc() == [expected] + assert f"Modelling storage class sc='{TEST_CEPH_FS_ALT.name}'" in caplog.text def test_manifest_evaluation(caplog): @@ -99,21 +148,30 @@ def test_manifest_evaluation(caplog): assert "Skipping CephFS evaluation since it's disabled" in caplog.text charm.config = {"cephfs-enable": True} - assert manifests.evaluate() == "CephFS manifests require the definition of 'fsid'" + assert manifests.evaluate() == "CephFS manifests require the definition of 'kubernetes_key'" - charm.config = { - "cephfs-enable": True, - "fsid": "cluster", - } - assert manifests.evaluate() == "CephFS manifests require the definition of 'fsname'" - - charm.config = { - "cephfs-enable": True, - "user": "cephx", - "fsid": "cluster", - "fsname": "abcd", - "kubernetes_key": "123", - } + charm.config["kubernetes_key"] = "123" + assert manifests.evaluate() == "CephFS manifests require the definition of 'user'" + + charm.config["user"] = "cephx" + err_formatter = "CephFS manifests failed to create storage classes: {}" + assert manifests.evaluate() == err_formatter.format("missing fsid") + + charm.config["fsid"] = "cluster" + assert manifests.evaluate() == err_formatter.format("missing filesystem listing") + + charm.config[CephStorageClass.FILESYSTEM_LISTING] = [TEST_CEPH_FS] + assert manifests.evaluate() == err_formatter.format( + "empty " + CephStorageClass.STORAGE_NAME_FORMATTER + ) + + charm.config[CephStorageClass.FILESYSTEM_LISTING] = [TEST_CEPH_FS, TEST_CEPH_FS_ALT] + charm.config[CephStorageClass.STORAGE_NAME_FORMATTER] = CephStorageClass.STORAGE_NAME + assert manifests.evaluate() == err_formatter.format( + CephStorageClass.STORAGE_NAME_FORMATTER + " does not generate unique names" + ) + + charm.config[CephStorageClass.STORAGE_NAME_FORMATTER] = "cephfs-{name}" assert manifests.evaluate() is None charm.config["cephfs-tolerations"] = "key=value,Foo" diff --git a/upstream/rbd/manifests/v3.10.0/csidriver.yaml b/upstream/rbd/manifests/v3.10.0/csidriver.yaml new file mode 100644 index 0000000..1d1ae5b --- /dev/null +++ b/upstream/rbd/manifests/v3.10.0/csidriver.yaml @@ -0,0 +1,17 @@ +# +# /!\ DO NOT MODIFY THIS FILE +# +# This file has been automatically generated by Ceph-CSI yamlgen. +# The source for the contents can be found in the api/deploy directory, make +# your modifications there. +# +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "rbd.csi.ceph.com" +spec: + attachRequired: true + podInfoOnMount: false + seLinuxMount: true + fsGroupPolicy: File diff --git a/upstream/rbd/manifests/v3.10.1/csidriver.yaml b/upstream/rbd/manifests/v3.10.1/csidriver.yaml new file mode 100644 index 0000000..1d1ae5b --- /dev/null +++ b/upstream/rbd/manifests/v3.10.1/csidriver.yaml @@ -0,0 +1,17 @@ +# +# /!\ DO NOT MODIFY THIS FILE +# +# This file has been automatically generated by Ceph-CSI yamlgen. +# The source for the contents can be found in the api/deploy directory, make +# your modifications there. 
+# +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "rbd.csi.ceph.com" +spec: + attachRequired: true + podInfoOnMount: false + seLinuxMount: true + fsGroupPolicy: File diff --git a/upstream/rbd/manifests/v3.10.2/csidriver.yaml b/upstream/rbd/manifests/v3.10.2/csidriver.yaml new file mode 100644 index 0000000..1d1ae5b --- /dev/null +++ b/upstream/rbd/manifests/v3.10.2/csidriver.yaml @@ -0,0 +1,17 @@ +# +# /!\ DO NOT MODIFY THIS FILE +# +# This file has been automatically generated by Ceph-CSI yamlgen. +# The source for the contents can be found in the api/deploy directory, make +# your modifications there. +# +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "rbd.csi.ceph.com" +spec: + attachRequired: true + podInfoOnMount: false + seLinuxMount: true + fsGroupPolicy: File diff --git a/upstream/rbd/manifests/v3.11.0/csidriver.yaml b/upstream/rbd/manifests/v3.11.0/csidriver.yaml new file mode 100644 index 0000000..1d1ae5b --- /dev/null +++ b/upstream/rbd/manifests/v3.11.0/csidriver.yaml @@ -0,0 +1,17 @@ +# +# /!\ DO NOT MODIFY THIS FILE +# +# This file has been automatically generated by Ceph-CSI yamlgen. +# The source for the contents can be found in the api/deploy directory, make +# your modifications there. +# +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "rbd.csi.ceph.com" +spec: + attachRequired: true + podInfoOnMount: false + seLinuxMount: true + fsGroupPolicy: File diff --git a/upstream/rbd/manifests/v3.7.2/csidriver.yaml b/upstream/rbd/manifests/v3.7.2/csidriver.yaml new file mode 100644 index 0000000..77dfac1 --- /dev/null +++ b/upstream/rbd/manifests/v3.7.2/csidriver.yaml @@ -0,0 +1,18 @@ +# +# /!\ DO NOT MODIFY THIS FILE +# +# This file has been automatically generated by Ceph-CSI yamlgen. +# The source for the contents can be found in the api/deploy directory, make +# your modifications there. +# +--- +# if Kubernetes version is less than 1.18 change +# apiVersion to storage.k8s.io/v1beta1 +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "rbd.csi.ceph.com" +spec: + attachRequired: true + podInfoOnMount: false + fsGroupPolicy: File diff --git a/upstream/rbd/manifests/v3.8.0/csidriver.yaml b/upstream/rbd/manifests/v3.8.0/csidriver.yaml new file mode 100644 index 0000000..1d91061 --- /dev/null +++ b/upstream/rbd/manifests/v3.8.0/csidriver.yaml @@ -0,0 +1,16 @@ +# +# /!\ DO NOT MODIFY THIS FILE +# +# This file has been automatically generated by Ceph-CSI yamlgen. +# The source for the contents can be found in the api/deploy directory, make +# your modifications there. +# +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "rbd.csi.ceph.com" +spec: + attachRequired: true + podInfoOnMount: false + fsGroupPolicy: File diff --git a/upstream/rbd/manifests/v3.8.1/csidriver.yaml b/upstream/rbd/manifests/v3.8.1/csidriver.yaml new file mode 100644 index 0000000..1d91061 --- /dev/null +++ b/upstream/rbd/manifests/v3.8.1/csidriver.yaml @@ -0,0 +1,16 @@ +# +# /!\ DO NOT MODIFY THIS FILE +# +# This file has been automatically generated by Ceph-CSI yamlgen. +# The source for the contents can be found in the api/deploy directory, make +# your modifications there. 
+# +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "rbd.csi.ceph.com" +spec: + attachRequired: true + podInfoOnMount: false + fsGroupPolicy: File diff --git a/upstream/rbd/manifests/v3.9.0/csidriver.yaml b/upstream/rbd/manifests/v3.9.0/csidriver.yaml new file mode 100644 index 0000000..1d1ae5b --- /dev/null +++ b/upstream/rbd/manifests/v3.9.0/csidriver.yaml @@ -0,0 +1,17 @@ +# +# /!\ DO NOT MODIFY THIS FILE +# +# This file has been automatically generated by Ceph-CSI yamlgen. +# The source for the contents can be found in the api/deploy directory, make +# your modifications there. +# +--- +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "rbd.csi.ceph.com" +spec: + attachRequired: true + podInfoOnMount: false + seLinuxMount: true + fsGroupPolicy: File
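As a companion to the charm.py and manifests_cephfs.py changes above, here is a minimal,
self-contained sketch of how the new filesystem listing is consumed. The field names
mirror the CephFilesystem dataclass added in this patch and the keys emitted by
`ceph fs ls -f json`; the two filesystems and their pool ids below are hypothetical:

    import json
    from dataclasses import dataclass
    from typing import List

    @dataclass
    class CephFilesystem:
        """Mirrors the dataclass added in src/manifests_cephfs.py."""
        name: str
        metadata_pool: str
        metadata_pool_id: int
        data_pool_ids: List[int]
        data_pools: List[str]

    # Hypothetical `ceph fs ls -f json` output listing two filesystems.
    raw = """[
      {"name": "ceph-fs", "metadata_pool": "ceph-fs_metadata", "metadata_pool_id": 1,
       "data_pool_ids": [2], "data_pools": ["ceph-fs_data"]},
      {"name": "ceph-fs-alt", "metadata_pool": "ceph-fs-alt_metadata", "metadata_pool_id": 3,
       "data_pool_ids": [4], "data_pools": ["ceph-fs-alt_data"]}
    ]"""

    fs_list = [CephFilesystem(**fs) for fs in json.loads(raw)]
    for fs in fs_list:
        # One storage class is later modelled for every entry in fs.data_pools.
        print(fs.name, fs.data_pools)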