Sarc 319 log trace diskusage #107

Draft
wants to merge 8 commits into base: master
29 changes: 19 additions & 10 deletions sarc/cli/acquire/storages.py
@@ -1,5 +1,7 @@
import logging
from dataclasses import dataclass

from opentelemetry.trace import Status, StatusCode, get_tracer
from simple_parsing import field

from sarc.config import config
@@ -18,22 +20,29 @@ class AcquireStorages:
cluster_names: list[str] = field(alias=["-c"], default_factory=list)
dry: bool = False

tracer = get_tracer("AcquireStorages")

def execute(self) -> int:
cfg = config()

for cluster_name in self.cluster_names:
print(f"Acquiring {cluster_name} storages report...")
with self.tracer.start_as_current_span("cluster") as span:
span.set_attribute("cluster_name", cluster_name)

logging.info(f"Acquiring {cluster_name} storages report...")
Review comment (Member): We should decide whether we want the arguments passed separately so they can be emitted as JSON. Otherwise we should add the pylint exception to the config.
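As an illustration of the "separate arguments" option mentioned above, a minimal sketch (an assumption about the intent, not code from this PR): passing cluster_name lazily lets a structured/JSON log handler keep it as its own field and avoids pylint's logging-fstring-interpolation warning.

# Sketch only: lazy %-style argument instead of an f-string, so a JSON/structured
# handler can serialize cluster_name as a separate field.
logging.info("Acquiring %s storages report...", cluster_name)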


cluster = cfg.clusters[cluster_name]

cluster = cfg.clusters[cluster_name]
fetch_diskusage = methods.get(cluster_name, methods["default"])
Review comment (Member): There are certainly parts of the fetch_diskusage implementations that would be worth instrumenting as well.
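A hypothetical sketch of what such instrumentation might look like inside one fetch_diskusage implementation (the helpers run_remote_du and parse_du_output are made up for illustration and are not part of the codebase):

from opentelemetry.trace import get_tracer

tracer = get_tracer("sarc.storage")

def fetch_diskusage(cluster):
    # Child span: nests under the "cluster" span opened in AcquireStorages.execute().
    with tracer.start_as_current_span("fetch_diskusage") as span:
        span.set_attribute("cluster_name", getattr(cluster, "name", "unknown"))
        raw = run_remote_du(cluster)    # hypothetical helper: run `du` on the cluster
        return parse_du_output(raw)     # hypothetical helper: parse into a report object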

du = fetch_diskusage(cluster)

fetch_diskusage = methods.get(cluster_name, methods["default"])
du = fetch_diskusage(cluster)
if not self.dry:
collection = get_diskusage_collection()
collection.add(du)
else:
print("Document:")
print(du.json(indent=2))

if not self.dry:
collection = get_diskusage_collection()
collection.add(du)
else:
print("Document:")
print(du.json(indent=2))
span.set_status(Status(StatusCode.OK))

return 0
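
Pieced together from the added lines above, the instrumented execute() reads roughly as follows (a reconstruction for readability, not the authoritative file contents):

def execute(self) -> int:
    cfg = config()

    for cluster_name in self.cluster_names:
        # One span per acquired cluster, tagged with the cluster name.
        with self.tracer.start_as_current_span("cluster") as span:
            span.set_attribute("cluster_name", cluster_name)

            logging.info(f"Acquiring {cluster_name} storages report...")

            cluster = cfg.clusters[cluster_name]

            fetch_diskusage = methods.get(cluster_name, methods["default"])
            du = fetch_diskusage(cluster)

            if not self.dry:
                collection = get_diskusage_collection()
                collection.add(du)
            else:
                print("Document:")
                print(du.json(indent=2))

            span.set_status(Status(StatusCode.OK))

    return 0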
15 changes: 14 additions & 1 deletion tests/functional/diskusage/test_mila_diskusage.py
@@ -1,6 +1,8 @@
import logging
from pathlib import Path

import pytest
from opentelemetry.trace import Status, StatusCode, get_tracer

import sarc.storage.mila
from sarc.ldap.api import Credentials, User
@@ -62,9 +64,10 @@ def mock_get_users():
"test_config", [{"clusters": {"mila": {"host": "mila"}}}], indirect=True
)
def test_mila_fetch_diskusage_single(
test_config, monkeypatch, cli_main, file_regression
test_config, monkeypatch, cli_main, file_regression, caplog, captrace
):
count = 0
caplog.set_level(logging.INFO)

def mock_get_report(*args):
nonlocal count
@@ -93,6 +96,16 @@ def mock_get_report(*args):
# report = sarc.storage.mila.fetch_diskusage_report(cluster=test_config.clusters["mila"])
file_regression.check(data[0].json(exclude={"id": True}, indent=4))

# check logging
assert "Acquiring mila storages report..." in caplog.text

# check traces
traces = captrace.get_finished_spans()
assert len(traces) == 1 # only one cluster was acquired
assert traces[0].name == "cluster"
assert traces[0].attributes["cluster_name"] == "mila"
assert traces[0].status.status_code == StatusCode.OK


@pytest.mark.freeze_time("2023-07-25")
@pytest.mark.parametrize(
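The test above relies on a captrace fixture to collect finished spans; its definition is not part of this diff. A minimal sketch of how such a fixture could be built on OpenTelemetry's in-memory exporter (an assumption, not the project's actual implementation):

import pytest
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter

@pytest.fixture
def captrace():
    # Route spans to an in-memory exporter so tests can call get_finished_spans().
    exporter = InMemorySpanExporter()
    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(exporter))
    trace.set_tracer_provider(provider)  # note: the global provider can only be set once per process
    yield exporter
    exporter.clear()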