Fix issue with label visualisation #811

Merged · 6 commits · Nov 14, 2024
Changes from all commits
Label visualization block:

@@ -206,7 +206,12 @@ def run(
         if text == "Class":
             labels = predictions["class_name"]
         elif text == "Tracker Id":
-            labels = [str(t) if t else "" for t in predictions.tracker_id]
+            if predictions.tracker_id is not None:
+                labels = [
+                    str(t) if t else "No Tracker ID" for t in predictions.tracker_id
+                ]
+            else:
+                labels = ["No Tracker ID"] * len(predictions)
         elif text == "Time In Zone":
             if "time_in_zone" in predictions.data:
                 labels = [
@@ -241,7 +246,6 @@ def run(
                 labels = [str(d) if d else "" for d in predictions[text]]
             except Exception:
                 raise ValueError(f"Invalid text type: {text}")
-
         annotated_image = annotator.annotate(
             scene=image.numpy_image.copy() if copy_image else image.numpy_image,
             detections=predictions,
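The first hunk guards against `predictions.tracker_id` being `None`, which is how supervision's `sv.Detections` represents detections that never passed through a tracker (including the empty-detections case). Below is a minimal sketch of the failure and the fix, not part of the PR itself: the placeholder label comes from this change, while the standalone `sv.Detections` setup is an assumption about the block's inputs.

import numpy as np
import supervision as sv

# Detections that never passed through a tracker: tracker_id stays None.
predictions = sv.Detections(
    xyxy=np.array([[0.0, 0.0, 10.0, 10.0]]),
    class_id=np.array([0]),
    confidence=np.array([0.9]),
)

# Old code iterated tracker_id unconditionally, so this line raised
# "TypeError: 'NoneType' object is not iterable":
# labels = [str(t) if t else "" for t in predictions.tracker_id]

# Fixed logic: emit a placeholder label for every detection instead.
if predictions.tracker_id is not None:
    labels = [str(t) if t else "No Tracker ID" for t in predictions.tracker_id]
else:
    labels = ["No Tracker ID"] * len(predictions)

print(labels)  # ['No Tracker ID']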
New test file:
@@ -0,0 +1,73 @@
import numpy as np

from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS
from inference.core.managers.base import ModelManager
from inference.core.workflows.core_steps.common.entities import StepExecutionMode
from inference.core.workflows.execution_engine.core import ExecutionEngine

VISUALIZATION_WORKFLOW = {
    "version": "1.0",
    "inputs": [
        {"type": "WorkflowImage", "name": "image"},
        {
            "type": "WorkflowParameter",
            "name": "model_id",
            "default_value": "yolov8n-640",
        },
        {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3},
    ],
    "steps": [
        {
            "type": "RoboflowObjectDetectionModel",
            "name": "detection",
            "image": "$inputs.image",
            "model_id": "$inputs.model_id",
            "confidence": "$inputs.confidence",
        },
        {
            "type": "roboflow_core/label_visualization@v1",
            "name": "label_visualization",
            "predictions": "$steps.detection.predictions",
            "image": "$inputs.image",
            "text": "Tracker Id",
        },
    ],
    "outputs": [
        {"type": "JsonField", "name": "result", "selector": "$steps.detection.*"},
        {
            "type": "JsonField",
            "name": "visualized",
            "selector": "$steps.label_visualization.image",
        },
    ],
}


def test_workflow_when_detections_are_not_present(
    model_manager: ModelManager,
    crowd_image: np.ndarray,
) -> None:
    """This test covers a bug in the label annotator block."""
    # given
    workflow_init_parameters = {
        "workflows_core.model_manager": model_manager,
        "workflows_core.api_key": None,
        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
    }
    execution_engine = ExecutionEngine.init(
        workflow_definition=VISUALIZATION_WORKFLOW,
        init_parameters=workflow_init_parameters,
        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
    )

    # when
    result = execution_engine.run(
        runtime_parameters={"image": crowd_image, "confidence": 0.99999}
    )

    # then
    assert isinstance(result, list), "Expected result to be list"
    assert len(result) == 1, "Single image provided - single output expected"
    assert (
        len(result[0]["result"]["predictions"]) == 0
    ), "Expected no predictions to be delivered"