From 9661b487fc1d9b3e86c1064cf16c7a504e494be1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Wed, 30 Oct 2024 10:47:48 +0100 Subject: [PATCH 01/67] WIP - first changes to add serializers and deserializers --- inference/core/workflows/core_steps/loader.py | 3 + .../introspection/blocks_loader.py | 59 ++++++++++++++++++- 2 files changed, 61 insertions(+), 1 deletion(-) diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index d96b79f18..55b1506c8 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -337,6 +337,9 @@ "allowed_write_directory": WORKFLOW_BLOCKS_WRITE_DIRECTORY, } +KINDS_SERIALIZERS = {} +KINDS_DESERIALIZERS = {} + def load_blocks() -> List[Type[WorkflowBlock]]: return [ diff --git a/inference/core/workflows/execution_engine/introspection/blocks_loader.py b/inference/core/workflows/execution_engine/introspection/blocks_loader.py index dc1bdffe1..ae3cb54c8 100644 --- a/inference/core/workflows/execution_engine/introspection/blocks_loader.py +++ b/inference/core/workflows/execution_engine/introspection/blocks_loader.py @@ -2,6 +2,7 @@ import logging import os from collections import Counter +from copy import copy from functools import lru_cache from typing import Any, Callable, Dict, List, Optional, Union @@ -11,7 +12,7 @@ from inference.core.workflows.core_steps.loader import ( REGISTERED_INITIALIZERS, load_blocks, - load_kinds, + load_kinds, KINDS_SERIALIZERS, KINDS_DESERIALIZERS, ) from inference.core.workflows.errors import ( PluginInterfaceError, @@ -399,6 +400,62 @@ def _load_plugin_kinds(plugin_name: str) -> List[Kind]: return kinds +def load_kinds_serializers() -> Dict[str, Callable[[Any], Any]]: + kinds_serializers = copy(KINDS_SERIALIZERS) + plugin_kinds_serializers = load_plugins_serialization_functions(module_property="KINDS_SERIALIZERS") + kinds_serializers.update(plugin_kinds_serializers) + return kinds_serializers + + +def load_kinds_deserializers() -> Dict[str, Callable[[Any], Any]]: + kinds_deserializers = copy(KINDS_DESERIALIZERS) + plugin_kinds_deserializers = load_plugins_serialization_functions(module_property="KINDS_DESERIALIZERS") + kinds_deserializers.update(plugin_kinds_deserializers) + return kinds_deserializers + + +def load_plugins_serialization_functions(module_property: str) -> Dict[str, Callable[[Any], Any]]: + plugins_to_load = get_plugin_modules() + result = {} + for plugin_name in plugins_to_load: + result.update(load_plugin_serializers(plugin_name=plugin_name, module_property=module_property)) + return result + + +def load_plugin_serializers(plugin_name: str, module_property: str) -> Dict[str, Callable[[Any], Any]]: + try: + return _load_plugin_serializers(plugin_name=plugin_name, module_property=module_property) + except ImportError as e: + raise PluginLoadingError( + public_message=f"It is not possible to load kinds serializers from workflow plugin `{plugin_name}`. 
" + f"Make sure the library providing custom step is correctly installed in Python environment.", + context="blocks_loading", + inner_error=e, + ) from e + except AttributeError as e: + raise PluginInterfaceError( + public_message=f"Provided workflow plugin `{plugin_name}` do not implement blocks loading " + f"interface correctly and cannot be loaded.", + context="blocks_loading", + inner_error=e, + ) from e + + +def _load_plugin_serializers(plugin_name: str, module_property: str) -> Dict[str, Callable[[Any], Any]]: + module = importlib.import_module(plugin_name) + if not hasattr(module, module_property): + return {} + kinds_serializers = getattr(module, module_property) + if not isinstance(kinds_serializers, dict): + raise PluginInterfaceError( + public_message=f"Provided workflow plugin `{plugin_name}` do not implement blocks loading " + f"interface correctly and cannot be loaded. `{module_property}` is expected to be " + f"dictionary.", + context="blocks_loading", + ) + return kinds_serializers + + def get_plugin_modules() -> List[str]: plugins_to_load = os.environ.get(WORKFLOWS_PLUGINS_ENV) if plugins_to_load is None: From 38fc7e5e1d418b49f9d1f889487bfc3af2b5748e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Wed, 30 Oct 2024 15:39:52 +0100 Subject: [PATCH 02/67] Create first scratch of implementation for serializers and deserializers --- .../interfaces/http/handlers/workflows.py | 18 ++ inference/core/interfaces/http/http_api.py | 12 +- .../core/interfaces/http/orjson_utils.py | 55 +--- .../core_steps/common/deserializers.py | 225 ++++++++++++++++ .../core_steps/common/serializers.py | 54 +++- .../core/workflows/core_steps/common/utils.py | 2 +- inference/core/workflows/core_steps/loader.py | 34 ++- .../core/workflows/execution_engine/core.py | 2 + .../execution_engine/entities/base.py | 11 +- .../execution_engine/entities/engine.py | 1 + .../introspection/blocks_loader.py | 58 ++++- .../execution_engine/v1/compiler/core.py | 10 + .../execution_engine/v1/compiler/entities.py | 12 +- .../v1/compiler/graph_constructor.py | 34 ++- .../workflows/execution_engine/v1/core.py | 4 + .../execution_engine/v1/executor/core.py | 6 +- .../v1/executor/output_constructor.py | 87 ++++++- .../v1/executor/runtime_input_assembler.py | 242 ++++++------------ tests/conftest.py | 1 + .../compiler/test_graph_constructor.py | 2 + .../executor/test_output_constructor.py | 5 + .../executor/test_runtime_input_assembler.py | 24 +- 22 files changed, 648 insertions(+), 251 deletions(-) create mode 100644 inference/core/workflows/core_steps/common/deserializers.py diff --git a/inference/core/interfaces/http/handlers/workflows.py b/inference/core/interfaces/http/handlers/workflows.py index ad9864576..e71c0d19b 100644 --- a/inference/core/interfaces/http/handlers/workflows.py +++ b/inference/core/interfaces/http/handlers/workflows.py @@ -137,3 +137,21 @@ def get_unique_kinds( for output_field_kinds in output_definition.values(): all_kinds.update(output_field_kinds) return all_kinds + + +def filter_out_unwanted_workflow_outputs( + workflow_results: List[dict], + excluded_fields: Optional[List[str]], +) -> List[dict]: + if not excluded_fields: + return workflow_results + excluded_fields = set(excluded_fields) + filtered_results = [] + for result_element in workflow_results: + filtered_result = {} + for key, value in result_element.items(): + if key in excluded_fields: + continue + filtered_result[key] = value + filtered_results.append(filtered_result) + return filtered_results diff --git 
a/inference/core/interfaces/http/http_api.py b/inference/core/interfaces/http/http_api.py index 231f69eb6..5a95c6f51 100644 --- a/inference/core/interfaces/http/http_api.py +++ b/inference/core/interfaces/http/http_api.py @@ -155,6 +155,7 @@ ) from inference.core.interfaces.base import BaseInterface from inference.core.interfaces.http.handlers.workflows import ( + filter_out_unwanted_workflow_outputs, handle_describe_workflows_blocks_request, handle_describe_workflows_interface, ) @@ -722,13 +723,16 @@ def process_workflow_inference_request( prevent_local_images_loading=True, profiler=profiler, ) - result = execution_engine.run(runtime_parameters=workflow_request.inputs) + workflow_results = execution_engine.run( + runtime_parameters=workflow_request.inputs, + serialize_results=True, + ) with profiler.profile_execution_phase( - name="workflow_results_serialisation", + name="workflow_results_filtering", categories=["inference_package_operation"], ): - outputs = serialise_workflow_result( - result=result, + outputs = filter_out_unwanted_workflow_outputs( + workflow_results=workflow_results, excluded_fields=workflow_request.excluded_fields, ) profiler_trace = profiler.export_trace() diff --git a/inference/core/interfaces/http/orjson_utils.py b/inference/core/interfaces/http/orjson_utils.py index 27ecb17d7..aa91baa8a 100644 --- a/inference/core/interfaces/http/orjson_utils.py +++ b/inference/core/interfaces/http/orjson_utils.py @@ -2,7 +2,6 @@ from typing import Any, Dict, List, Optional, Union import orjson -import supervision as sv from fastapi.responses import ORJSONResponse from pydantic import BaseModel @@ -10,10 +9,8 @@ from inference.core.utils.function import deprecated from inference.core.utils.image_utils import ImageType from inference.core.workflows.core_steps.common.serializers import ( - serialise_image, - serialise_sv_detections, + serialize_wildcard_kind, ) -from inference.core.workflows.execution_engine.entities.base import WorkflowImageData class ORJSONResponseBytes(ORJSONResponse): @@ -44,6 +41,11 @@ def orjson_response( return ORJSONResponseBytes(content=content) +@deprecated( + reason="Function serialise_workflow_result(...) will be removed from `inference` end of Q1 2025. " + "Workflows ecosystem shifted towards internal serialization - see Workflows docs: " + "https://inference.roboflow.com/workflows/about/" +) def serialise_workflow_result( result: List[Dict[str, Any]], excluded_fields: Optional[List[str]] = None, @@ -57,6 +59,11 @@ def serialise_workflow_result( ] +@deprecated( + reason="Function serialise_single_workflow_result_element(...) will be removed from `inference` end of Q1 2025. 
" + "Workflows ecosystem shifted towards internal serialization - see Workflows docs: " + "https://inference.roboflow.com/workflows/about/" +) def serialise_single_workflow_result_element( result_element: Dict[str, Any], excluded_fields: Optional[List[str]] = None, @@ -68,45 +75,7 @@ def serialise_single_workflow_result_element( for key, value in result_element.items(): if key in excluded_fields: continue - if isinstance(value, WorkflowImageData): - value = serialise_image(image=value) - elif isinstance(value, dict): - value = serialise_dict(elements=value) - elif isinstance(value, list): - value = serialise_list(elements=value) - elif isinstance(value, sv.Detections): - value = serialise_sv_detections(detections=value) - serialised_result[key] = value - return serialised_result - - -def serialise_list(elements: List[Any]) -> List[Any]: - result = [] - for element in elements: - if isinstance(element, WorkflowImageData): - element = serialise_image(image=element) - elif isinstance(element, dict): - element = serialise_dict(elements=element) - elif isinstance(element, list): - element = serialise_list(elements=element) - elif isinstance(element, sv.Detections): - element = serialise_sv_detections(detections=element) - result.append(element) - return result - - -def serialise_dict(elements: Dict[str, Any]) -> Dict[str, Any]: - serialised_result = {} - for key, value in elements.items(): - if isinstance(value, WorkflowImageData): - value = serialise_image(image=value) - elif isinstance(value, dict): - value = serialise_dict(elements=value) - elif isinstance(value, list): - value = serialise_list(elements=value) - elif isinstance(value, sv.Detections): - value = serialise_sv_detections(detections=value) - serialised_result[key] = value + serialised_result[key] = serialize_wildcard_kind(value=value) return serialised_result diff --git a/inference/core/workflows/core_steps/common/deserializers.py b/inference/core/workflows/core_steps/common/deserializers.py new file mode 100644 index 000000000..0449cdeef --- /dev/null +++ b/inference/core/workflows/core_steps/common/deserializers.py @@ -0,0 +1,225 @@ +import os +from typing import Any, List +from uuid import uuid4 + +import cv2 +import numpy as np +import supervision as sv +from pydantic import ValidationError + +from inference.core.utils.image_utils import ( + attempt_loading_image_from_string, + load_image_from_url, +) +from inference.core.workflows.core_steps.common.utils import ( + add_inference_keypoints_to_sv_detections, +) +from inference.core.workflows.errors import RuntimeInputError +from inference.core.workflows.execution_engine.constants import ( + BOUNDING_RECT_ANGLE_KEY_IN_INFERENCE_RESPONSE, + BOUNDING_RECT_ANGLE_KEY_IN_SV_DETECTIONS, + BOUNDING_RECT_HEIGHT_KEY_IN_INFERENCE_RESPONSE, + BOUNDING_RECT_HEIGHT_KEY_IN_SV_DETECTIONS, + BOUNDING_RECT_RECT_KEY_IN_INFERENCE_RESPONSE, + BOUNDING_RECT_RECT_KEY_IN_SV_DETECTIONS, + BOUNDING_RECT_WIDTH_KEY_IN_INFERENCE_RESPONSE, + BOUNDING_RECT_WIDTH_KEY_IN_SV_DETECTIONS, + DETECTED_CODE_KEY, + DETECTION_ID_KEY, + IMAGE_DIMENSIONS_KEY, + KEYPOINTS_KEY_IN_INFERENCE_RESPONSE, + PARENT_ID_KEY, + PATH_DEVIATION_KEY_IN_INFERENCE_RESPONSE, + PATH_DEVIATION_KEY_IN_SV_DETECTIONS, + TIME_IN_ZONE_KEY_IN_INFERENCE_RESPONSE, + TIME_IN_ZONE_KEY_IN_SV_DETECTIONS, +) +from inference.core.workflows.execution_engine.entities.base import ( + ImageParentMetadata, + VideoMetadata, + WorkflowImageData, +) + + +def deserialize_image_kind( + parameter: str, + image: Any, + prevent_local_images_loading: bool = False, 
+) -> WorkflowImageData: + video_metadata = None + if isinstance(image, dict) and "video_metadata" in image: + video_metadata = deserialize_video_metadata_kind( + parameter=parameter, video_metadata=image["video_metadata"] + ) + if isinstance(image, dict) and isinstance(image.get("value"), np.ndarray): + image = image["value"] + if isinstance(image, np.ndarray): + parent_metadata = ImageParentMetadata(parent_id=parameter) + return WorkflowImageData( + parent_metadata=parent_metadata, + numpy_image=image, + video_metadata=video_metadata, + ) + try: + if isinstance(image, dict): + image = image["value"] + if isinstance(image, str): + base64_image = None + image_reference = None + if image.startswith("http://") or image.startswith("https://"): + image_reference = image + image = load_image_from_url(value=image) + elif not prevent_local_images_loading and os.path.exists(image): + # prevent_local_images_loading is introduced to eliminate + # server vulnerability - namely it prevents local server + # file system from being exploited. + image_reference = image + image = cv2.imread(image) + else: + base64_image = image + image = attempt_loading_image_from_string(image)[0] + parent_metadata = ImageParentMetadata(parent_id=parameter) + return WorkflowImageData( + parent_metadata=parent_metadata, + numpy_image=image, + base64_image=base64_image, + image_reference=image_reference, + video_metadata=video_metadata, + ) + except Exception as error: + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` defined as `WorkflowImage` " + f"that is invalid. Failed on input validation. Details: {error}", + context="workflow_execution | runtime_input_validation", + ) from error + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` defined as `WorkflowImage` " + f"with type {type(image)} that is invalid. Workflows accept only np.arrays " + f"and dicts with keys `type` and `value` compatible with `inference` (or list of them).", + context="workflow_execution | runtime_input_validation", + ) + + +def deserialize_video_metadata_kind( + parameter: str, + video_metadata: Any, +) -> VideoMetadata: + if isinstance(video_metadata, VideoMetadata): + return video_metadata + if not isinstance(video_metadata, dict): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` holding " + f"`WorkflowVideoMetadata`, but provided value is not a dict.", + context="workflow_execution | runtime_input_validation", + ) + try: + return VideoMetadata.model_validate(video_metadata) + except ValidationError as error: + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` holding " + f"`WorkflowVideoMetadata`, but provided value is malformed. 
" + f"See details in inner error.", + context="workflow_execution | runtime_input_validation", + inner_error=error, + ) + + +def deserialize_detections_kind( + parameter: str, + detections: Any, +) -> sv.Detections: + if isinstance(detections, sv.Detections): + return detections + if not isinstance(detections, dict): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"detections, but invalid type of data found.", + context="workflow_execution | runtime_input_validation", + ) + if "predictions" not in detections or "image" not in detections: + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"detections, but dictionary misses required keys.", + context="workflow_execution | runtime_input_validation", + ) + parsed_detections = sv.Detections.from_inference(detections) + if len(parsed_detections) == 0: + return parsed_detections + height, width = detections["image"]["height"], detections["image"]["width"] + image_metadata = np.array([[height, width]] * len(parsed_detections)) + parsed_detections.data[IMAGE_DIMENSIONS_KEY] = image_metadata + detection_ids = [ + detection.get(DETECTION_ID_KEY, str(uuid4())) + for detection in detections["predictions"] + ] + parsed_detections.data[DETECTION_ID_KEY] = np.array(detection_ids) + parent_ids = [ + detection.get(PARENT_ID_KEY, parameter) + for detection in detections["predictions"] + ] + detections[PARENT_ID_KEY] = np.array(parent_ids) + optional_elements_keys = [ + (PATH_DEVIATION_KEY_IN_INFERENCE_RESPONSE, PATH_DEVIATION_KEY_IN_SV_DETECTIONS), + (TIME_IN_ZONE_KEY_IN_INFERENCE_RESPONSE, TIME_IN_ZONE_KEY_IN_SV_DETECTIONS), + ( + BOUNDING_RECT_ANGLE_KEY_IN_INFERENCE_RESPONSE, + BOUNDING_RECT_ANGLE_KEY_IN_SV_DETECTIONS, + ), + ( + BOUNDING_RECT_RECT_KEY_IN_INFERENCE_RESPONSE, + BOUNDING_RECT_RECT_KEY_IN_SV_DETECTIONS, + ), + ( + BOUNDING_RECT_HEIGHT_KEY_IN_INFERENCE_RESPONSE, + BOUNDING_RECT_HEIGHT_KEY_IN_SV_DETECTIONS, + ), + ( + BOUNDING_RECT_WIDTH_KEY_IN_INFERENCE_RESPONSE, + BOUNDING_RECT_WIDTH_KEY_IN_SV_DETECTIONS, + ), + (DETECTED_CODE_KEY, DETECTED_CODE_KEY), + ] + for raw_detection_key, parsed_detection_key in optional_elements_keys: + parsed_detections = _attach_optional_detection_element( + raw_detections=detections["predictions"], + parsed_detections=parsed_detections, + raw_detection_key=raw_detection_key, + parsed_detection_key=parsed_detection_key, + ) + return _attach_optional_key_points_detections( + raw_detections=detections["predictions"], + parsed_detections=parsed_detections, + ) + + +def _attach_optional_detection_element( + raw_detections: List[dict], + parsed_detections: sv.Detections, + raw_detection_key: str, + parsed_detection_key: str, +) -> sv.Detections: + if raw_detection_key not in raw_detections[0]: + return parsed_detections + result = [] + for detection in raw_detections: + result.append(detection[raw_detection_key]) + parsed_detections.data[parsed_detection_key] = np.array(result) + return parsed_detections + + +def _attach_optional_key_points_detections( + raw_detections: List[dict], + parsed_detections: sv.Detections, +) -> sv.Detections: + if KEYPOINTS_KEY_IN_INFERENCE_RESPONSE not in raw_detections[0]: + return parsed_detections + return add_inference_keypoints_to_sv_detections( + inference_prediction=raw_detections, + detections=parsed_detections, + ) + + +def deserialize_numpy_array(parameter: str, raw_array: Any) -> np.ndarray: + if isinstance(raw_array, np.ndarray): + return raw_array + return 
np.array(raw_array) diff --git a/inference/core/workflows/core_steps/common/serializers.py b/inference/core/workflows/core_steps/common/serializers.py index 736261d00..512df70b3 100644 --- a/inference/core/workflows/core_steps/common/serializers.py +++ b/inference/core/workflows/core_steps/common/serializers.py @@ -1,4 +1,4 @@ -from typing import Any, Dict +from typing import Any, Dict, List import numpy as np import supervision as sv @@ -35,7 +35,10 @@ X_KEY, Y_KEY, ) -from inference.core.workflows.execution_engine.entities.base import WorkflowImageData +from inference.core.workflows.execution_engine.entities.base import ( + VideoMetadata, + WorkflowImageData, +) def serialise_sv_detections(detections: sv.Detections) -> dict: @@ -143,4 +146,51 @@ def serialise_image(image: WorkflowImageData) -> Dict[str, Any]: return { "type": "base64", "value": image.base64_image, + "video_metadata": image.video_metadata.dict(), } + + +def serialize_video_metadata_kind(video_metadata: VideoMetadata) -> dict: + return video_metadata.dict() + + +def serialize_wildcard_kind(value: Any) -> Any: + if isinstance(value, WorkflowImageData): + value = serialise_image(image=value) + elif isinstance(value, dict): + value = serialise_dict(elements=value) + elif isinstance(value, list): + value = serialise_list(elements=value) + elif isinstance(value, sv.Detections): + value = serialise_sv_detections(detections=value) + return value + + +def serialise_list(elements: List[Any]) -> List[Any]: + result = [] + for element in elements: + if isinstance(element, WorkflowImageData): + element = serialise_image(image=element) + elif isinstance(element, dict): + element = serialise_dict(elements=element) + elif isinstance(element, list): + element = serialise_list(elements=element) + elif isinstance(element, sv.Detections): + element = serialise_sv_detections(detections=element) + result.append(element) + return result + + +def serialise_dict(elements: Dict[str, Any]) -> Dict[str, Any]: + serialised_result = {} + for key, value in elements.items(): + if isinstance(value, WorkflowImageData): + value = serialise_image(image=value) + elif isinstance(value, dict): + value = serialise_dict(elements=value) + elif isinstance(value, list): + value = serialise_list(elements=value) + elif isinstance(value, sv.Detections): + value = serialise_sv_detections(detections=value) + serialised_result[key] = value + return serialised_result diff --git a/inference/core/workflows/core_steps/common/utils.py b/inference/core/workflows/core_steps/common/utils.py index 138afe9c0..d8fc916f4 100644 --- a/inference/core/workflows/core_steps/common/utils.py +++ b/inference/core/workflows/core_steps/common/utils.py @@ -100,7 +100,7 @@ def convert_inference_detections_batch_to_sv_detections( detections = sv.Detections.from_inference(p) parent_ids = [d.get(PARENT_ID_KEY, "") for d in p[predictions_key]] detection_ids = [ - d.get(DETECTION_ID_KEY, str(uuid.uuid4)) for d in p[predictions_key] + d.get(DETECTION_ID_KEY, str(uuid.uuid4())) for d in p[predictions_key] ] detections[DETECTION_ID_KEY] = np.array(detection_ids) detections[PARENT_ID_KEY] = np.array(parent_ids) diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 55b1506c8..a1fc50499 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -68,7 +68,19 @@ from inference.core.workflows.core_steps.classical_cv.threshold.v1 import ( ImageThresholdBlockV1, ) +from 
inference.core.workflows.core_steps.common.deserializers import ( + deserialize_detections_kind, + deserialize_image_kind, + deserialize_numpy_array, + deserialize_video_metadata_kind, +) from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.core_steps.common.serializers import ( + serialise_image, + serialise_sv_detections, + serialize_video_metadata_kind, + serialize_wildcard_kind, +) from inference.core.workflows.core_steps.flow_control.continue_if.v1 import ( ContinueIfBlockV1, ) @@ -337,8 +349,26 @@ "allowed_write_directory": WORKFLOW_BLOCKS_WRITE_DIRECTORY, } -KINDS_SERIALIZERS = {} -KINDS_DESERIALIZERS = {} +KINDS_SERIALIZERS = { + IMAGE_KIND.name: serialise_image, + VIDEO_METADATA_KIND.name: serialize_video_metadata_kind, + OBJECT_DETECTION_PREDICTION_KIND.name: serialise_sv_detections, + INSTANCE_SEGMENTATION_PREDICTION_KIND.name: serialise_sv_detections, + KEYPOINT_DETECTION_PREDICTION_KIND.name: serialise_sv_detections, + QR_CODE_DETECTION_KIND.name: serialise_sv_detections, + BAR_CODE_DETECTION_KIND.name: serialise_sv_detections, + WILDCARD_KIND.name: serialize_wildcard_kind, +} +KINDS_DESERIALIZERS = { + IMAGE_KIND.name: deserialize_image_kind, + VIDEO_METADATA_KIND.name: deserialize_video_metadata_kind, + OBJECT_DETECTION_PREDICTION_KIND.name: deserialize_detections_kind, + INSTANCE_SEGMENTATION_PREDICTION_KIND.name: deserialize_detections_kind, + KEYPOINT_DETECTION_PREDICTION_KIND.name: deserialize_detections_kind, + QR_CODE_DETECTION_KIND.name: deserialize_detections_kind, + BAR_CODE_DETECTION_KIND.name: deserialize_detections_kind, + NUMPY_ARRAY_KIND.name: deserialize_numpy_array, +} def load_blocks() -> List[Type[WorkflowBlock]]: diff --git a/inference/core/workflows/execution_engine/core.py b/inference/core/workflows/execution_engine/core.py index 955bae3cf..f365af339 100644 --- a/inference/core/workflows/execution_engine/core.py +++ b/inference/core/workflows/execution_engine/core.py @@ -65,11 +65,13 @@ def run( runtime_parameters: Dict[str, Any], fps: float = 0, _is_preview: bool = False, + serialize_results: bool = False, ) -> List[Dict[str, Any]]: return self._engine.run( runtime_parameters=runtime_parameters, fps=fps, _is_preview=_is_preview, + serialize_results=serialize_results, ) diff --git a/inference/core/workflows/execution_engine/entities/base.py b/inference/core/workflows/execution_engine/entities/base.py index d09dccab0..9c638774c 100644 --- a/inference/core/workflows/execution_engine/entities/base.py +++ b/inference/core/workflows/execution_engine/entities/base.py @@ -55,6 +55,9 @@ def get_type(self) -> str: class WorkflowInput(BaseModel): + type: str + name: str + kind: List[Kind] @classmethod def is_batch_oriented(cls) -> bool: @@ -81,6 +84,12 @@ def is_batch_oriented(cls) -> bool: return True +class WorkflowDataBatch(WorkflowInput): + type: Literal["WorkflowDataBatch"] + name: str + kind: List[Kind] = Field(default_factory=lambda: [WILDCARD_KIND]) + + class WorkflowParameter(WorkflowInput): type: Literal["WorkflowParameter", "InferenceParameter"] name: str @@ -91,7 +100,7 @@ class WorkflowParameter(WorkflowInput): InputType = Annotated[ - Union[WorkflowImage, WorkflowVideoMetadata, WorkflowParameter], + Union[WorkflowImage, WorkflowVideoMetadata, WorkflowParameter, WorkflowDataBatch], Field(discriminator="type"), ] diff --git a/inference/core/workflows/execution_engine/entities/engine.py b/inference/core/workflows/execution_engine/entities/engine.py index 1539d0375..e0b7248f4 100644 --- 
a/inference/core/workflows/execution_engine/entities/engine.py +++ b/inference/core/workflows/execution_engine/entities/engine.py @@ -25,5 +25,6 @@ def run( runtime_parameters: Dict[str, Any], fps: float = 0, _is_preview: bool = False, + serialize_results: bool = False, ) -> List[Dict[str, Any]]: pass diff --git a/inference/core/workflows/execution_engine/introspection/blocks_loader.py b/inference/core/workflows/execution_engine/introspection/blocks_loader.py index ae3cb54c8..7871f40b8 100644 --- a/inference/core/workflows/execution_engine/introspection/blocks_loader.py +++ b/inference/core/workflows/execution_engine/introspection/blocks_loader.py @@ -10,9 +10,11 @@ from packaging.version import Version from inference.core.workflows.core_steps.loader import ( + KINDS_DESERIALIZERS, + KINDS_SERIALIZERS, REGISTERED_INITIALIZERS, load_blocks, - load_kinds, KINDS_SERIALIZERS, KINDS_DESERIALIZERS, + load_kinds, ) from inference.core.workflows.errors import ( PluginInterfaceError, @@ -400,48 +402,76 @@ def _load_plugin_kinds(plugin_name: str) -> List[Kind]: return kinds -def load_kinds_serializers() -> Dict[str, Callable[[Any], Any]]: +@execution_phase( + name="kinds_serializers_loading", + categories=["execution_engine_operation"], +) +def load_kinds_serializers( + profiler: Optional[WorkflowsProfiler] = None, +) -> Dict[str, Callable[[Any], Any]]: kinds_serializers = copy(KINDS_SERIALIZERS) - plugin_kinds_serializers = load_plugins_serialization_functions(module_property="KINDS_SERIALIZERS") + plugin_kinds_serializers = load_plugins_serialization_functions( + module_property="KINDS_SERIALIZERS" + ) kinds_serializers.update(plugin_kinds_serializers) return kinds_serializers -def load_kinds_deserializers() -> Dict[str, Callable[[Any], Any]]: +@execution_phase( + name="kinds_deserializers_loading", + categories=["execution_engine_operation"], +) +def load_kinds_deserializers( + profiler: Optional[WorkflowsProfiler] = None, +) -> Dict[str, Callable[[str, Any], Any]]: kinds_deserializers = copy(KINDS_DESERIALIZERS) - plugin_kinds_deserializers = load_plugins_serialization_functions(module_property="KINDS_DESERIALIZERS") + plugin_kinds_deserializers = load_plugins_serialization_functions( + module_property="KINDS_DESERIALIZERS" + ) kinds_deserializers.update(plugin_kinds_deserializers) return kinds_deserializers -def load_plugins_serialization_functions(module_property: str) -> Dict[str, Callable[[Any], Any]]: +def load_plugins_serialization_functions( + module_property: str, +) -> Dict[str, Callable[[Any], Any]]: plugins_to_load = get_plugin_modules() result = {} for plugin_name in plugins_to_load: - result.update(load_plugin_serializers(plugin_name=plugin_name, module_property=module_property)) + result.update( + load_plugin_serializers( + plugin_name=plugin_name, module_property=module_property + ) + ) return result -def load_plugin_serializers(plugin_name: str, module_property: str) -> Dict[str, Callable[[Any], Any]]: +def load_plugin_serializers( + plugin_name: str, module_property: str +) -> Dict[str, Callable[[Any], Any]]: try: - return _load_plugin_serializers(plugin_name=plugin_name, module_property=module_property) + return _load_plugin_serializers( + plugin_name=plugin_name, module_property=module_property + ) except ImportError as e: raise PluginLoadingError( public_message=f"It is not possible to load kinds serializers from workflow plugin `{plugin_name}`. 
" - f"Make sure the library providing custom step is correctly installed in Python environment.", + f"Make sure the library providing custom step is correctly installed in Python environment.", context="blocks_loading", inner_error=e, ) from e except AttributeError as e: raise PluginInterfaceError( public_message=f"Provided workflow plugin `{plugin_name}` do not implement blocks loading " - f"interface correctly and cannot be loaded.", + f"interface correctly and cannot be loaded.", context="blocks_loading", inner_error=e, ) from e -def _load_plugin_serializers(plugin_name: str, module_property: str) -> Dict[str, Callable[[Any], Any]]: +def _load_plugin_serializers( + plugin_name: str, module_property: str +) -> Dict[str, Callable[[Any], Any]]: module = importlib.import_module(plugin_name) if not hasattr(module, module_property): return {} @@ -449,8 +479,8 @@ def _load_plugin_serializers(plugin_name: str, module_property: str) -> Dict[str if not isinstance(kinds_serializers, dict): raise PluginInterfaceError( public_message=f"Provided workflow plugin `{plugin_name}` do not implement blocks loading " - f"interface correctly and cannot be loaded. `{module_property}` is expected to be " - f"dictionary.", + f"interface correctly and cannot be loaded. `{module_property}` is expected to be " + f"dictionary.", context="blocks_loading", ) return kinds_serializers diff --git a/inference/core/workflows/execution_engine/v1/compiler/core.py b/inference/core/workflows/execution_engine/v1/compiler/core.py index bfbe75441..23d501c4e 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/core.py +++ b/inference/core/workflows/execution_engine/v1/compiler/core.py @@ -9,6 +9,8 @@ from inference.core.workflows.execution_engine.entities.base import WorkflowParameter from inference.core.workflows.execution_engine.introspection.blocks_loader import ( load_initializers, + load_kinds_deserializers, + load_kinds_serializers, load_workflow_blocks, ) from inference.core.workflows.execution_engine.profiling.core import ( @@ -55,6 +57,8 @@ class GraphCompilationResult: parsed_workflow_definition: ParsedWorkflowDefinition available_blocks: List[BlockSpecification] initializers: Dict[str, Union[Any, Callable[[None], Any]]] + kinds_serializers: Dict[str, Callable[[Any], Any]] + kinds_deserializers: Dict[str, Callable[[str, Any], Any]] COMPILATION_CACHE = BasicWorkflowsCache[GraphCompilationResult]( @@ -103,6 +107,8 @@ def compile_workflow( execution_graph=graph_compilation_results.execution_graph, steps=steps_by_name, input_substitutions=input_substitutions, + kinds_serializers=graph_compilation_results.kinds_serializers, + kinds_deserializers=graph_compilation_results.kinds_deserializers, ) @@ -129,6 +135,8 @@ def compile_workflow_graph( profiler=profiler, ) initializers = load_initializers(profiler=profiler) + kinds_serializers = load_kinds_serializers(profiler=profiler) + kinds_deserializers = load_kinds_deserializers(profiler=profiler) dynamic_blocks = compile_dynamic_blocks( dynamic_blocks_definitions=workflow_definition.get( "dynamic_blocks_definitions", [] @@ -154,6 +162,8 @@ def compile_workflow_graph( parsed_workflow_definition=parsed_workflow_definition, available_blocks=available_blocks, initializers=initializers, + kinds_serializers=kinds_serializers, + kinds_deserializers=kinds_deserializers, ) COMPILATION_CACHE.cache(key=key, value=result) return result diff --git a/inference/core/workflows/execution_engine/v1/compiler/entities.py 
b/inference/core/workflows/execution_engine/v1/compiler/entities.py index 84d0b0606..51c5fe2cf 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/entities.py +++ b/inference/core/workflows/execution_engine/v1/compiler/entities.py @@ -1,11 +1,12 @@ from abc import abstractmethod from dataclasses import dataclass, field from enum import Enum -from typing import Any, Dict, Generator, List, Optional, Set, Type, Union +from typing import Any, Callable, Dict, Generator, List, Optional, Set, Type, Union import networkx as nx from inference.core.workflows.execution_engine.entities.base import InputType, JsonField +from inference.core.workflows.execution_engine.entities.types import WILDCARD_KIND, Kind from inference.core.workflows.execution_engine.introspection.entities import ( ParsedSelector, ) @@ -53,6 +54,12 @@ class CompiledWorkflow: input_substitutions: List[InputSubstitution] workflow_json: Dict[str, Any] init_parameters: Dict[str, Any] + kinds_serializers: Dict[str, Callable[[str, Any], Any]] = field( + default_factory=dict + ) + kinds_deserializers: Dict[str, Callable[[str, Any], Any]] = field( + default_factory=dict + ) class NodeCategory(Enum): @@ -84,6 +91,9 @@ def is_batch_oriented(self) -> bool: @dataclass class OutputNode(ExecutionGraphNode): output_manifest: JsonField + kind: Union[List[Kind], Dict[str, List[Kind]]] = field( + default_factory=lambda: [WILDCARD_KIND] + ) @property def dimensionality(self) -> int: diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 29d5b5db0..ea841ab4c 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -1,6 +1,6 @@ import itertools from collections import defaultdict -from copy import copy +from copy import copy, deepcopy from typing import Any, Dict, List, Optional, Set, Tuple, Union import networkx as nx @@ -28,6 +28,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( STEP_AS_SELECTED_ELEMENT, + WILDCARD_KIND, Kind, ) from inference.core.workflows.execution_engine.introspection.entities import ( @@ -428,22 +429,35 @@ def add_edges_for_outputs( node_selector = get_step_selector_from_its_output( step_output_selector=node_selector ) - output_name = construct_output_selector(name=output.name) + output_selector = construct_output_selector(name=output.name) verify_edge_is_created_between_existing_nodes( execution_graph=execution_graph, start=node_selector, - end=output_name, + end=output_selector, + ) + output_node_manifest = node_as( + execution_graph=execution_graph, + node=output_selector, + expected_type=OutputNode, ) if is_step_output_selector(selector_or_value=output.selector): step_manifest = execution_graph.nodes[node_selector][ NODE_COMPILATION_OUTPUT_PROPERTY ].step_manifest step_outputs = step_manifest.get_actual_outputs() - verify_output_selector_points_to_valid_output( + denote_output_node_kind_based_on_step_outputs( output_selector=output.selector, step_outputs=step_outputs, + output_node_manifest=output_node_manifest, ) - execution_graph.add_edge(node_selector, output_name) + else: + input_manifest = node_as( + execution_graph=execution_graph, + node=node_selector, + expected_type=InputNode, + ).input_manifest + output_node_manifest.kind = copy(input_manifest.kind) + execution_graph.add_edge(node_selector, output_selector) return execution_graph @@ -464,20 
+478,24 @@ def verify_edge_is_created_between_existing_nodes( ) -def verify_output_selector_points_to_valid_output( +def denote_output_node_kind_based_on_step_outputs( output_selector: str, step_outputs: List[OutputDefinition], + output_node_manifest: OutputNode, ) -> None: selected_output_name = get_last_chunk_of_selector(selector=output_selector) + kinds_for_outputs = {output.name: output.kind for output in step_outputs} if selected_output_name == "*": + output_node_manifest.kind = deepcopy(kinds_for_outputs) return None - defined_output_names = {output.name for output in step_outputs} - if selected_output_name not in defined_output_names: + if selected_output_name not in kinds_for_outputs: raise InvalidReferenceTargetError( public_message=f"Graph definition contains selector {output_selector} that points to output of step " f"that is not defined in workflow block used to create step.", context="workflow_compilation | execution_graph_construction", ) + output_node_manifest.kind = copy(kinds_for_outputs[selected_output_name]) + return None def denote_data_flow_in_workflow( diff --git a/inference/core/workflows/execution_engine/v1/core.py b/inference/core/workflows/execution_engine/v1/core.py index cf683e27c..e975c5162 100644 --- a/inference/core/workflows/execution_engine/v1/core.py +++ b/inference/core/workflows/execution_engine/v1/core.py @@ -73,11 +73,13 @@ def run( runtime_parameters: Dict[str, Any], fps: float = 0, _is_preview: bool = False, + serialize_results: bool = False, ) -> List[Dict[str, Any]]: self._profiler.start_workflow_run() runtime_parameters = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=self._compiled_workflow.workflow_definition.inputs, + kinds_deserializers=self._compiled_workflow.kinds_deserializers, prevent_local_images_loading=self._prevent_local_images_loading, profiler=self._profiler, ) @@ -93,6 +95,8 @@ def run( usage_fps=fps, usage_workflow_id=self._workflow_id, usage_workflow_preview=_is_preview, + kinds_serializers=self._compiled_workflow.kinds_serializers, + serialize_results=serialize_results, profiler=self._profiler, ) self._profiler.end_workflow_run() diff --git a/inference/core/workflows/execution_engine/v1/executor/core.py b/inference/core/workflows/execution_engine/v1/executor/core.py index f4c86ef86..b3816bde1 100644 --- a/inference/core/workflows/execution_engine/v1/executor/core.py +++ b/inference/core/workflows/execution_engine/v1/executor/core.py @@ -1,6 +1,6 @@ from datetime import datetime from functools import partial -from typing import Any, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional from inference.core import logger from inference.core.workflows.errors import ( @@ -44,6 +44,8 @@ def run_workflow( workflow: CompiledWorkflow, runtime_parameters: Dict[str, Any], max_concurrent_steps: int, + kinds_serializers: Optional[Dict[str, Callable[[Any], Any]]], + serialize_results: bool = False, profiler: Optional[WorkflowsProfiler] = None, ) -> List[Dict[str, Any]]: execution_data_manager = ExecutionDataManager.init( @@ -71,6 +73,8 @@ def run_workflow( workflow_outputs=workflow.workflow_definition.outputs, execution_graph=workflow.execution_graph, execution_data_manager=execution_data_manager, + serialize_results=serialize_results, + kinds_serializers=kinds_serializers, ) diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index a9b76909f..d0ac4b0ed 100644 --- 
a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -1,9 +1,13 @@ -from typing import Any, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import supervision as sv from networkx import DiGraph +from inference.core.workflows.core_steps.common.serializers import ( + serialise_image, + serialise_sv_detections, +) from inference.core.workflows.core_steps.common.utils import ( sv_detections_to_root_coordinates, ) @@ -14,7 +18,9 @@ from inference.core.workflows.execution_engine.entities.base import ( CoordinatesSystem, JsonField, + WorkflowImageData, ) +from inference.core.workflows.execution_engine.entities.types import WILDCARD_KIND, Kind from inference.core.workflows.execution_engine.v1.compiler.entities import OutputNode from inference.core.workflows.execution_engine.v1.compiler.utils import ( construct_output_selector, @@ -32,6 +38,8 @@ def construct_workflow_output( workflow_outputs: List[JsonField], execution_graph: DiGraph, execution_data_manager: ExecutionDataManager, + serialize_results: bool, + kinds_serializers: Dict[str, Callable[[Any], Any]], ) -> List[Dict[str, Any]]: # Maybe we should make blocks to change coordinates systems: # https://github.com/roboflow/inference/issues/440 @@ -58,6 +66,14 @@ def construct_workflow_output( ).dimensionality for output in workflow_outputs } + kinds_of_output_nodes = { + output.name: node_as( + execution_graph=execution_graph, + node=construct_output_selector(name=output.name), + expected_type=OutputNode, + ).kind + for output in workflow_outputs + } outputs_arrays: Dict[str, Optional[list]] = { name: create_array(indices=np.array(indices)) for name, indices in output_name2indices.items() @@ -87,6 +103,14 @@ def construct_workflow_output( and data_contains_sv_detections(data=data_piece) ): data_piece = convert_sv_detections_coordinates(data=data_piece) + if serialize_results: + output_kind = kinds_of_output_nodes[name] + data_piece = serialize_data_piece( + output_name=name, + data_piece=data_piece, + kind=output_kind, + kinds_serializers=kinds_serializers, + ) try: place_data_in_array( array=array, @@ -152,6 +176,67 @@ def create_empty_index_array(level: int, accumulator: list) -> list: return create_empty_index_array(level - 1, [accumulator]) +def serialize_data_piece( + output_name: str, + data_piece: Any, + kind: Union[List[Kind], Dict[str, List[Kind]]], + kinds_serializers: Dict[str, Callable[[Any], Any]], +) -> Any: + if isinstance(kind, dict): + if not isinstance(data_piece, dict): + raise ExecutionEngineRuntimeError( + public_message=f"Could not serialize Workflow output `{output_name}` - expected the " + f"output to be dictionary containing all outputs of the step, which is not the case." + f"This is most likely a bug. 
Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | output_construction", + ) + return { + name: serialize_single_workflow_result_field( + output_name=f"{output_name}['{name}']", + value=value, + kind=kind.get(name, [WILDCARD_KIND]), + kinds_serializers=kinds_serializers, + ) + for name, value in data_piece.items() + } + return serialize_single_workflow_result_field( + output_name=output_name, + value=data_piece, + kind=kind, + kinds_serializers=kinds_serializers, + ) + + +def serialize_single_workflow_result_field( + output_name: str, + value: Any, + kind: List[Kind], + kinds_serializers: Dict[str, Callable[[Any], Any]], +) -> Any: + kinds_without_serializer = set() + for single_kind in kind: + serializer = kinds_serializers.get(single_kind.name) + if serializer is None: + kinds_without_serializer.add(single_kind.name) + continue + try: + return serializer(value) + except Exception: + # silent exception passing, as it is enough for one serializer to be applied + # for union of kinds + pass + if not kinds_without_serializer: + raise ExecutionEngineRuntimeError( + public_message=f"Requested Workflow output serialization, but for output `{output_name}` which " + f"evaluates into Python type: {type(value)} cannot successfully apply any of " + f"registered serializers.", + context="workflow_execution | output_construction", + ) + return value + + def place_data_in_array(array: list, index: DynamicBatchIndex, data: Any) -> None: if len(index) == 0: raise ExecutionEngineRuntimeError( diff --git a/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py index f8b36475b..a8ed81285 100644 --- a/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py @@ -1,30 +1,12 @@ -import os.path -from typing import Any, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional -import cv2 -import numpy as np -from pydantic import ValidationError - -from inference.core.utils.image_utils import ( - attempt_loading_image_from_string, - load_image_from_url, -) from inference.core.workflows.errors import RuntimeInputError -from inference.core.workflows.execution_engine.entities.base import ( - ImageParentMetadata, - InputType, - VideoMetadata, - WorkflowImage, - WorkflowImageData, - WorkflowVideoMetadata, -) +from inference.core.workflows.execution_engine.entities.base import InputType from inference.core.workflows.execution_engine.profiling.core import ( WorkflowsProfiler, execution_phase, ) -BATCH_ORIENTED_PARAMETER_TYPES = {WorkflowImage, WorkflowVideoMetadata} - @execution_phase( name="workflow_input_assembly", @@ -33,6 +15,7 @@ def assemble_runtime_parameters( runtime_parameters: Dict[str, Any], defined_inputs: List[InputType], + kinds_deserializers: Dict[str, Callable[[str, Any], Any]], prevent_local_images_loading: bool = False, profiler: Optional[WorkflowsProfiler] = None, ) -> Dict[str, Any]: @@ -41,19 +24,14 @@ def assemble_runtime_parameters( defined_inputs=defined_inputs, ) for defined_input in defined_inputs: - if isinstance(defined_input, WorkflowImage): - runtime_parameters[defined_input.name] = assemble_input_image( - parameter=defined_input.name, - image=runtime_parameters.get(defined_input.name), + if 
defined_input.is_batch_oriented(): + runtime_parameters[defined_input.name] = assemble_batch_oriented_input( + defined_input=defined_input, + value=runtime_parameters.get(defined_input.name), + kinds_deserializers=kinds_deserializers, input_batch_size=input_batch_size, prevent_local_images_loading=prevent_local_images_loading, ) - elif isinstance(defined_input, WorkflowVideoMetadata): - runtime_parameters[defined_input.name] = assemble_video_metadata( - parameter=defined_input.name, - video_metadata=runtime_parameters.get(defined_input.name), - input_batch_size=input_batch_size, - ) else: runtime_parameters[defined_input.name] = assemble_inference_parameter( parameter=defined_input.name, @@ -67,7 +45,7 @@ def determine_input_batch_size( runtime_parameters: Dict[str, Any], defined_inputs: List[InputType] ) -> int: for defined_input in defined_inputs: - if type(defined_input) not in BATCH_ORIENTED_PARAMETER_TYPES: + if not defined_input.is_batch_oriented(): continue parameter_value = runtime_parameters.get(defined_input.name) if isinstance(parameter_value, list) and len(parameter_value) > 1: @@ -75,167 +53,93 @@ def determine_input_batch_size( return 1 -def assemble_input_image( - parameter: str, - image: Any, +def assemble_batch_oriented_input( + defined_input: InputType, + value: Any, + kinds_deserializers: Dict[str, Callable[[str, Any], Any]], input_batch_size: int, - prevent_local_images_loading: bool = False, -) -> List[WorkflowImageData]: - if image is None: + prevent_local_images_loading: bool, +) -> List[Any]: + if value is None: raise RuntimeInputError( - public_message=f"Detected runtime parameter `{parameter}` defined as " - f"`WorkflowImage`, but value is not provided.", + public_message=f"Detected runtime parameter `{defined_input.name}` defined as " + f"`{defined_input.type}` (of kind `{defined_input.kind}`), " + f"but value is not provided.", context="workflow_execution | runtime_input_validation", ) - if not isinstance(image, list): - return [ - _assemble_input_image( - parameter=parameter, - image=image, + if not isinstance(value, list): + result = [ + assemble_single_element_of_batch_oriented_input( + defined_input=defined_input, + value=value, + kinds_deserializers=kinds_deserializers, prevent_local_images_loading=prevent_local_images_loading, ) ] * input_batch_size - result = [ - _assemble_input_image( - parameter=parameter, - image=element, - identifier=idx, - prevent_local_images_loading=prevent_local_images_loading, - ) - for idx, element in enumerate(image) - ] + else: + result = [ + assemble_single_element_of_batch_oriented_input( + defined_input=defined_input, + value=element, + kinds_deserializers=kinds_deserializers, + prevent_local_images_loading=prevent_local_images_loading, + identifier=identifier, + ) + for identifier, element in enumerate(value) + ] if len(result) != input_batch_size: raise RuntimeInputError( public_message="Expected all batch-oriented workflow inputs be the same length, or of length 1 - " - f"but parameter: {parameter} provided with batch size {len(result)}, where expected " + f"but parameter: {defined_input.name} provided with batch size {len(result)}, where expected " f"batch size based on remaining parameters is: {input_batch_size}.", context="workflow_execution | runtime_input_validation", ) return result -def _assemble_input_image( - parameter: str, - image: Any, +def assemble_single_element_of_batch_oriented_input( + defined_input: InputType, + value: Any, + kinds_deserializers: Dict[str, Callable[[str, Any], Any]], + 
prevent_local_images_loading: bool, identifier: Optional[int] = None, - prevent_local_images_loading: bool = False, -) -> WorkflowImageData: - parent_id = parameter +) -> None: + matching_deserializers = [ + (kind.name, kinds_deserializers[kind.name]) + for kind in defined_input.kind + if kind.name in kinds_deserializers + ] + if not matching_deserializers: + return value + parameter_identifier = defined_input.name if identifier is not None: - parent_id = f"{parent_id}.[{identifier}]" - video_metadata = None - if isinstance(image, dict) and "video_metadata" in image: - video_metadata = _assemble_video_metadata( - parameter=parameter, video_metadata=image["video_metadata"] - ) - if isinstance(image, dict) and isinstance(image.get("value"), np.ndarray): - image = image["value"] - if isinstance(image, np.ndarray): - parent_metadata = ImageParentMetadata(parent_id=parent_id) - return WorkflowImageData( - parent_metadata=parent_metadata, - numpy_image=image, - video_metadata=video_metadata, - ) - try: - if isinstance(image, dict): - image = image["value"] - if isinstance(image, str): - base64_image = None - image_reference = None - if image.startswith("http://") or image.startswith("https://"): - image_reference = image - image = load_image_from_url(value=image) - elif not prevent_local_images_loading and os.path.exists(image): - # prevent_local_images_loading is introduced to eliminate - # server vulnerability - namely it prevents local server - # file system from being exploited. - image_reference = image - image = cv2.imread(image) - else: - base64_image = image - image = attempt_loading_image_from_string(image)[0] - parent_metadata = ImageParentMetadata(parent_id=parent_id) - return WorkflowImageData( - parent_metadata=parent_metadata, - numpy_image=image, - base64_image=base64_image, - image_reference=image_reference, - video_metadata=video_metadata, - ) - except Exception as error: - raise RuntimeInputError( - public_message=f"Detected runtime parameter `{parameter}` defined as `WorkflowImage` " - f"that is invalid. Failed on input validation. Details: {error}", - context="workflow_execution | runtime_input_validation", - ) from error + parameter_identifier = f"{parameter_identifier}.[{identifier}]" + errors = [] + for kind, deserializer in matching_deserializers: + try: + if kind == "image": + # this is left-over of bad design decision with adding `prevent_local_images_loading` + # flag at the level of execution engine. To avoid BC we need to + # be aware of special treatment for image kind. + # TODO: deprecate in v2 of Execution Engine + return deserializer( + parameter_identifier, value, prevent_local_images_loading + ) + return deserializer(parameter_identifier, value) + except Exception as error: + errors.append((kind, error)) + error_message = ( + f"Failed to assemble `{parameter_identifier}`. " + f"Could not successfully use any deserializer for declared kinds. Details: " + ) + for kind, error in errors: + error_message = f"{error_message}\nKind: `{kind}` - Error: {error}" raise RuntimeInputError( - public_message=f"Detected runtime parameter `{parameter}` defined as `WorkflowImage` " - f"with type {type(image)} that is invalid. 
Workflows accept only np.arrays " - f"and dicts with keys `type` and `value` compatible with `inference` (or list of them).", + public_message=error_message, context="workflow_execution | runtime_input_validation", ) -def assemble_video_metadata( - parameter: str, - video_metadata: Any, - input_batch_size: int, -) -> List[VideoMetadata]: - if video_metadata is None: - raise RuntimeInputError( - public_message=f"Detected runtime parameter `{parameter}` defined as " - f"`WorkflowVideoMetadata`, but value is not provided.", - context="workflow_execution | runtime_input_validation", - ) - if not isinstance(video_metadata, list): - return [ - _assemble_video_metadata( - parameter=parameter, - video_metadata=video_metadata, - ) - ] * input_batch_size - result = [ - _assemble_video_metadata( - parameter=parameter, - video_metadata=element, - ) - for element in video_metadata - ] - if len(result) != input_batch_size: - raise RuntimeInputError( - public_message="Expected all batch-oriented workflow inputs be the same length, or of length 1 - " - f"but parameter: {parameter} provided with batch size {len(result)}, where expected " - f"batch size based on remaining parameters is: {input_batch_size}.", - context="workflow_execution | runtime_input_validation", - ) - return result - - -def _assemble_video_metadata( - parameter: str, - video_metadata: Any, -) -> VideoMetadata: - if isinstance(video_metadata, VideoMetadata): - return video_metadata - if not isinstance(video_metadata, dict): - raise RuntimeInputError( - public_message=f"Detected runtime parameter `{parameter}` holding " - f"`WorkflowVideoMetadata`, but provided value is not a dict.", - context="workflow_execution | runtime_input_validation", - ) - try: - return VideoMetadata.model_validate(video_metadata) - except ValidationError as error: - raise RuntimeInputError( - public_message=f"Detected runtime parameter `{parameter}` holding " - f"`WorkflowVideoMetadata`, but provided value is malformed. 
" - f"See details in inner error.", - context="workflow_execution | runtime_input_validation", - inner_error=error, - ) - - def assemble_inference_parameter( parameter: str, runtime_parameters: Dict[str, Any], diff --git a/tests/conftest.py b/tests/conftest.py index 0c40096e3..66fcdd0ec 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,4 @@ import os os.environ["TELEMETRY_OPT_OUT"] = "True" +os.environ["ONNXRUNTIME_EXECUTION_PROVIDERS"] = "[CPUExecutionProvider]" diff --git a/tests/workflows/unit_tests/execution_engine/compiler/test_graph_constructor.py b/tests/workflows/unit_tests/execution_engine/compiler/test_graph_constructor.py index 8deafb0e0..3e12de6e9 100644 --- a/tests/workflows/unit_tests/execution_engine/compiler/test_graph_constructor.py +++ b/tests/workflows/unit_tests/execution_engine/compiler/test_graph_constructor.py @@ -12,6 +12,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, + OBJECT_DETECTION_PREDICTION_KIND, ROBOFLOW_MODEL_ID_KIND, ) from inference.core.workflows.execution_engine.v1.compiler.entities import ( @@ -122,6 +123,7 @@ def test_execution_graph_construction_for_trivial_workflow() -> None: selector="$outputs.predictions", data_lineage=[""], output_manifest=output_manifest, + kind=[OBJECT_DETECTION_PREDICTION_KIND], ), "Output node must be created correctly" assert result.has_edge( "$inputs.image", "$steps.model_1" diff --git a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py index bb01cd8e5..df51aec52 100644 --- a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py +++ b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py @@ -6,6 +6,7 @@ import supervision as sv from networkx import DiGraph +from inference.core.workflows.core_steps.loader import KINDS_SERIALIZERS from inference.core.workflows.execution_engine.entities.base import JsonField from inference.core.workflows.execution_engine.v1.compiler.entities import ( NodeCategory, @@ -413,6 +414,8 @@ def get_non_batch_data(selector: str) -> Any: workflow_outputs=workflow_outputs, execution_graph=execution_graph, execution_data_manager=execution_data_manager, + serialize_results=True, + kinds_serializers=KINDS_SERIALIZERS, ) # then @@ -529,6 +532,8 @@ def get_batch_data(selector: str, indices: List[tuple]) -> List[Any]: workflow_outputs=workflow_outputs, execution_graph=execution_graph, execution_data_manager=execution_data_manager, + serialize_results=True, + kinds_serializers=KINDS_SERIALIZERS, ) # then diff --git a/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py b/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py index 9cac89a2a..2b866b98e 100644 --- a/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py +++ b/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py @@ -7,6 +7,8 @@ import numpy as np import pytest +from inference.core.workflows.core_steps.common import deserializers +from inference.core.workflows.core_steps.loader import KINDS_DESERIALIZERS from inference.core.workflows.errors import RuntimeInputError from inference.core.workflows.execution_engine.entities.base import ( VideoMetadata, @@ -14,9 +16,6 @@ WorkflowParameter, WorkflowVideoMetadata, ) -from inference.core.workflows.execution_engine.v1.executor import ( - runtime_input_assembler, 
-) from inference.core.workflows.execution_engine.v1.executor.runtime_input_assembler import ( assemble_runtime_parameters, ) @@ -32,10 +31,11 @@ def test_assemble_runtime_parameters_when_image_is_not_provided() -> None: _ = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) -@mock.patch.object(runtime_input_assembler, "load_image_from_url") +@mock.patch.object(deserializers, "load_image_from_url") def test_assemble_runtime_parameters_when_image_is_provided_as_single_element_dict( load_image_from_url_mock: MagicMock, ) -> None: @@ -53,6 +53,7 @@ def test_assemble_runtime_parameters_when_image_is_provided_as_single_element_di result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) # then @@ -83,6 +84,7 @@ def test_assemble_runtime_parameters_when_image_is_provided_as_single_element_di result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) # then @@ -115,6 +117,7 @@ def test_assemble_runtime_parameters_when_image_is_provided_as_single_element_di runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, prevent_local_images_loading=True, + kinds_deserializers=KINDS_DESERIALIZERS, ) @@ -129,6 +132,7 @@ def test_assemble_runtime_parameters_when_image_is_provided_as_single_element_np result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) # then @@ -153,6 +157,7 @@ def test_assemble_runtime_parameters_when_image_is_provided_as_unknown_element() _ = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) @@ -173,6 +178,7 @@ def test_assemble_runtime_parameters_when_image_is_provided_in_batch() -> None: result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) # then @@ -225,6 +231,7 @@ def test_assemble_runtime_parameters_when_image_is_provided_with_video_metadata( result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) # then @@ -253,6 +260,7 @@ def test_assemble_runtime_parameters_when_parameter_not_provided() -> None: result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) # then @@ -268,6 +276,7 @@ def test_assemble_runtime_parameters_when_parameter_provided() -> None: result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) # then @@ -297,6 +306,7 @@ def test_assemble_runtime_parameters_when_images_with_different_matching_batch_s result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) # then @@ -338,6 +348,7 @@ def test_assemble_runtime_parameters_when_images_with_different_and_not_matching _ = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) @@ -379,6 +390,7 @@ def test_assemble_runtime_parameters_when_video_metadata_with_different_matching result = 
assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) # then @@ -405,6 +417,7 @@ def test_assemble_runtime_parameters_when_video_metadata_declared_but_not_provid _ = assemble_runtime_parameters( runtime_parameters={}, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) @@ -430,6 +443,7 @@ def test_assemble_runtime_parameters_when_video_metadata_declared_and_provided_a result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) # then @@ -456,6 +470,7 @@ def test_assemble_runtime_parameters_when_video_metadata_declared_and_provided_a result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) # then @@ -505,4 +520,5 @@ def test_assemble_runtime_parameters_when_video_metadata_with_different_and_not_ _ = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, ) From 209866ec986195213d883f9ea8198ca277918a16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Wed, 30 Oct 2024 21:45:49 +0100 Subject: [PATCH 03/67] WIP - added handling for arbitrary dimensions in inputs --- .../core_steps/common/deserializers.py | 4 +- .../execution_engine/entities/base.py | 19 +- .../execution_engine/entities/types.py | 5 +- .../execution_engine/v1/compiler/entities.py | 2 +- .../v1/compiler/graph_constructor.py | 23 ++- .../v1/compiler/reference_type_checker.py | 16 +- .../workflows/execution_engine/v1/core.py | 2 +- .../execution_engine/v1/executor/core.py | 1 + .../dynamic_batches_manager.py | 39 +++- .../execution_data_manager/manager.py | 1 + .../step_input_assembler.py | 28 ++- .../v1/executor/output_constructor.py | 12 +- .../v1/executor/runtime_input_assembler.py | 82 ++++++-- .../hosted_platform_tests/test_workflows.py | 2 +- .../test_workflow_endpoints.py | 2 +- ...st_workflow_with_arbitrary_batch_inputs.py | 186 ++++++++++++++++++ 16 files changed, 380 insertions(+), 44 deletions(-) create mode 100644 tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py diff --git a/inference/core/workflows/core_steps/common/deserializers.py b/inference/core/workflows/core_steps/common/deserializers.py index 0449cdeef..8d1285887 100644 --- a/inference/core/workflows/core_steps/common/deserializers.py +++ b/inference/core/workflows/core_steps/common/deserializers.py @@ -46,6 +46,8 @@ def deserialize_image_kind( image: Any, prevent_local_images_loading: bool = False, ) -> WorkflowImageData: + if isinstance(image, WorkflowImageData): + return image video_metadata = None if isinstance(image, dict) and "video_metadata" in image: video_metadata = deserialize_video_metadata_kind( @@ -94,7 +96,7 @@ def deserialize_image_kind( ) from error raise RuntimeInputError( public_message=f"Detected runtime parameter `{parameter}` defined as `WorkflowImage` " - f"with type {type(image)} that is invalid. Workflows accept only np.arrays " + f"with type {type(image)} that is invalid. 
Workflows accept only np.arrays, `WorkflowImageData` " f"and dicts with keys `type` and `value` compatible with `inference` (or list of them).", context="workflow_execution | runtime_input_validation", ) diff --git a/inference/core/workflows/execution_engine/entities/base.py b/inference/core/workflows/execution_engine/entities/base.py index 9c638774c..dc77ed9b7 100644 --- a/inference/core/workflows/execution_engine/entities/base.py +++ b/inference/core/workflows/execution_engine/entities/base.py @@ -57,7 +57,8 @@ def get_type(self) -> str: class WorkflowInput(BaseModel): type: str name: str - kind: List[Kind] + kind: List[Union[str, Kind]] + dimensionality: int @classmethod def is_batch_oriented(cls) -> bool: @@ -67,7 +68,8 @@ def is_batch_oriented(cls) -> bool: class WorkflowImage(WorkflowInput): type: Literal["WorkflowImage", "InferenceImage"] name: str - kind: List[Kind] = Field(default=[IMAGE_KIND]) + kind: List[Union[str, Kind]] = Field(default=[IMAGE_KIND]) + dimensionality: int = Field(default=1) @classmethod def is_batch_oriented(cls) -> bool: @@ -77,7 +79,8 @@ def is_batch_oriented(cls) -> bool: class WorkflowVideoMetadata(WorkflowInput): type: Literal["WorkflowVideoMetadata"] name: str - kind: List[Kind] = Field(default=[VIDEO_METADATA_KIND]) + kind: List[Union[str, Kind]] = Field(default=[VIDEO_METADATA_KIND]) + dimensionality: int = Field(default=1) @classmethod def is_batch_oriented(cls) -> bool: @@ -87,16 +90,22 @@ def is_batch_oriented(cls) -> bool: class WorkflowDataBatch(WorkflowInput): type: Literal["WorkflowDataBatch"] name: str - kind: List[Kind] = Field(default_factory=lambda: [WILDCARD_KIND]) + kind: List[Union[str, Kind]] = Field(default_factory=lambda: [WILDCARD_KIND]) + dimensionality: int = Field(default=1) + + @classmethod + def is_batch_oriented(cls) -> bool: + return True class WorkflowParameter(WorkflowInput): type: Literal["WorkflowParameter", "InferenceParameter"] name: str - kind: List[Kind] = Field(default_factory=lambda: [WILDCARD_KIND]) + kind: List[Union[str, Kind]] = Field(default_factory=lambda: [WILDCARD_KIND]) default_value: Optional[Union[float, int, str, bool, list, set]] = Field( default=None ) + dimensionality: int = Field(default=0) InputType = Annotated[ diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index 748fe3c52..b5abf0e8d 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -1058,7 +1058,10 @@ def StepOutputSelector(kind: Optional[List[Kind]] = None): } return Annotated[ str, - StringConstraints(pattern=r"^\$steps\.[A-Za-z_\-0-9]+\.[A-Za-z_*0-9\-]+$"), + StringConstraints( + # pattern=r"^\$steps\.[A-Za-z_\-0-9]+\.[A-Za-z_*0-9\-]+$" + pattern=r"(^\$steps\.[A-Za-z_\-0-9]+\.[A-Za-z_*0-9\-]+$)|(^\$inputs.[A-Za-z_0-9\-]+$)" + ), Field(json_schema_extra=json_schema_extra), ] diff --git a/inference/core/workflows/execution_engine/v1/compiler/entities.py b/inference/core/workflows/execution_engine/v1/compiler/entities.py index 51c5fe2cf..6c9b945c6 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/entities.py +++ b/inference/core/workflows/execution_engine/v1/compiler/entities.py @@ -91,7 +91,7 @@ def is_batch_oriented(self) -> bool: @dataclass class OutputNode(ExecutionGraphNode): output_manifest: JsonField - kind: Union[List[Kind], Dict[str, List[Kind]]] = field( + kind: Union[List[Union[Kind, str]], Dict[str, List[Union[Kind, str]]]] = field( 
default_factory=lambda: [WILDCARD_KIND] ) diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index ea841ab4c..65361acc4 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -2,6 +2,7 @@ from collections import defaultdict from copy import copy, deepcopy from typing import Any, Dict, List, Optional, Set, Tuple, Union +from uuid import uuid4 import networkx as nx from networkx import DiGraph @@ -146,11 +147,20 @@ def add_input_nodes_for_graph( ) -> DiGraph: for input_spec in inputs: input_selector = construct_input_selector(input_name=input_spec.name) - data_lineage = ( - [] - if not input_spec.is_batch_oriented() - else [WORKFLOW_INPUT_BATCH_LINEAGE_ID] - ) + if input_spec.is_batch_oriented(): + if input_spec.dimensionality < 1: + raise ExecutionGraphStructureError( + public_message=f"Detected batch oriented input `{input_spec.name}` with " + f"declared dimensionality `{input_spec.dimensionality}` which is below " + f"one (one is minimum dimensionality of the batch). Fix input definition in" + f"your Workflow.", + context="workflow_compilation | execution_graph_construction", + ) + data_lineage = [WORKFLOW_INPUT_BATCH_LINEAGE_ID] + for _ in range(input_spec.dimensionality - 1): + data_lineage.append(f"{uuid4()}") + else: + data_lineage = [] compilation_output = InputNode( node_category=NodeCategory.INPUT_NODE, name=input_spec.name, @@ -288,7 +298,8 @@ def add_edge_for_step( f"Failed to validate reference provided for step: {source_step_selector} regarding property: " f"{target_step_parsed_selector.definition.property_name} with value: {target_step_parsed_selector.value}. " f"Allowed kinds of references for this property: {list(set(e.name for e in expected_input_kind))}. 
" - f"Types of output for referred property: {list(set(a.name for a in actual_input_kind))}" + f"Types of output for referred property: " + f"{list(set(a.name if isinstance(a, Kind) else a for a in actual_input_kind))}" ) validate_reference_kinds( expected=expected_input_kind, diff --git a/inference/core/workflows/execution_engine/v1/compiler/reference_type_checker.py b/inference/core/workflows/execution_engine/v1/compiler/reference_type_checker.py index 845b3aee4..c1733ceb7 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/reference_type_checker.py +++ b/inference/core/workflows/execution_engine/v1/compiler/reference_type_checker.py @@ -1,16 +1,16 @@ -from typing import List +from typing import List, Union from inference.core.workflows.errors import ReferenceTypeError from inference.core.workflows.execution_engine.entities.types import Kind def validate_reference_kinds( - expected: List[Kind], - actual: List[Kind], + expected: List[Union[Kind, str]], + actual: List[Union[Kind, str]], error_message: str, ) -> None: - expected_kind_names = set(e.name for e in expected) - actual_kind_names = set(a.name for a in actual) + expected_kind_names = set(_get_kind_name(kind=e) for e in expected) + actual_kind_names = set(_get_kind_name(kind=a) for a in actual) if "*" in expected_kind_names or "*" in actual_kind_names: return None if len(expected_kind_names.intersection(actual_kind_names)) == 0: @@ -18,3 +18,9 @@ def validate_reference_kinds( public_message=error_message, context="workflow_compilation | execution_graph_construction", ) + + +def _get_kind_name(kind: Union[Kind, str]) -> str: + if isinstance(kind, Kind): + return kind.name + return kind diff --git a/inference/core/workflows/execution_engine/v1/core.py b/inference/core/workflows/execution_engine/v1/core.py index e975c5162..7135fdb36 100644 --- a/inference/core/workflows/execution_engine/v1/core.py +++ b/inference/core/workflows/execution_engine/v1/core.py @@ -21,7 +21,7 @@ validate_runtime_input, ) -EXECUTION_ENGINE_V1_VERSION = Version("1.2.0") +EXECUTION_ENGINE_V1_VERSION = Version("1.3.0") class ExecutionEngineV1(BaseExecutionEngine): diff --git a/inference/core/workflows/execution_engine/v1/executor/core.py b/inference/core/workflows/execution_engine/v1/executor/core.py index b3816bde1..940b3df43 100644 --- a/inference/core/workflows/execution_engine/v1/executor/core.py +++ b/inference/core/workflows/execution_engine/v1/executor/core.py @@ -200,6 +200,7 @@ def run_simd_step_in_batch_mode( step_input = execution_data_manager.get_simd_step_input( step_selector=step_selector ) + print(f"step [{step_selector}] input: {step_input}") with profiler.profile_execution_phase( name="step_code_execution", categories=["workflow_block_operation"], diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py index cc715cdbd..a35938ea4 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/dynamic_batches_manager.py @@ -2,7 +2,7 @@ from networkx import DiGraph -from inference.core.workflows.errors import ExecutionEngineRuntimeError +from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.v1.compiler.entities import ( ExecutionGraphNode, InputNode, @@ 
-83,7 +83,38 @@ def assembly_root_batch_indices( expected_type=InputNode, ) input_parameter_name = input_node_data.input_manifest.name - dimension_value = len(runtime_parameters[input_parameter_name]) - lineage_id = identify_lineage(lineage=node_data.data_lineage) - result[lineage_id] = [(i,) for i in range(dimension_value)] + root_lineage_id = identify_lineage(lineage=node_data.data_lineage[:1]) + result[root_lineage_id] = [ + (i,) for i in range(len(runtime_parameters[input_parameter_name])) + ] + if input_node_data.input_manifest.dimensionality > 1: + lineage_id = identify_lineage(lineage=node_data.data_lineage) + result[lineage_id] = generate_indices_for_input_node( + dimensionality=input_node_data.input_manifest.dimensionality, + dimension_value=runtime_parameters[input_parameter_name], + ) + return result + + +def generate_indices_for_input_node( + dimensionality: int, dimension_value: list, indices_prefix: DynamicBatchIndex = () +) -> List[DynamicBatchIndex]: + if not isinstance(dimension_value, list): + raise AssumptionError( + public_message=f"Could not establish input data batch indices. This is most likely the bug. Contact " + f"Roboflow team through github issues (https://github.com/roboflow/inference/issues) " + f"providing full context of the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + if dimensionality == len(indices_prefix) + 1: + return [indices_prefix + (i,) for i in range(len(dimension_value))] + result = [] + for i, value_element in enumerate(dimension_value): + result.extend( + generate_indices_for_input_node( + dimensionality=dimensionality, + dimension_value=value_element, + indices_prefix=indices_prefix + (i,), + ) + ) return result diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index 349cc1e46..b50050127 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -245,6 +245,7 @@ def register_simd_step_output( outputs=outputs, ) return None + print(f"Registering: {step_name}, {indices}, {len(outputs)}") self._execution_cache.register_batch_of_step_outputs( step_name=step_name, indices=indices, diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index c96620271..a402539ed 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -454,7 +454,10 @@ def get_non_compound_parameter_value( mask_for_dimension = masks[parameter_dimensionality] if dynamic_parameter.points_to_input(): input_name = get_last_chunk_of_selector(selector=dynamic_parameter.selector) - batch_input = runtime_parameters[input_name] + batch_input = _flatten_batch_oriented_inputs( + runtime_parameters[input_name], + dimensionality=parameter_dimensionality, + ) if mask_for_dimension is not None: if len(lineage_indices) != len(batch_input): raise ExecutionEngineRuntimeError( @@ -516,6 +519,29 @@ def get_non_compound_parameter_value( return result, result.indices +def _flatten_batch_oriented_inputs( + inputs: list, + dimensionality: int, 
+) -> List[Any]: + if dimensionality == 0 or not isinstance(inputs, list): + raise AssumptionError( + public_message=f"Could not prepare batch-oriented input data. This is most likely the bug. Contact " + f"Roboflow team through github issues (https://github.com/roboflow/inference/issues) " + f"providing full context of the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + if dimensionality == 1: + return inputs + result = [] + for element in inputs: + result.extend( + _flatten_batch_oriented_inputs( + inputs=element, dimensionality=dimensionality - 1 + ) + ) + return result + + def reduce_batch_dimensionality( indices: List[DynamicBatchIndex], upper_level_index: List[DynamicBatchIndex], diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index d0ac4b0ed..0d798b8c9 100644 --- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -91,8 +91,11 @@ def construct_workflow_output( ) ) for name in batch_oriented_outputs: + print(f"constructing output: {name}") array = outputs_arrays[name] + print(f"output array: {array}") indices = output_name2indices[name] + print(f"output indices: {indices}") data = execution_data_manager.get_batch_data( selector=name2selector[name], indices=indices, @@ -179,7 +182,7 @@ def create_empty_index_array(level: int, accumulator: list) -> list: def serialize_data_piece( output_name: str, data_piece: Any, - kind: Union[List[Kind], Dict[str, List[Kind]]], + kind: Union[List[Union[Kind, str]], Dict[str, List[Union[Kind, str]]]], kinds_serializers: Dict[str, Callable[[Any], Any]], ) -> Any: if isinstance(kind, dict): @@ -212,14 +215,15 @@ def serialize_data_piece( def serialize_single_workflow_result_field( output_name: str, value: Any, - kind: List[Kind], + kind: List[Union[Kind, str]], kinds_serializers: Dict[str, Callable[[Any], Any]], ) -> Any: kinds_without_serializer = set() for single_kind in kind: - serializer = kinds_serializers.get(single_kind.name) + kind_name = single_kind.name if isinstance(single_kind, Kind) else kind + serializer = kinds_serializers.get(kind_name) if serializer is None: - kinds_without_serializer.add(single_kind.name) + kinds_without_serializer.add(kind_name) continue try: return serializer(value) diff --git a/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py index a8ed81285..644e41cb9 100644 --- a/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py @@ -1,7 +1,8 @@ -from typing import Any, Callable, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional, Union -from inference.core.workflows.errors import RuntimeInputError +from inference.core.workflows.errors import AssumptionError, RuntimeInputError from inference.core.workflows.execution_engine.entities.base import InputType +from inference.core.workflows.execution_engine.entities.types import Kind from inference.core.workflows.execution_engine.profiling.core import ( WorkflowsProfiler, execution_phase, @@ -63,7 +64,7 @@ def assemble_batch_oriented_input( if value is None: raise RuntimeInputError( public_message=f"Detected runtime parameter 
`{defined_input.name}` defined as " - f"`{defined_input.type}` (of kind `{defined_input.kind}`), " + f"`{defined_input.type}` (of kind `{[_get_kind_name(k) for k in defined_input.kind]}`), " f"but value is not provided.", context="workflow_execution | runtime_input_validation", ) @@ -78,12 +79,13 @@ def assemble_batch_oriented_input( ] * input_batch_size else: result = [ - assemble_single_element_of_batch_oriented_input( + assemble_nested_batch_oriented_input( + current_depth=1, defined_input=defined_input, value=element, kinds_deserializers=kinds_deserializers, prevent_local_images_loading=prevent_local_images_loading, - identifier=identifier, + identifier=f"{defined_input.name}.[{identifier}]", ) for identifier, element in enumerate(value) ] @@ -97,23 +99,71 @@ def assemble_batch_oriented_input( return result -def assemble_single_element_of_batch_oriented_input( +def assemble_nested_batch_oriented_input( + current_depth: int, defined_input: InputType, value: Any, kinds_deserializers: Dict[str, Callable[[str, Any], Any]], prevent_local_images_loading: bool, - identifier: Optional[int] = None, -) -> None: - matching_deserializers = [ - (kind.name, kinds_deserializers[kind.name]) - for kind in defined_input.kind - if kind.name in kinds_deserializers + identifier: Optional[str] = None, +) -> Union[list, Any]: + if current_depth > defined_input.dimensionality: + raise AssumptionError( + public_message=f"While constructing input `{defined_input.name}`, Execution Engine encountered the state " + f"in which it is not possible to construct nested batch-oriented input. " + f"This is most likely the bug. Contact Roboflow team " + f"through github issues (https://github.com/roboflow/inference/issues) providing full " + f"context of the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + if current_depth == defined_input.dimensionality: + return assemble_single_element_of_batch_oriented_input( + defined_input=defined_input, + value=value, + kinds_deserializers=kinds_deserializers, + prevent_local_images_loading=prevent_local_images_loading, + identifier=identifier, + ) + if not isinstance(value, list): + raise RuntimeInputError( + public_message=f"Workflow input `{defined_input.name}` is declared to be nested batch with dimensionality " + f"`{defined_input.dimensionality}`. 
Input data does not define batch at the {current_depth} " + f"dimensionality level.", + context="workflow_execution | runtime_input_validation", + ) + return [ + assemble_nested_batch_oriented_input( + current_depth=current_depth + 1, + defined_input=defined_input, + value=element, + kinds_deserializers=kinds_deserializers, + prevent_local_images_loading=prevent_local_images_loading, + identifier=f"{identifier}.[{idx}]", + ) + for idx, element in enumerate(value) ] + + +def assemble_single_element_of_batch_oriented_input( + defined_input: InputType, + value: Any, + kinds_deserializers: Dict[str, Callable[[str, Any], Any]], + prevent_local_images_loading: bool, + identifier: Optional[str] = None, +) -> Any: + if value is None: + return None + matching_deserializers = [] + for kind in defined_input.kind: + kind_name = _get_kind_name(kind=kind) + if kind_name not in kinds_deserializers: + continue + matching_deserializers.append((kind_name, kinds_deserializers[kind_name])) if not matching_deserializers: return value parameter_identifier = defined_input.name if identifier is not None: - parameter_identifier = f"{parameter_identifier}.[{identifier}]" + parameter_identifier = identifier errors = [] for kind, deserializer in matching_deserializers: try: @@ -140,6 +190,12 @@ def assemble_single_element_of_batch_oriented_input( ) +def _get_kind_name(kind: Union[Kind, str]) -> str: + if isinstance(kind, Kind): + return kind.name + return kind + + def assemble_inference_parameter( parameter: str, runtime_parameters: Dict[str, Any], diff --git a/tests/inference/hosted_platform_tests/test_workflows.py b/tests/inference/hosted_platform_tests/test_workflows.py index bbe4c57a1..b3ff50620 100644 --- a/tests/inference/hosted_platform_tests/test_workflows.py +++ b/tests/inference/hosted_platform_tests/test_workflows.py @@ -129,7 +129,7 @@ def test_get_versions_of_execution_engine(object_detection_service_url: str) -> # then response.raise_for_status() response_data = response.json() - assert response_data["versions"] == ["1.2.0"] + assert response_data["versions"] == ["1.3.0"] FUNCTION = """ diff --git a/tests/inference/integration_tests/test_workflow_endpoints.py b/tests/inference/integration_tests/test_workflow_endpoints.py index c7c38cb20..f3ca64a1b 100644 --- a/tests/inference/integration_tests/test_workflow_endpoints.py +++ b/tests/inference/integration_tests/test_workflow_endpoints.py @@ -691,7 +691,7 @@ def test_get_versions_of_execution_engine(server_url: str) -> None: # then response.raise_for_status() response_data = response.json() - assert response_data["versions"] == ["1.2.0"] + assert response_data["versions"] == ["1.3.0"] def test_getting_block_schema_using_get_endpoint(server_url) -> None: diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py new file mode 100644 index 000000000..d1a7895c3 --- /dev/null +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -0,0 +1,186 @@ +import numpy as np + +from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.execution_engine.core import ExecutionEngine + +TWO_STAGE_WORKFLOW = { + "version": "1.3.0", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": 
"ObjectDetectionModel", + "name": "general_detection", + "image": "$inputs.image", + "model_id": "yolov8n-640", + "class_filter": ["dog"], + }, + { + "type": "Crop", + "name": "cropping", + "image": "$inputs.image", + "predictions": "$steps.general_detection.predictions", + }, + { + "type": "ClassificationModel", + "name": "breds_classification", + "image": "$steps.cropping.crops", + "model_id": "dog-breed-xpaq6/1", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "predictions", + "selector": "$steps.breds_classification.predictions", + }, + ], +} + + +OBJECT_DETECTION_WORKFLOW = { + "version": "1.3.0", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "ObjectDetectionModel", + "name": "general_detection", + "image": "$inputs.image", + "model_id": "yolov8n-640", + "class_filter": ["dog"], + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.general_detection.*", + }, + ], +} + + +CROP_WORKFLOW = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + { + "type": "WorkflowDataBatch", + "name": "predictions", + "kind": ["object_detection_prediction"], + }, + ], + "steps": [ + { + "type": "Crop", + "name": "cropping", + "image": "$inputs.image", + "predictions": "$inputs.predictions", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.cropping.*", + }, + ], +} + +CLASSIFICATION_WORKFLOW = { + "version": "1.3.0", + "inputs": [ + { + "type": "WorkflowDataBatch", + "name": "crops", + "kind": ["image"], + "dimensionality": 2, + }, + ], + "steps": [ + { + "type": "ClassificationModel", + "name": "breds_classification", + "image": "$inputs.crops", + "model_id": "dog-breed-xpaq6/1", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.breds_classification.*", + }, + ], +} + + +def test_debug_execution_of_workflow_for_single_image_without_conditional_evaluation( + model_manager: ModelManager, + dogs_image: np.ndarray, + roboflow_api_key: str, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": roboflow_api_key, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + end_to_end_execution_engine = ExecutionEngine.init( + workflow_definition=TWO_STAGE_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + first_step_execution_engine = ExecutionEngine.init( + workflow_definition=OBJECT_DETECTION_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + second_step_execution_engine = ExecutionEngine.init( + workflow_definition=CROP_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + third_step_execution_engine = ExecutionEngine.init( + workflow_definition=CLASSIFICATION_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + e2e_results = end_to_end_execution_engine.run( + runtime_parameters={ + "image": dogs_image, + } + ) + detection_results = first_step_execution_engine.run( + runtime_parameters={ + "image": dogs_image, + } + ) + cropping_results = second_step_execution_engine.run( + runtime_parameters={ + "image": dogs_image, + "predictions": detection_results[0]["result"]["predictions"], + } + ) + classification_results = third_step_execution_engine.run( + 
runtime_parameters={ + "crops": [[e["crops"] for e in cropping_results[0]["result"]]], + } + ) + print(classification_results) + raise Exception() + # assert isinstance(result, list), "Expected list to be delivered" + # assert len(result) == 1, "Expected 1 element in the output for one input image" + # assert set(result[0].keys()) == { + # "predictions", + # }, "Expected all declared outputs to be delivered" + # assert ( + # len(result[0]["predictions"]) == 2 + # ), "Expected 2 dogs crops on input image, hence 2 nested classification results" + # assert [result[0]["predictions"][0]["top"], result[0]["predictions"][1]["top"]] == [ + # "116.Parson_russell_terrier", + # "131.Wirehaired_pointing_griffon", + # ], "Expected predictions to be as measured in reference run" From 297177f025fd082e3b07ca777769981f1cd65db0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 31 Oct 2024 13:09:27 +0100 Subject: [PATCH 04/67] Finish basic testing of the new feature --- inference/core/interfaces/http/http_api.py | 5 +- .../execution_engine/entities/types.py | 5 + .../introspection/entities.py | 1 + .../introspection/schema_parser.py | 22 +- .../v1/compiler/graph_constructor.py | 22 + .../v1/executor/runtime_input_assembler.py | 2 + .../core/interfaces/http/test_orjson_utils.py | 40 +- ...st_workflow_with_arbitrary_batch_inputs.py | 475 +++++++++++++++++- .../introspection/test_schema_parser.py | 40 +- .../introspection/test_selectors_parser.py | 5 +- 10 files changed, 545 insertions(+), 72 deletions(-) diff --git a/inference/core/interfaces/http/http_api.py b/inference/core/interfaces/http/http_api.py index 5a95c6f51..c1088f7bc 100644 --- a/inference/core/interfaces/http/http_api.py +++ b/inference/core/interfaces/http/http_api.py @@ -159,10 +159,7 @@ handle_describe_workflows_blocks_request, handle_describe_workflows_interface, ) -from inference.core.interfaces.http.orjson_utils import ( - orjson_response, - serialise_workflow_result, -) +from inference.core.interfaces.http.orjson_utils import orjson_response from inference.core.interfaces.stream_manager.api.entities import ( CommandResponse, ConsumePipelineResponse, diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index b5abf0e8d..02f02e1d4 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -35,6 +35,7 @@ def __hash__(self) -> int: KIND_KEY = "kind" DIMENSIONALITY_OFFSET_KEY = "dimensionality_offset" DIMENSIONALITY_REFERENCE_PROPERTY_KEY = "dimensionality_reference_property" +SELECTOR_POINTS_TO_BATCH_KEY = "selector_points_to_batch" WILDCARD_KIND_DOCS = """ This is a special kind that represents Any value - which is to be used by default if @@ -1055,6 +1056,7 @@ def StepOutputSelector(kind: Optional[List[Kind]] = None): REFERENCE_KEY: True, SELECTED_ELEMENT_KEY: STEP_OUTPUT_AS_SELECTED_ELEMENT, KIND_KEY: [k.dict() for k in kind], + SELECTOR_POINTS_TO_BATCH_KEY: True, } return Annotated[ str, @@ -1089,6 +1091,7 @@ def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): REFERENCE_KEY: True, SELECTED_ELEMENT_KEY: "workflow_image", KIND_KEY: [IMAGE_KIND.dict()], + SELECTOR_POINTS_TO_BATCH_KEY: True, } ), ] @@ -1101,6 +1104,7 @@ def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): REFERENCE_KEY: True, SELECTED_ELEMENT_KEY: STEP_OUTPUT_AS_SELECTED_ELEMENT, KIND_KEY: [IMAGE_KIND.dict()], + SELECTOR_POINTS_TO_BATCH_KEY: True, } ), ] @@ 
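To make the new flag concrete, here is a hedged sketch, not part of the patch, of a hypothetical block manifest field annotated with StepOutputSelector; after this change its JSON schema should carry selector_points_to_batch, which the introspection layer can surface as ReferenceDefinition(points_to_batch=True). The ExampleManifest name and its description are made up for illustration.

from pydantic import BaseModel, Field

from inference.core.workflows.execution_engine.entities.types import (
    OBJECT_DETECTION_PREDICTION_KIND,
    StepOutputSelector,
)


class ExampleManifest(BaseModel):
    # hypothetical manifest property referencing an upstream detection step
    predictions: StepOutputSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND]) = Field(
        description="selector pointing to detections produced by another step",
    )


schema = ExampleManifest.model_json_schema()
# the property schema is expected to include "selector_points_to_batch": True
print(schema["properties"]["predictions"].get("selector_points_to_batch"))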
-1116,6 +1120,7 @@ def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): REFERENCE_KEY: True, SELECTED_ELEMENT_KEY: "workflow_video_metadata", KIND_KEY: [VIDEO_METADATA_KIND.dict()], + SELECTOR_POINTS_TO_BATCH_KEY: True, } ), ] diff --git a/inference/core/workflows/execution_engine/introspection/entities.py b/inference/core/workflows/execution_engine/introspection/entities.py index 8fba19528..786f9a633 100644 --- a/inference/core/workflows/execution_engine/introspection/entities.py +++ b/inference/core/workflows/execution_engine/introspection/entities.py @@ -18,6 +18,7 @@ class ReferenceDefinition: selected_element: str kind: List[Kind] + points_to_batch: bool @dataclass(frozen=True) diff --git a/inference/core/workflows/execution_engine/introspection/schema_parser.py b/inference/core/workflows/execution_engine/introspection/schema_parser.py index 01976fa56..2fb11b5bf 100644 --- a/inference/core/workflows/execution_engine/introspection/schema_parser.py +++ b/inference/core/workflows/execution_engine/introspection/schema_parser.py @@ -7,6 +7,7 @@ KIND_KEY, REFERENCE_KEY, SELECTED_ELEMENT_KEY, + SELECTOR_POINTS_TO_BATCH_KEY, Kind, ) from inference.core.workflows.execution_engine.introspection.entities import ( @@ -288,6 +289,9 @@ def retrieve_selectors_from_simple_property( Kind.model_validate(k) for k in property_definition.get(KIND_KEY, []) ], + points_to_batch=property_definition.get( + SELECTOR_POINTS_TO_BATCH_KEY, False + ), ) ] return SelectorDefinition( @@ -362,20 +366,32 @@ def retrieve_selectors_from_union_definition( results_references = list( itertools.chain.from_iterable(r.allowed_references for r in results) ) - results_references_by_selected_element = defaultdict(set) + results_references_kind_by_selected_element = defaultdict(set) + results_references_batch_pointing_by_selected_element = defaultdict(bool) for reference in results_references: - results_references_by_selected_element[reference.selected_element].update( + results_references_kind_by_selected_element[reference.selected_element].update( reference.kind ) + results_references_batch_pointing_by_selected_element[ + reference.selected_element + ] = ( + results_references_batch_pointing_by_selected_element[ + reference.selected_element + ] + or reference.points_to_batch + ) merged_references = [] for ( reference_selected_element, kind, - ) in results_references_by_selected_element.items(): + ) in results_references_kind_by_selected_element.items(): merged_references.append( ReferenceDefinition( selected_element=reference_selected_element, kind=list(kind), + points_to_batch=results_references_batch_pointing_by_selected_element[ + reference_selected_element + ], ) ) if not merged_references: diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 65361acc4..0416363e2 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -276,6 +276,9 @@ def add_edge_for_step( expected_type=InputNode, ) actual_input_kind = input_node_compilation_data.input_manifest.kind + actual_input_is_batch = ( + input_node_compilation_data.input_manifest.is_batch_oriented() + ) else: other_step_compilation_data = node_as( execution_graph=execution_graph, @@ -288,6 +291,25 @@ def add_edge_for_step( selector=target_step_parsed_selector.value ), ) + actual_input_is_batch = 
other_step_compilation_data.is_batch_oriented() + + batch_input_expected = bool( + sum( + ref.points_to_batch + for ref in target_step_parsed_selector.definition.allowed_references + ) + ) + if not batch_input_expected and actual_input_is_batch: + property_name = target_step_parsed_selector.definition.property_name + raise ExecutionGraphStructureError( + public_message=f"Detected invalid reference `{target_step_parsed_selector.value}` plugged " + f"into property `{property_name}` of step `{source_step_selector}` - the step " + f"property do not accept batch-oriented inputs, yet the selector " + f"`{target_step_parsed_selector.value}` holds one - this indicates the problem with " + f"construction of your Workflow - usually the problem occurs when non-batch oriented " + f"step inputs are filled with outputs of batch-oriented steps or batch-oriented inputs.", + context="workflow_compilation | execution_graph_construction", + ) expected_input_kind = list( itertools.chain.from_iterable( ref.kind diff --git a/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py index 644e41cb9..dae4c33a9 100644 --- a/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py @@ -89,6 +89,8 @@ def assemble_batch_oriented_input( ) for identifier, element in enumerate(value) ] + if len(result) == 1 and len(result) != input_batch_size: + result = result * input_batch_size if len(result) != input_batch_size: raise RuntimeInputError( public_message="Expected all batch-oriented workflow inputs be the same length, or of length 1 - " diff --git a/tests/inference/unit_tests/core/interfaces/http/test_orjson_utils.py b/tests/inference/unit_tests/core/interfaces/http/test_orjson_utils.py index 4572129f2..73f5f10d6 100644 --- a/tests/inference/unit_tests/core/interfaces/http/test_orjson_utils.py +++ b/tests/inference/unit_tests/core/interfaces/http/test_orjson_utils.py @@ -1,50 +1,12 @@ -import base64 - -import cv2 import numpy as np -from inference.core.interfaces.http.orjson_utils import ( - serialise_list, - serialise_workflow_result, -) +from inference.core.interfaces.http.orjson_utils import serialise_workflow_result from inference.core.workflows.execution_engine.entities.base import ( ImageParentMetadata, WorkflowImageData, ) -def test_serialise_list() -> None: - # given - np_image = np.zeros((192, 168, 3), dtype=np.uint8) - elements = [ - 3, - "some", - WorkflowImageData( - parent_metadata=ImageParentMetadata(parent_id="some"), - numpy_image=np_image, - ), - ] - - # when - result = serialise_list(elements=elements) - - # then - assert len(result) == 3, "The same number of elements must be returned" - assert result[0] == 3, "First element of list must be untouched" - assert result[1] == "some", "Second element of list must be untouched" - assert ( - result[2]["type"] == "base64" - ), "Type of third element must be changed into base64" - decoded = base64.b64decode(result[2]["value"]) - recovered_image = cv2.imdecode( - np.fromstring(decoded, dtype=np.uint8), - cv2.IMREAD_UNCHANGED, - ) - assert ( - recovered_image == np_image - ).all(), "Recovered image should be equal to input image" - - def test_serialise_workflow_result() -> None: # given np_image = np.zeros((192, 168, 3), dtype=np.uint8) diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py 
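The single-element broadcast added above is easiest to see through the assembler itself. A hedged sketch, not part of the patch, with illustrative inputs: one image input carries a batch of two, the other a one-element list, and the shorter one should be repeated to the common batch size instead of triggering the batch-size mismatch error.

import numpy as np

from inference.core.workflows.core_steps.loader import KINDS_DESERIALIZERS
from inference.core.workflows.execution_engine.entities.base import WorkflowImage
from inference.core.workflows.execution_engine.v1.executor.runtime_input_assembler import (
    assemble_runtime_parameters,
)

result = assemble_runtime_parameters(
    runtime_parameters={
        "image_1": [np.zeros((192, 168, 3), dtype=np.uint8)] * 2,  # batch of 2
        "image_2": [np.zeros((192, 168, 3), dtype=np.uint8)],      # single element
    },
    defined_inputs=[
        WorkflowImage(type="WorkflowImage", name="image_1"),
        WorkflowImage(type="WorkflowImage", name="image_2"),
    ],
    kinds_deserializers=KINDS_DESERIALIZERS,
)
# both entries are expected to hold batches of length 2 after assembly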
b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py index d1a7895c3..5bea77f36 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -1,8 +1,15 @@ import numpy as np +import pytest +import supervision as sv from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS from inference.core.managers.base import ModelManager +from inference.core.utils.image_utils import load_image from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.errors import ( + ExecutionGraphStructureError, + RuntimeInputError, +) from inference.core.workflows.execution_engine.core import ExecutionEngine TWO_STAGE_WORKFLOW = { @@ -109,8 +116,8 @@ "outputs": [ { "type": "JsonField", - "name": "result", - "selector": "$steps.breds_classification.*", + "name": "predictions", + "selector": "$steps.breds_classification.predictions", }, ], } @@ -170,17 +177,453 @@ def test_debug_execution_of_workflow_for_single_image_without_conditional_evalua "crops": [[e["crops"] for e in cropping_results[0]["result"]]], } ) - print(classification_results) - raise Exception() - # assert isinstance(result, list), "Expected list to be delivered" - # assert len(result) == 1, "Expected 1 element in the output for one input image" - # assert set(result[0].keys()) == { - # "predictions", - # }, "Expected all declared outputs to be delivered" - # assert ( - # len(result[0]["predictions"]) == 2 - # ), "Expected 2 dogs crops on input image, hence 2 nested classification results" - # assert [result[0]["predictions"][0]["top"], result[0]["predictions"][1]["top"]] == [ - # "116.Parson_russell_terrier", - # "131.Wirehaired_pointing_griffon", - # ], "Expected predictions to be as measured in reference run" + + # then + e2e_top_classes = [p["top"] for p in e2e_results[0]["predictions"]] + debug_top_classes = [p["top"] for p in classification_results[0]["predictions"]] + assert ( + e2e_top_classes == debug_top_classes + ), "Expected top class prediction from step-by-step execution to match e2e execution" + e2e_confidence = [p["confidence"] for p in e2e_results[0]["predictions"]] + debug_confidence = [ + p["confidence"] for p in classification_results[0]["predictions"] + ] + assert np.allclose( + e2e_confidence, debug_confidence, atol=1e-4 + ), "Expected confidences from step-by-step execution to match e2e execution" + + +def test_debug_execution_of_workflow_for_single_image_without_conditional_evaluation_when_serialization_is_requested( + model_manager: ModelManager, + dogs_image: np.ndarray, + roboflow_api_key: str, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": roboflow_api_key, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + end_to_end_execution_engine = ExecutionEngine.init( + workflow_definition=TWO_STAGE_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + first_step_execution_engine = ExecutionEngine.init( + workflow_definition=OBJECT_DETECTION_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + second_step_execution_engine = ExecutionEngine.init( + workflow_definition=CROP_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + 
) + third_step_execution_engine = ExecutionEngine.init( + workflow_definition=CLASSIFICATION_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + e2e_results = end_to_end_execution_engine.run( + runtime_parameters={ + "image": dogs_image, + }, + serialize_results=True, + ) + detection_results = first_step_execution_engine.run( + runtime_parameters={ + "image": dogs_image, + }, + serialize_results=True, + ) + detection_results_not_serialized = first_step_execution_engine.run( + runtime_parameters={ + "image": dogs_image, + }, + ) + cropping_results = second_step_execution_engine.run( + runtime_parameters={ + "image": dogs_image, + "predictions": detection_results[0]["result"]["predictions"], + }, + serialize_results=True, + ) + cropping_results_not_serialized = second_step_execution_engine.run( + runtime_parameters={ + "image": dogs_image, + "predictions": detection_results_not_serialized[0]["result"]["predictions"], + }, + serialize_results=False, + ) + classification_results = third_step_execution_engine.run( + runtime_parameters={ + "crops": [[e["crops"] for e in cropping_results[0]["result"]]], + }, + serialize_results=True, + ) + + # then + assert isinstance( + detection_results[0]["result"]["predictions"], dict + ), "Expected sv.Detections to be serialized" + assert isinstance( + detection_results_not_serialized[0]["result"]["predictions"], sv.Detections + ), "Expected sv.Detections not to be serialized" + deserialized_detections = sv.Detections.from_inference( + detection_results[0]["result"]["predictions"] + ) + assert np.allclose( + deserialized_detections.confidence, + detection_results_not_serialized[0]["result"]["predictions"].confidence, + atol=1e-4, + ), "Expected confidence match when serialized detections are deserialized" + intermediate_crop = cropping_results[0]["result"][0]["crops"] + assert ( + intermediate_crop["type"] == "base64" + ), "Expected crop to be serialized to base64" + decoded_image, _ = load_image(intermediate_crop) + number_of_pixels = ( + decoded_image.shape[0] * decoded_image.shape[1] * decoded_image.shape[2] + ) + assert ( + decoded_image.shape + == cropping_results_not_serialized[0]["result"][0]["crops"].numpy_image.shape + ), "Expected deserialized crop to match in size with not serialized one" + assert ( + abs( + (decoded_image.sum() / number_of_pixels) + - ( + cropping_results_not_serialized[0]["result"][0][ + "crops" + ].numpy_image.sum() + / number_of_pixels + ) + ) + < 1e-1 + ), "Content of serialized and not serialized crop should roughly match (up to compression)" + e2e_top_classes = [p["top"] for p in e2e_results[0]["predictions"]] + debug_top_classes = [p["top"] for p in classification_results[0]["predictions"]] + assert ( + e2e_top_classes == debug_top_classes + ), "Expected top class prediction from step-by-step execution to match e2e execution" + e2e_confidence = [p["confidence"] for p in e2e_results[0]["predictions"]] + debug_confidence = [ + p["confidence"] for p in classification_results[0]["predictions"] + ] + assert np.allclose( + e2e_confidence, debug_confidence, atol=1e-1 + ), "Expected confidences from step-by-step execution to match e2e execution" + + +def test_debug_execution_of_workflow_for_batch_of_images_without_conditional_evaluation( + model_manager: ModelManager, + dogs_image: np.ndarray, + roboflow_api_key: str, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": 
roboflow_api_key, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + end_to_end_execution_engine = ExecutionEngine.init( + workflow_definition=TWO_STAGE_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + first_step_execution_engine = ExecutionEngine.init( + workflow_definition=OBJECT_DETECTION_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + second_step_execution_engine = ExecutionEngine.init( + workflow_definition=CROP_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + third_step_execution_engine = ExecutionEngine.init( + workflow_definition=CLASSIFICATION_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + e2e_results = end_to_end_execution_engine.run( + runtime_parameters={ + "image": [dogs_image, dogs_image], + } + ) + detection_results = first_step_execution_engine.run( + runtime_parameters={ + "image": [dogs_image, dogs_image], + } + ) + cropping_results = second_step_execution_engine.run( + runtime_parameters={ + "image": [dogs_image, dogs_image], + "predictions": [ + detection_results[0]["result"]["predictions"], + detection_results[1]["result"]["predictions"], + ], + } + ) + classification_results = third_step_execution_engine.run( + runtime_parameters={ + "crops": [ + [e["crops"] for e in cropping_results[0]["result"]], + [e["crops"] for e in cropping_results[1]["result"]], + ], + } + ) + + # then + e2e_top_classes = [p["top"] for r in e2e_results for p in r["predictions"]] + debug_top_classes = [ + p["top"] for r in classification_results for p in r["predictions"] + ] + assert ( + e2e_top_classes == debug_top_classes + ), "Expected top class prediction from step-by-step execution to match e2e execution" + e2e_confidence = [p["confidence"] for r in e2e_results for p in r["predictions"]] + debug_confidence = [ + p["confidence"] for r in classification_results for p in r["predictions"] + ] + assert np.allclose( + e2e_confidence, debug_confidence, atol=1e-4 + ), "Expected confidences from step-by-step execution to match e2e execution" + + +TWO_STAGE_WORKFLOW_WITH_FLOW_CONTROL = { + "version": "1.3.0", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "ObjectDetectionModel", + "name": "general_detection", + "image": "$inputs.image", + "model_id": "yolov8n-640", + "class_filter": ["dog"], + }, + { + "type": "Crop", + "name": "cropping", + "image": "$inputs.image", + "predictions": "$steps.general_detection.predictions", + }, + { + "type": "roboflow_core/continue_if@v1", + "name": "verify_crop_size", + "condition_statement": { + "type": "StatementGroup", + "statements": [ + { + "type": "BinaryStatement", + "left_operand": { + "type": "DynamicOperand", + "operand_name": "crops", + "operations": [ + { + "type": "ExtractImageProperty", + "property_name": "size", + }, + ], + }, + "comparator": {"type": "(Number) >="}, + "right_operand": { + "type": "StaticOperand", + "value": 48000, + }, + } + ], + }, + "next_steps": ["$steps.breds_classification"], + "evaluation_parameters": {"crops": "$steps.cropping.crops"}, + }, + { + "type": "ClassificationModel", + "name": "breds_classification", + "image": "$steps.cropping.crops", + "model_id": "dog-breed-xpaq6/1", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "predictions", + "selector": 
"$steps.breds_classification.predictions", + }, + ], +} + + +def test_debug_execution_of_workflow_for_batch_of_images_with_conditional_evaluation( + model_manager: ModelManager, + dogs_image: np.ndarray, + roboflow_api_key: str, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": roboflow_api_key, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + end_to_end_execution_engine = ExecutionEngine.init( + workflow_definition=TWO_STAGE_WORKFLOW_WITH_FLOW_CONTROL, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + first_step_execution_engine = ExecutionEngine.init( + workflow_definition=OBJECT_DETECTION_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + second_step_execution_engine = ExecutionEngine.init( + workflow_definition=CROP_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + third_step_execution_engine = ExecutionEngine.init( + workflow_definition=CLASSIFICATION_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + e2e_results = end_to_end_execution_engine.run( + runtime_parameters={ + "image": [dogs_image, dogs_image], + } + ) + detection_results = first_step_execution_engine.run( + runtime_parameters={ + "image": [dogs_image, dogs_image], + } + ) + cropping_results = second_step_execution_engine.run( + runtime_parameters={ + "image": [dogs_image, dogs_image], + "predictions": [ + detection_results[0]["result"]["predictions"], + detection_results[1]["result"]["predictions"], + ], + } + ) + classification_results = third_step_execution_engine.run( + runtime_parameters={ + "crops": [ + [cropping_results[0]["result"][0]["crops"], None], + [cropping_results[1]["result"][0]["crops"], None], + ], + } + ) + + # then + assert ( + e2e_results[0]["predictions"][0] is not None + ), "Expected first dog crop not to be excluded by conditional eval" + assert ( + e2e_results[0]["predictions"][1] is None + ), "Expected second dog crop to be excluded by conditional eval" + assert ( + e2e_results[1]["predictions"][0] is not None + ), "Expected first dog crop not to be excluded by conditional eval" + assert ( + e2e_results[1]["predictions"][1] is None + ), "Expected second dog crop to be excluded by conditional eval" + e2e_top_classes = [ + p["top"] if p else None for r in e2e_results for p in r["predictions"] + ] + debug_top_classes = [ + p["top"] if p else None + for r in classification_results + for p in r["predictions"] + ] + assert ( + e2e_top_classes == debug_top_classes + ), "Expected top class prediction from step-by-step execution to match e2e execution" + e2e_confidence = [ + p["confidence"] if p else -1000.0 for r in e2e_results for p in r["predictions"] + ] + debug_confidence = [ + p["confidence"] if p else -1000.0 + for r in classification_results + for p in r["predictions"] + ] + assert np.allclose( + e2e_confidence, debug_confidence, atol=1e-4 + ), "Expected confidences from step-by-step execution to match e2e execution" + + +def test_debug_execution_when_empty_batch_oriented_input_provided( + model_manager: ModelManager, + dogs_image: np.ndarray, + roboflow_api_key: str, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": roboflow_api_key, + 
"workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=CROP_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + with pytest.raises(RuntimeInputError): + _ = execution_engine.run( + runtime_parameters={"image": [dogs_image, dogs_image], "predictions": None} + ) + + +WORKFLOW_WITH_BATCH_ORIENTED_CONFIDENCE = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + { + "type": "WorkflowDataBatch", + "name": "confidence", + }, + ], + "steps": [ + { + "type": "ObjectDetectionModel", + "name": "general_detection", + "image": "$inputs.image", + "model_id": "yolov8n-640", + "class_filter": ["dog"], + "confidence": "$inputs.confidence", + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.general_detection.*", + }, + ], +} + + +def test_workflow_run_which_hooks_up_batch_oriented_input_into_non_batch_oriented_parameters( + model_manager: ModelManager, + dogs_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # when + with pytest.raises(ExecutionGraphStructureError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_CONFIDENCE, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) diff --git a/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py b/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py index 72cf75818..bee91642a 100644 --- a/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py +++ b/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py @@ -282,7 +282,9 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="workflow_image", kind=[IMAGE_KIND] + selected_element="workflow_image", + kind=[IMAGE_KIND], + points_to_batch=True, ) ], is_list_element=False, @@ -297,6 +299,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="workflow_parameter", kind=[BOOLEAN_KIND, STRING_KIND], + points_to_batch=False, ) ], is_list_element=False, @@ -309,7 +312,9 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="step_output", kind=[IMAGE_KIND] + selected_element="step_output", + kind=[IMAGE_KIND], + points_to_batch=False, ) ], is_list_element=False, @@ -327,6 +332,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: BOOLEAN_KIND, OBJECT_DETECTION_PREDICTION_KIND, ], + points_to_batch=True, ) ], is_list_element=False, @@ -338,7 +344,11 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_name="step", property_description="not available", allowed_references=[ - ReferenceDefinition(selected_element="step", kind=[]) + ReferenceDefinition( + selected_element="step", + kind=[], + points_to_batch=False, + ) ], is_list_element=False, is_dict_element=False, @@ -385,10 +395,14 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="workflow_image", kind=[IMAGE_KIND] + 
selected_element="workflow_image", + kind=[IMAGE_KIND], + points_to_batch=True, ), ReferenceDefinition( - selected_element="step_output", kind=[IMAGE_KIND] + selected_element="step_output", + kind=[IMAGE_KIND], + points_to_batch=True, ), # nested list is ignored ], @@ -440,10 +454,14 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="workflow_image", kind=[IMAGE_KIND] + selected_element="workflow_image", + kind=[IMAGE_KIND], + points_to_batch=True, ), ReferenceDefinition( - selected_element="step_output", kind=[IMAGE_KIND] + selected_element="step_output", + kind=[IMAGE_KIND], + points_to_batch=True, ), # nested list is ignored ], @@ -495,10 +513,14 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="workflow_image", kind=[IMAGE_KIND] + selected_element="workflow_image", + kind=[IMAGE_KIND], + points_to_batch=True, ), ReferenceDefinition( - selected_element="step_output", kind=[IMAGE_KIND] + selected_element="step_output", + kind=[IMAGE_KIND], + points_to_batch=True, ), # nested list is ignored ], diff --git a/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py b/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py index 595cf8d96..0bf3a42fc 100644 --- a/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py +++ b/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py @@ -79,7 +79,9 @@ def describe_outputs(cls) -> List[OutputDefinition]: property_description="not available", allowed_references=[ ReferenceDefinition( - selected_element="workflow_image", kind=[IMAGE_KIND] + selected_element="workflow_image", + kind=[IMAGE_KIND], + points_to_batch=True, ) ], is_list_element=False, @@ -100,6 +102,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="workflow_parameter", kind=[BOOLEAN_KIND, STRING_KIND], + points_to_batch=False, ) ], is_list_element=False, From d31a3d20f525ddcef97a57d0370b756b4c9155ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 31 Oct 2024 15:11:24 +0100 Subject: [PATCH 05/67] Fix batch vs non-batch oriented parameters --- .../workflows/execution_engine/constants.py | 1 + .../v1/compiler/graph_constructor.py | 78 ++++--- .../execution_engine/v1/executor/core.py | 1 - .../execution_data_manager/manager.py | 1 - .../v1/executor/output_constructor.py | 3 - .../__init__.py | 206 ++++++++++++++++++ ...st_workflow_with_arbitrary_batch_inputs.py | 173 +++++++++++++++ .../introspection/test_schema_parser.py | 2 +- 8 files changed, 434 insertions(+), 31 deletions(-) create mode 100644 tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py diff --git a/inference/core/workflows/execution_engine/constants.py b/inference/core/workflows/execution_engine/constants.py index 95c3d619b..055dcc594 100644 --- a/inference/core/workflows/execution_engine/constants.py +++ b/inference/core/workflows/execution_engine/constants.py @@ -1,4 +1,5 @@ NODE_COMPILATION_OUTPUT_PROPERTY = "node_compilation_output" +PARSED_NODE_INPUT_SELECTORS_PROPERTY = "parsed_node_input_selectors" STEP_DEFINITION_PROPERTY = "definition" WORKFLOW_INPUT_BATCH_LINEAGE_ID = "" IMAGE_TYPE_KEY = "type" diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py 
b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 0416363e2..ae7f012c6 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -20,6 +20,7 @@ ) from inference.core.workflows.execution_engine.constants import ( NODE_COMPILATION_OUTPUT_PROPERTY, + PARSED_NODE_INPUT_SELECTORS_PROPERTY, WORKFLOW_INPUT_BATCH_LINEAGE_ID, ) from inference.core.workflows.execution_engine.entities.base import ( @@ -226,10 +227,14 @@ def add_steps_edges( execution_graph: DiGraph, ) -> DiGraph: for step in workflow_definition.steps: + source_step_selector = construct_step_selector(step_name=step.name) step_selectors = get_step_selectors(step_manifest=step) + execution_graph.nodes[source_step_selector][ + PARSED_NODE_INPUT_SELECTORS_PROPERTY + ] = step_selectors execution_graph = add_edges_for_step( execution_graph=execution_graph, - step_name=step.name, + source_step_selector=source_step_selector, target_step_parsed_selectors=step_selectors, ) return execution_graph @@ -237,10 +242,9 @@ def add_steps_edges( def add_edges_for_step( execution_graph: DiGraph, - step_name: str, + source_step_selector: str, target_step_parsed_selectors: List[ParsedSelector], ) -> DiGraph: - source_step_selector = construct_step_selector(step_name=step_name) for target_step_parsed_selector in target_step_parsed_selectors: execution_graph = add_edge_for_step( execution_graph=execution_graph, @@ -276,9 +280,6 @@ def add_edge_for_step( expected_type=InputNode, ) actual_input_kind = input_node_compilation_data.input_manifest.kind - actual_input_is_batch = ( - input_node_compilation_data.input_manifest.is_batch_oriented() - ) else: other_step_compilation_data = node_as( execution_graph=execution_graph, @@ -291,25 +292,6 @@ def add_edge_for_step( selector=target_step_parsed_selector.value ), ) - actual_input_is_batch = other_step_compilation_data.is_batch_oriented() - - batch_input_expected = bool( - sum( - ref.points_to_batch - for ref in target_step_parsed_selector.definition.allowed_references - ) - ) - if not batch_input_expected and actual_input_is_batch: - property_name = target_step_parsed_selector.definition.property_name - raise ExecutionGraphStructureError( - public_message=f"Detected invalid reference `{target_step_parsed_selector.value}` plugged " - f"into property `{property_name}` of step `{source_step_selector}` - the step " - f"property do not accept batch-oriented inputs, yet the selector " - f"`{target_step_parsed_selector.value}` holds one - this indicates the problem with " - f"construction of your Workflow - usually the problem occurs when non-batch oriented " - f"step inputs are filled with outputs of batch-oriented steps or batch-oriented inputs.", - context="workflow_compilation | execution_graph_construction", - ) expected_input_kind = list( itertools.chain.from_iterable( ref.kind @@ -693,6 +675,52 @@ def denote_data_flow_for_step( output_dimensionality_offset=output_dimensionality_offset, ) ) + parsed_step_input_selectors: List[ParsedSelector] = execution_graph.nodes[node][ + PARSED_NODE_INPUT_SELECTORS_PROPERTY + ] + input_property2batch_expected = {} + for parsed_selector in parsed_step_input_selectors: + input_property2batch_expected[parsed_selector.definition.property_name] = { + ref.points_to_batch for ref in parsed_selector.definition.allowed_references + } + for property_name, input_definition in input_data.items(): + if property_name not in 
input_property2batch_expected:
+            # only values plugged via selectors are to be validated
+            continue
+        if input_definition.is_compound_input():
+            actual_input_is_batch = {
+                element.is_batch_oriented()
+                for element in input_definition.iterate_through_definitions()
+            }
+        else:
+            actual_input_is_batch = {input_definition.is_batch_oriented()}
+        batch_input_expected = input_property2batch_expected[property_name]
+        if batch_input_expected == {False} and True in actual_input_is_batch:
+            raise ExecutionGraphStructureError(
+                public_message=f"Detected invalid reference plugged "
+                f"into property `{property_name}` of step `{node}` - the step "
+                f"property does not accept batch-oriented inputs, yet the input selector "
+                f"holds one - this indicates a problem with "
+                f"the construction of your Workflow - usually the problem occurs when non-batch oriented "
+                f"step inputs are filled with outputs of batch-oriented steps or batch-oriented inputs.",
+                context="workflow_compilation | execution_graph_construction",
+            )
+        step_accepts_batch_input = step_node_data.step_manifest.accepts_batch_input()
+        if (
+            step_accepts_batch_input
+            and batch_input_expected == {True}
+            and False in actual_input_is_batch
+        ):
+            raise ExecutionGraphStructureError(
+                public_message=f"Detected invalid reference plugged "
+                f"into property `{property_name}` of step `{node}` - the step "
+                f"property strictly requires batch-oriented inputs, yet the input selector "
+                f"holds a non-batch oriented input - this indicates a "
+                f"problem with the construction of your Workflow - usually the problem occurs when "
+                f"batch-oriented step inputs are filled with outputs of non batch-oriented "
+                f"steps or non batch-oriented inputs.",
+                context="workflow_compilation | execution_graph_construction",
+            )
     if not parameters_with_batch_inputs:
         data_lineage = []
     else:
diff --git a/inference/core/workflows/execution_engine/v1/executor/core.py b/inference/core/workflows/execution_engine/v1/executor/core.py
index 940b3df43..b3816bde1 100644
--- a/inference/core/workflows/execution_engine/v1/executor/core.py
+++ b/inference/core/workflows/execution_engine/v1/executor/core.py
@@ -200,7 +200,6 @@ def run_simd_step_in_batch_mode(
     step_input = execution_data_manager.get_simd_step_input(
         step_selector=step_selector
     )
-    print(f"step [{step_selector}] input: {step_input}")
     with profiler.profile_execution_phase(
         name="step_code_execution",
         categories=["workflow_block_operation"],
diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py
index b50050127..349cc1e46 100644
--- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py
+++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py
@@ -245,7 +245,6 @@ def register_simd_step_output(
                 outputs=outputs,
             )
             return None
-        print(f"Registering: {step_name}, {indices}, {len(outputs)}")
         self._execution_cache.register_batch_of_step_outputs(
             step_name=step_name,
             indices=indices,
diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py
index 0d798b8c9..d44b9e247 100644
--- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py
+++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py
@@ -91,11 +91,8 @@ def construct_workflow_output(
         )
     )
     for name in
batch_oriented_outputs: - print(f"constructing output: {name}") array = outputs_arrays[name] - print(f"output array: {array}") indices = output_name2indices[name] - print(f"output indices: {indices}") data = execution_data_manager.get_batch_data( selector=name2selector[name], indices=indices, diff --git a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py new file mode 100644 index 000000000..4c41b9a94 --- /dev/null +++ b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py @@ -0,0 +1,206 @@ +from typing import Any, List, Literal, Type, Union + +from pydantic import ConfigDict + +from inference.core.workflows.execution_engine.entities.base import ( + Batch, + OutputDefinition, +) +from inference.core.workflows.execution_engine.entities.types import ( + FLOAT_ZERO_TO_ONE_KIND, + StepOutputSelector, + WorkflowParameterSelector, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + + +class NonBatchInputBlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "short_description": "", + "long_description": "", + "license": "Apache-2.0", + "block_type": "dummy", + } + ) + type: Literal["NonBatchInputBlock"] + non_batch_parameter: Union[WorkflowParameterSelector(), Any] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name="float_value", + kind=[FLOAT_ZERO_TO_ONE_KIND], + ), + ] + + +class NonBatchInputBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonBatchInputBlockManifest + + def run(self, non_batch_parameter: Any) -> BlockResult: + return {"float_value": 0.4} + + +class MixedInputWithoutBatchesBlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "short_description": "", + "long_description": "", + "license": "Apache-2.0", + "block_type": "dummy", + } + ) + type: Literal["MixedInputWithoutBatchesBlock"] + mixed_parameter: Union[ + WorkflowParameterSelector(), + StepOutputSelector(), + Any, + ] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name="float_value", + kind=[FLOAT_ZERO_TO_ONE_KIND], + ), + ] + + +class MixedInputWithoutBatchesBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MixedInputWithoutBatchesBlockManifest + + def run(self, mixed_parameter: Any) -> BlockResult: + return {"float_value": 0.4} + + +class MixedInputWithBatchesBlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "short_description": "", + "long_description": "", + "license": "Apache-2.0", + "block_type": "dummy", + } + ) + type: Literal["MixedInputWithBatchesBlock"] + mixed_parameter: Union[ + WorkflowParameterSelector(), + StepOutputSelector(), + Any, + ] + + @classmethod + def accepts_batch_input(cls) -> bool: + return True + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name="float_value", + kind=[FLOAT_ZERO_TO_ONE_KIND], + ), + ] + + +class MixedInputWithBatchesBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MixedInputWithBatchesBlockManifest + + def run(self, mixed_parameter: Union[Batch[Any], Any]) -> 
BlockResult: + if isinstance(mixed_parameter, Batch): + return [{"float_value": 0.4}] * len(mixed_parameter) + return {"float_value": 0.4} + + +class BatchInputBlockProcessingBatchesManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "short_description": "", + "long_description": "", + "license": "Apache-2.0", + "block_type": "dummy", + } + ) + type: Literal["BatchInputBlockProcessingBatches"] + batch_parameter: StepOutputSelector() + + @classmethod + def accepts_batch_input(cls) -> bool: + return True + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name="float_value", + kind=[FLOAT_ZERO_TO_ONE_KIND], + ), + ] + + +class BatchInputProcessingBatchesBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BatchInputBlockProcessingBatchesManifest + + def run(self, batch_parameter: Batch[Any]) -> BlockResult: + return [{"float_value": 0.4}] * len(batch_parameter) + + +class BatchInputBlockProcessingNotBatchesManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "short_description": "", + "long_description": "", + "license": "Apache-2.0", + "block_type": "dummy", + } + ) + type: Literal["BatchInputBlockNotProcessingBatches"] + batch_parameter: StepOutputSelector() + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name="float_value", + kind=[FLOAT_ZERO_TO_ONE_KIND], + ), + ] + + +class BatchInputNotProcessingBatchesBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BatchInputBlockProcessingNotBatchesManifest + + def run(self, batch_parameter: Batch[Any]) -> BlockResult: + return {"float_value": 0.4} + + +def load_blocks() -> List[Type[WorkflowBlock]]: + return [ + NonBatchInputBlock, + MixedInputWithBatchesBlock, + MixedInputWithoutBatchesBlock, + BatchInputProcessingBatchesBlock, + BatchInputNotProcessingBatchesBlock, + ] diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py index 5bea77f36..9d0609d76 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -1,3 +1,6 @@ +from unittest import mock +from unittest.mock import MagicMock + import numpy as np import pytest import supervision as sv @@ -11,6 +14,7 @@ RuntimeInputError, ) from inference.core.workflows.execution_engine.core import ExecutionEngine +from inference.core.workflows.execution_engine.introspection import blocks_loader TWO_STAGE_WORKFLOW = { "version": "1.3.0", @@ -627,3 +631,172 @@ def test_workflow_run_which_hooks_up_batch_oriented_input_into_non_batch_oriente init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_NON_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "MixedInputWithBatchesBlock", + "name": "step_two", + "mixed_parameter": "$steps.step_one.float_value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": 
"$steps.step_two.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_non_batch_oriented_step_feeds_non_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_NON_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_NOT_OPERATING_BATCH_WISE = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "BatchInputBlockNotProcessingBatches", + "name": "step_two", + "batch_parameter": "$steps.step_one.float_value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_two.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_non_batch_oriented_step_feeds_batch_oriented_step_not_operating_batch_wise( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_NOT_OPERATING_BATCH_WISE, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_OPERATING_BATCH_WISE = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "BatchInputBlockProcessingBatches", + "name": "step_two", + "batch_parameter": "$steps.step_one.float_value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_two.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_non_batch_oriented_step_feeds_batch_oriented_step_operating_batch_wise( + get_plugin_modules_mock: MagicMock, + model_manager: 
ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # when + with pytest.raises(ExecutionGraphStructureError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_OPERATING_BATCH_WISE, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) diff --git a/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py b/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py index bee91642a..8a965273e 100644 --- a/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py +++ b/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py @@ -314,7 +314,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="step_output", kind=[IMAGE_KIND], - points_to_batch=False, + points_to_batch=True, ) ], is_list_element=False, From ff227d5dd018d9d80820263d7bfc739c88582539 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 31 Oct 2024 15:29:13 +0100 Subject: [PATCH 06/67] Add additional tests --- ...st_workflow_with_arbitrary_batch_inputs.py | 288 ++++++++++++++++++ 1 file changed, 288 insertions(+) diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py index 9d0609d76..9c2a4a827 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -800,3 +800,291 @@ def test_workflow_when_non_batch_oriented_step_feeds_batch_oriented_step_operati init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_MIXED_INPUT_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "MixedInputWithBatchesBlock", + "name": "step_two", + "mixed_parameter": "$steps.step_one.float_value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_two.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_non_batch_oriented_step_feeds_mixed_input_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_MIXED_INPUT_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = 
execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_BATCH_ORIENTED_STEP_FEEDING_MIXED_INPUT_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "BatchInputBlockNotProcessingBatches", + "name": "step_two", + "batch_parameter": "$steps.step_one.float_value", + }, + { + "type": "MixedInputWithBatchesBlock", + "name": "step_three", + "mixed_parameter": "$steps.step_two.float_value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_three.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_batch_oriented_step_feeds_mixed_input_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_STEP_FEEDING_MIXED_INPUT_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_INTO_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + { + "type": "WorkflowDataBatch", + "name": "data", + }, + ], + "steps": [ + { + "type": "BatchInputBlockProcessingBatches", + "name": "step_one", + "batch_parameter": "$inputs.data", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_one.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_batch_oriented_input_feeds_batch_input_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_INTO_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "data": ["some", "other"], + } + ) + + # then + assert len(result) == 2, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + assert result[1]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_INTO_MIXED_INPUT_STEP = { + "version": "1.3.0", + "inputs": [ + { + "type": 
"WorkflowDataBatch", + "name": "data", + }, + ], + "steps": [ + { + "type": "MixedInputWithBatchesBlock", + "name": "step_one", + "mixed_parameter": "$inputs.data", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_one.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_batch_oriented_input_feeds_mixed_input_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_INTO_MIXED_INPUT_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "data": ["some", "other"], + } + ) + + # then + assert len(result) == 2, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + assert result[1]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_INTO_NON_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + { + "type": "WorkflowDataBatch", + "name": "data", + }, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.data", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_one.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_batch_oriented_input_feeds_non_batch_input_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # when + with pytest.raises(ExecutionGraphStructureError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_INTO_NON_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) From 33799021770998759c94c4c914082c784aaf3b96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 31 Oct 2024 16:29:59 +0100 Subject: [PATCH 07/67] Add remaining tests --- .../__init__.py | 152 ++- ...st_workflow_with_arbitrary_batch_inputs.py | 881 ++++++++++++++++++ 2 files changed, 1032 insertions(+), 1 deletion(-) diff --git a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py index 4c41b9a94..5a44ed86e 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py @@ -1,4 +1,4 @@ -from typing import Any, List, Literal, Type, Union +from typing import Any, 
Dict, List, Literal, Type, Union from pydantic import ConfigDict @@ -196,6 +196,152 @@ def run(self, batch_parameter: Batch[Any]) -> BlockResult: return {"float_value": 0.4} +class CompoundNonBatchInputBlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "short_description": "", + "long_description": "", + "license": "Apache-2.0", + "block_type": "dummy", + } + ) + type: Literal["CompoundNonBatchInputBlock"] + compound_parameter: Dict[str, Union[WorkflowParameterSelector(), Any]] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name="float_value", + kind=[FLOAT_ZERO_TO_ONE_KIND], + ), + ] + + +class CompoundNonBatchInputBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return CompoundNonBatchInputBlockManifest + + def run(self, compound_parameter: Dict[str, Any]) -> BlockResult: + return {"float_value": 0.4} + + +class CompoundMixedInputBlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "short_description": "", + "long_description": "", + "license": "Apache-2.0", + "block_type": "dummy", + } + ) + type: Literal["CompoundMixedInputBlockManifestBlock"] + compound_parameter: Dict[ + str, Union[WorkflowParameterSelector(), StepOutputSelector(), Any] + ] + + @classmethod + def accepts_batch_input(cls) -> bool: + return True + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name="float_value", + kind=[FLOAT_ZERO_TO_ONE_KIND], + ), + ] + + +class CompoundMixedInputBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return CompoundMixedInputBlockManifest + + def run(self, compound_parameter: Dict[str, Any]) -> BlockResult: + retrieved_batches = [ + v for v in compound_parameter.values() if isinstance(v, Batch) + ] + if not retrieved_batches: + return {"float_value": 0.4} + return [{"float_value": 0.4}] * len(retrieved_batches[0]) + + +class CompoundStrictBatchBlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "short_description": "", + "long_description": "", + "license": "Apache-2.0", + "block_type": "dummy", + } + ) + type: Literal["CompoundStrictBatchBlock"] + compound_parameter: Dict[str, Union[StepOutputSelector()]] + + @classmethod + def accepts_batch_input(cls) -> bool: + return True + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name="float_value", + kind=[FLOAT_ZERO_TO_ONE_KIND], + ), + ] + + +class CompoundStrictBatchBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return CompoundStrictBatchBlockManifest + + def run(self, compound_parameter: Dict[str, Any]) -> BlockResult: + retrieved_batches = [ + v for v in compound_parameter.values() if isinstance(v, Batch) + ] + return [{"float_value": 0.4}] * len(retrieved_batches[0]) + + +class CompoundNonStrictBatchBlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "short_description": "", + "long_description": "", + "license": "Apache-2.0", + "block_type": "dummy", + } + ) + type: Literal["CompoundNonStrictBatchBlock"] + compound_parameter: Dict[str, Union[StepOutputSelector()]] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition( + name="float_value", + kind=[FLOAT_ZERO_TO_ONE_KIND], + ), + ] + + +class 
CompoundNonStrictBatchBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return CompoundNonStrictBatchBlockManifest + + def run(self, compound_parameter: Dict[str, Any]) -> BlockResult: + return {"float_value": 0.4} + + def load_blocks() -> List[Type[WorkflowBlock]]: return [ NonBatchInputBlock, @@ -203,4 +349,8 @@ def load_blocks() -> List[Type[WorkflowBlock]]: MixedInputWithoutBatchesBlock, BatchInputProcessingBatchesBlock, BatchInputNotProcessingBatchesBlock, + CompoundNonBatchInputBlock, + CompoundMixedInputBlock, + CompoundStrictBatchBlock, + CompoundNonStrictBatchBlock, ] diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py index 9c2a4a827..d0bf1082c 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -1088,3 +1088,884 @@ def test_workflow_when_batch_oriented_input_feeds_non_batch_input_step( init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "CompoundNonBatchInputBlock", + "name": "step_two", + "compound_parameter": { + "some": "$steps.step_one.float_value", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_two.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_non_batch_oriented_step_feeds_compound_non_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_MIXED_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "CompoundMixedInputBlockManifestBlock", + "name": "step_two", + "compound_parameter": { + "some": "$steps.step_one.float_value", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_two.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def 
test_workflow_when_non_batch_oriented_step_feeds_compound_mixed_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_MIXED_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_LOOSELY_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "CompoundNonStrictBatchBlock", + "name": "step_two", + "compound_parameter": { + "some": "$steps.step_one.float_value", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_two.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_non_batch_oriented_step_feeds_compound_loosely_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_LOOSELY_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "CompoundStrictBatchBlock", + "name": "step_two", + "compound_parameter": { + "some": "$steps.step_one.float_value", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_two.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_non_batch_oriented_step_feeds_compound_strictly_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + 
"tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + with pytest.raises(ExecutionGraphStructureError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + +WORKFLOW_WITH_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "BatchInputBlockNotProcessingBatches", + "name": "step_two", + "batch_parameter": "$steps.step_one.float_value", + }, + { + "type": "CompoundNonBatchInputBlock", + "name": "step_three", + "compound_parameter": { + "some": "$steps.step_two.float_value", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_three.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_batch_oriented_step_feeds_compound_non_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_BATCH_ORIENTED_STEP_FEEDING_MIXED_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "BatchInputBlockNotProcessingBatches", + "name": "step_two", + "batch_parameter": "$steps.step_one.float_value", + }, + { + "type": "CompoundMixedInputBlockManifestBlock", + "name": "step_three", + "compound_parameter": { + "some": "$steps.step_two.float_value", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_three.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_batch_oriented_step_feeds_compound_mixed_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + 
"workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_STEP_FEEDING_MIXED_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "non_batch_parameter"}, + ], + "steps": [ + { + "type": "NonBatchInputBlock", + "name": "step_one", + "non_batch_parameter": "$inputs.non_batch_parameter", + }, + { + "type": "BatchInputBlockNotProcessingBatches", + "name": "step_two", + "batch_parameter": "$steps.step_one.float_value", + }, + { + "type": "CompoundNonStrictBatchBlock", + "name": "step_three", + "compound_parameter": { + "some": "$steps.step_two.float_value", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_three.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_batch_oriented_step_feeds_compound_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "data"}, + ], + "steps": [ + { + "type": "CompoundNonBatchInputBlock", + "name": "step_one", + "compound_parameter": { + "some": "$inputs.data", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_one.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_non_batch_oriented_input_feeds_compound_non_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + 
max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "data": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_MIXED_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "data"}, + ], + "steps": [ + { + "type": "CompoundMixedInputBlockManifestBlock", + "name": "step_one", + "compound_parameter": { + "some": "$inputs.data", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_one.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_non_batch_oriented_input_feeds_compound_mixed_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_MIXED_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "data": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_LOOSELY_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "data"}, + ], + "steps": [ + { + "type": "CompoundNonStrictBatchBlock", + "name": "step_one", + "compound_parameter": { + "some": "$inputs.data", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_one.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_non_batch_oriented_input_feeds_compound_loosely_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_LOOSELY_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run( + runtime_parameters={ + "data": "some", + } + ) + + # then + assert len(result) == 1, "Expected singular result" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowParameter", "name": "data"}, + ], + "steps": [ + { + "type": "CompoundStrictBatchBlock", + "name": "step_one", + "compound_parameter": { + 
"some": "$inputs.data", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_one.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_non_batch_oriented_input_feeds_compound_strictly_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + with pytest.raises(ExecutionGraphStructureError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + +WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + { + "type": "WorkflowDataBatch", + "name": "data", + }, + ], + "steps": [ + { + "type": "CompoundNonBatchInputBlock", + "name": "step_one", + "compound_parameter": { + "some": "$inputs.data", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_one.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_batch_oriented_input_feeds_compound_non_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # when + with pytest.raises(ExecutionGraphStructureError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + +WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_MIXED_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + { + "type": "WorkflowDataBatch", + "name": "data", + }, + ], + "steps": [ + { + "type": "CompoundMixedInputBlockManifestBlock", + "name": "step_one", + "compound_parameter": { + "some": "$inputs.data", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_one.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_batch_oriented_input_feeds_compound_mixed_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_MIXED_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + 
max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "data": ["some", "other"], + } + ) + + # then + assert len(result) == 2, "Expected 2 results" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + assert result[1]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_LOOSELY_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + { + "type": "WorkflowDataBatch", + "name": "data", + }, + ], + "steps": [ + { + "type": "CompoundNonStrictBatchBlock", + "name": "step_one", + "compound_parameter": { + "some": "$inputs.data", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_one.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_batch_oriented_input_feeds_compound_loosely_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_LOOSELY_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "data": ["some", "other"], + } + ) + + # then + assert len(result) == 2, "Expected 2 results" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + assert result[1]["result"] == 0.4, "Expected hardcoded result" + + +WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP = { + "version": "1.3.0", + "inputs": [ + { + "type": "WorkflowDataBatch", + "name": "data", + }, + ], + "steps": [ + { + "type": "CompoundStrictBatchBlock", + "name": "step_one", + "compound_parameter": { + "some": "$inputs.data", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.step_one.float_value", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_when_batch_oriented_input_feeds_compound_strictly_batch_oriented_step( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.mixed_input_characteristic_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "data": ["some", "other"], + } + ) + + # then + assert len(result) == 2, "Expected 2 results" + assert result[0]["result"] == 0.4, "Expected hardcoded result" + assert result[1]["result"] == 0.4, "Expected hardcoded result" From 329b2ccf1592290104974a2d1b4dac0f3b80a014 Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 31 Oct 2024 20:28:55 +0100 Subject: [PATCH 08/67] Apply refactor to add BatchOfDataSelector --- docs/workflows/blocks.md | 91 +++++++++---------- docs/workflows/kinds.md | 41 +++++---- .../analytics/data_aggregator/v1.py | 6 +- .../core_steps/analytics/line_counter/v1.py | 6 +- .../core_steps/analytics/line_counter/v2.py | 6 +- .../core_steps/analytics/path_deviation/v1.py | 6 +- .../core_steps/analytics/path_deviation/v2.py | 6 +- .../core_steps/analytics/time_in_zone/v1.py | 6 +- .../core_steps/analytics/time_in_zone/v2.py | 6 +- .../classical_cv/distance_measurement/v1.py | 4 +- .../classical_cv/pixel_color_count/v1.py | 4 +- .../classical_cv/sift_comparison/v1.py | 6 +- .../classical_cv/sift_comparison/v2.py | 6 +- .../classical_cv/size_measurement/v1.py | 6 +- .../core_steps/flow_control/continue_if/v1.py | 6 +- .../flow_control/rate_limiter/v1.py | 4 +- .../workflows/core_steps/formatters/csv/v1.py | 6 +- .../core_steps/formatters/expression/v1.py | 6 +- .../first_non_empty_or_default/v1.py | 4 +- .../core_steps/formatters/json_parser/v1.py | 4 +- .../formatters/property_definition/v1.py | 4 +- .../formatters/vlm_as_classifier/v1.py | 6 +- .../formatters/vlm_as_detector/v1.py | 9 +- .../detections_classes_replacement/v1.py | 6 +- .../fusion/detections_consensus/v1.py | 4 +- .../core_steps/fusion/detections_stitch/v1.py | 4 +- .../fusion/dimension_collapse/v1.py | 4 +- .../models/foundation/anthropic_claude/v1.py | 3 +- .../models/foundation/florence2/v1.py | 4 +- .../models/foundation/google_gemini/v1.py | 3 +- .../core_steps/models/foundation/openai/v2.py | 3 +- .../models/foundation/segment_anything2/v1.py | 4 +- .../foundation/stability_ai/inpainting/v1.py | 8 +- .../core_steps/sinks/email_notification/v1.py | 12 ++- .../core_steps/sinks/local_file/v1.py | 4 +- .../sinks/roboflow/custom_metadata/v1.py | 6 +- .../sinks/roboflow/dataset_upload/v1.py | 4 +- .../sinks/roboflow/dataset_upload/v2.py | 4 +- .../workflows/core_steps/sinks/webhook/v1.py | 12 +-- .../transformations/bounding_rect/v1.py | 4 +- .../transformations/byte_tracker/v1.py | 4 +- .../transformations/byte_tracker/v2.py | 4 +- .../transformations/byte_tracker/v3.py | 4 +- .../transformations/detection_offset/v1.py | 4 +- .../transformations/detections_filter/v1.py | 8 +- .../detections_transformation/v1.py | 8 +- .../transformations/dynamic_crop/v1.py | 6 +- .../transformations/dynamic_zones/v1.py | 4 +- .../perspective_correction/v1.py | 6 +- .../stabilize_detections/v1.py | 4 +- .../core_steps/visualizations/common/base.py | 4 +- .../core_steps/visualizations/halo/v1.py | 4 +- .../core_steps/visualizations/line_zone/v1.py | 8 +- .../core_steps/visualizations/mask/v1.py | 4 +- .../visualizations/model_comparison/v1.py | 6 +- .../core_steps/visualizations/polygon/v1.py | 4 +- .../visualizations/polygon_zone/v1.py | 4 +- .../visualizations/reference_path/v1.py | 4 +- .../execution_engine/entities/types.py | 18 +++- .../introspection/connections_discovery.py | 11 ++- .../v1/dynamic_blocks/block_assembler.py | 3 + .../v1/dynamic_blocks/entities.py | 1 + .../__init__.py | 16 ++-- 63 files changed, 259 insertions(+), 218 deletions(-) diff --git a/docs/workflows/blocks.md b/docs/workflows/blocks.md index 06cea3d4c..0f620e4f7 100644 --- a/docs/workflows/blocks.md +++ b/docs/workflows/blocks.md @@ -13,24 +13,6 @@ hide:
@@ -49,59 +31,76 @@ hide:
[The bodies of both docs/workflows/blocks.md hunks were lost in extraction; only bare `+`/`-` line markers of the regenerated block listing survived.]

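For readers skimming the per-file diffs below: the refactor in this patch is largely mechanical, replacing `StepOutputSelector(...)` with `BatchOfDataSelector(...)` in block manifest fields, while scalar parameters keep using `WorkflowParameterSelector(...)`. Judging by the new `WorkflowDataBatch` input used in the tests above, the intent appears to be that a single selector type can reference any batch-oriented data, whether it comes from a step output or a batch input. The sketch below is an illustration of that pattern, not code from the patch; the block identifier is hypothetical, the imports mirror those shown in the diffs, and a real manifest would define more (outputs description, versioning) than is shown here.

```python
# Minimal sketch (assumed, not part of the patch) of a manifest field migrated
# from StepOutputSelector to BatchOfDataSelector.
from typing import Literal, Union

from pydantic import Field

from inference.core.workflows.execution_engine.entities.types import (
    LIST_OF_VALUES_KIND,
    OBJECT_DETECTION_PREDICTION_KIND,
    BatchOfDataSelector,
    WorkflowParameterSelector,
)
from inference.core.workflows.prototypes.block import WorkflowBlockManifest


class ExampleManifest(WorkflowBlockManifest):
    # hypothetical block identifier, used only for this illustration
    type: Literal["roboflow_core/example_block@v1"]
    # before this patch: StepOutputSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND])
    predictions: BatchOfDataSelector(
        kind=[OBJECT_DETECTION_PREDICTION_KIND]
    ) = Field(
        description="Reference to batch-oriented detections, e.g. a model step output",
        examples=["$steps.model.predictions"],
    )
    # scalar parameters keep using WorkflowParameterSelector, unchanged by the patch
    zone: Union[
        list,
        WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]),
    ] = Field(  # type: ignore
        description="Zone given inline or referenced as a workflow parameter",
        examples=[[[0, 0], [0, 100], [100, 100]], "$inputs.zone"],
    )
```

Because the selector factories only change how the Execution Engine classifies the reference, the value a block receives at runtime is unaffected; that is why the diffs below touch annotations and imports but not block logic.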
diff --git a/docs/workflows/kinds.md b/docs/workflows/kinds.md index d910b8527..37950a6eb 100644 --- a/docs/workflows/kinds.md +++ b/docs/workflows/kinds.md @@ -37,36 +37,37 @@ for the presence of a mask in the input. ## Kinds declared in Roboflow plugins +* [`roboflow_project`](/workflows/kinds/roboflow_project): Roboflow project name +* [`point`](/workflows/kinds/point): Single point in 2D +* [`bytes`](/workflows/kinds/bytes): This kind represent bytes +* [`language_model_output`](/workflows/kinds/language_model_output): LLM / VLM output +* [`dictionary`](/workflows/kinds/dictionary): Dictionary +* [`video_metadata`](/workflows/kinds/video_metadata): Video image metadata * [`image_metadata`](/workflows/kinds/image_metadata): Dictionary with image metadata required by supervision +* [`zone`](/workflows/kinds/zone): Definition of polygon zone +* [`rgb_color`](/workflows/kinds/rgb_color): RGB color * [`string`](/workflows/kinds/string): String value +* [`serialised_payloads`](/workflows/kinds/serialised_payloads): Serialised element that is usually accepted by sink +* [`detection`](/workflows/kinds/detection): Single element of detections-based prediction (like `object_detection_prediction`) +* [`list_of_values`](/workflows/kinds/list_of_values): List of values of any type * [`numpy_array`](/workflows/kinds/numpy_array): Numpy array -* [`parent_id`](/workflows/kinds/parent_id): Identifier of parent for step output * [`qr_code_detection`](/workflows/kinds/qr_code_detection): Prediction with QR code detection * [`float`](/workflows/kinds/float): Float value -* [`dictionary`](/workflows/kinds/dictionary): Dictionary -* [`float_zero_to_one`](/workflows/kinds/float_zero_to_one): `float` value in range `[0.0, 1.0]` -* [`object_detection_prediction`](/workflows/kinds/object_detection_prediction): Prediction with detected bounding boxes in form of sv.Detections(...) object * [`*`](/workflows/kinds/*): Equivalent of any element * [`bar_code_detection`](/workflows/kinds/bar_code_detection): Prediction with barcode detection +* [`object_detection_prediction`](/workflows/kinds/object_detection_prediction): Prediction with detected bounding boxes in form of sv.Detections(...) object +* [`image_keypoints`](/workflows/kinds/image_keypoints): Image keypoints detected by classical Computer Vision method +* [`keypoint_detection_prediction`](/workflows/kinds/keypoint_detection_prediction): Prediction with detected bounding boxes and detected keypoints in form of sv.Detections(...) 
object +* [`float_zero_to_one`](/workflows/kinds/float_zero_to_one): `float` value in range `[0.0, 1.0]` +* [`image`](/workflows/kinds/image): Image in workflows * [`roboflow_model_id`](/workflows/kinds/roboflow_model_id): Roboflow model id -* [`contours`](/workflows/kinds/contours): List of numpy arrays where each array represents contour points -* [`serialised_payloads`](/workflows/kinds/serialised_payloads): Serialised element that is usually accepted by sink -* [`video_metadata`](/workflows/kinds/video_metadata): Video image metadata +* [`integer`](/workflows/kinds/integer): Integer value * [`top_class`](/workflows/kinds/top_class): String value representing top class predicted by classification model -* [`language_model_output`](/workflows/kinds/language_model_output): LLM / VLM output -* [`image`](/workflows/kinds/image): Image in workflows -* [`roboflow_api_key`](/workflows/kinds/roboflow_api_key): Roboflow API key -* [`rgb_color`](/workflows/kinds/rgb_color): RGB color * [`boolean`](/workflows/kinds/boolean): Boolean flag -* [`roboflow_project`](/workflows/kinds/roboflow_project): Roboflow project name -* [`image_keypoints`](/workflows/kinds/image_keypoints): Image keypoints detected by classical Computer Vision method -* [`list_of_values`](/workflows/kinds/list_of_values): List of values of any type -* [`zone`](/workflows/kinds/zone): Definition of polygon zone -* [`point`](/workflows/kinds/point): Single point in 2D -* [`prediction_type`](/workflows/kinds/prediction_type): String value with type of prediction +* [`roboflow_api_key`](/workflows/kinds/roboflow_api_key): Roboflow API key * [`instance_segmentation_prediction`](/workflows/kinds/instance_segmentation_prediction): Prediction with detected bounding boxes and segmentation masks in form of sv.Detections(...) object -* [`integer`](/workflows/kinds/integer): Integer value -* [`keypoint_detection_prediction`](/workflows/kinds/keypoint_detection_prediction): Prediction with detected bounding boxes and detected keypoints in form of sv.Detections(...) 
object +* [`contours`](/workflows/kinds/contours): List of numpy arrays where each array represents contour points +* [`prediction_type`](/workflows/kinds/prediction_type): String value with type of prediction +* [`parent_id`](/workflows/kinds/parent_id): Identifier of parent for step output * [`classification_prediction`](/workflows/kinds/classification_prediction): Predictions from classifier -* [`detection`](/workflows/kinds/detection): Single element of detections-based prediction (like `object_detection_prediction`) diff --git a/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py b/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py index 5f467e249..d8d852658 100644 --- a/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py +++ b/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py @@ -18,7 +18,7 @@ FLOAT_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -194,7 +194,9 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/data_aggregator@v1"] data: Dict[ str, - Union[WorkflowImageSelector, WorkflowParameterSelector(), StepOutputSelector()], + Union[ + WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() + ], ] = Field( description="References data to be used to construct each and every column", examples=[ diff --git a/inference/core/workflows/core_steps/analytics/line_counter/v1.py b/inference/core/workflows/core_steps/analytics/line_counter/v1.py index e1892119c..b31b41195 100644 --- a/inference/core/workflows/core_steps/analytics/line_counter/v1.py +++ b/inference/core/workflows/core_steps/analytics/line_counter/v1.py @@ -14,7 +14,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, WorkflowVideoMetadataSelector, ) @@ -50,7 +50,7 @@ class LineCounterManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/line_counter@v1"] metadata: WorkflowVideoMetadataSelector - detections: StepOutputSelector( + detections: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -60,7 +60,7 @@ class LineCounterManifest(WorkflowBlockManifest): examples=["$steps.object_detection_model.predictions"], ) - line_segment: Union[list, StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + line_segment: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points. 
For line [[0, 100], [100, 100]] line will count objects entering from the bottom as IN", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) diff --git a/inference/core/workflows/core_steps/analytics/line_counter/v2.py b/inference/core/workflows/core_steps/analytics/line_counter/v2.py index b9657842c..e54edfc52 100644 --- a/inference/core/workflows/core_steps/analytics/line_counter/v2.py +++ b/inference/core/workflows/core_steps/analytics/line_counter/v2.py @@ -14,7 +14,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -55,7 +55,7 @@ class LineCounterManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/line_counter@v2"] image: WorkflowImageSelector - detections: StepOutputSelector( + detections: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -65,7 +65,7 @@ class LineCounterManifest(WorkflowBlockManifest): examples=["$steps.object_detection_model.predictions"], ) - line_segment: Union[list, StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + line_segment: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points. For line [[0, 100], [100, 100]] line will count objects entering from the bottom as IN", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) diff --git a/inference/core/workflows/core_steps/analytics/path_deviation/v1.py b/inference/core/workflows/core_steps/analytics/path_deviation/v1.py index 276e27caf..c846b9b46 100644 --- a/inference/core/workflows/core_steps/analytics/path_deviation/v1.py +++ b/inference/core/workflows/core_steps/analytics/path_deviation/v1.py @@ -17,7 +17,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, WorkflowVideoMetadataSelector, ) @@ -53,7 +53,7 @@ class PathDeviationManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/path_deviation_analytics@v1"] metadata: WorkflowVideoMetadataSelector - detections: StepOutputSelector( + detections: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -67,7 +67,7 @@ class PathDeviationManifest(WorkflowBlockManifest): default="CENTER", examples=["CENTER"], ) - reference_path: Union[list, StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + reference_path: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.expected_path"], ) diff --git a/inference/core/workflows/core_steps/analytics/path_deviation/v2.py b/inference/core/workflows/core_steps/analytics/path_deviation/v2.py index 4fedef16e..06c2611ff 100644 --- a/inference/core/workflows/core_steps/analytics/path_deviation/v2.py +++ b/inference/core/workflows/core_steps/analytics/path_deviation/v2.py @@ -17,7 +17,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -54,7 +54,7 @@ class 
PathDeviationManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/path_deviation_analytics@v2"] image: WorkflowImageSelector - detections: StepOutputSelector( + detections: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -68,7 +68,7 @@ class PathDeviationManifest(WorkflowBlockManifest): default="CENTER", examples=["CENTER"], ) - reference_path: Union[list, StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + reference_path: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.expected_path"], ) diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py index 5c3c78d4a..f5cc36f92 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py @@ -19,8 +19,8 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, + BatchOfDataSelector, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, WorkflowVideoMetadataSelector, @@ -58,7 +58,7 @@ class TimeInZoneManifest(WorkflowBlockManifest): examples=["$inputs.image", "$steps.cropping.crops"], ) metadata: WorkflowVideoMetadataSelector - detections: StepOutputSelector( + detections: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -67,7 +67,7 @@ class TimeInZoneManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - zone: Union[list, StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Zones (one for each batch) in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.zones"], ) diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py index 359f51473..623a88c28 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py @@ -18,7 +18,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -59,7 +59,7 @@ class TimeInZoneManifest(WorkflowBlockManifest): description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], ) - detections: StepOutputSelector( + detections: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -68,7 +68,7 @@ class TimeInZoneManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - zone: Union[list, StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Zones (one 
for each batch) in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.zones"], ) diff --git a/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py b/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py index b51d91c93..7422d63f5 100644 --- a/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py @@ -10,7 +10,7 @@ INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -46,7 +46,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/distance_measurement@v1"] - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py index a6b803608..e803b171f 100644 --- a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py @@ -12,8 +12,8 @@ INTEGER_KIND, RGB_COLOR_KIND, STRING_KIND, + BatchOfDataSelector, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -48,7 +48,7 @@ class ColorPixelCountManifest(WorkflowBlockManifest): ) target_color: Union[ WorkflowParameterSelector(kind=[STRING_KIND]), - StepOutputSelector(kind=[RGB_COLOR_KIND]), + BatchOfDataSelector(kind=[RGB_COLOR_KIND]), str, Tuple[int, int, int], ] = Field( diff --git a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py index 890818506..1bca70f36 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py @@ -9,7 +9,7 @@ BOOLEAN_KIND, INTEGER_KIND, NUMPY_ARRAY_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -39,11 +39,11 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/sift_comparison@v1"] - descriptor_1: StepOutputSelector(kind=[NUMPY_ARRAY_KIND]) = Field( + descriptor_1: BatchOfDataSelector(kind=[NUMPY_ARRAY_KIND]) = Field( description="Reference to SIFT descriptors from the first image to compare", examples=["$steps.sift.descriptors"], ) - descriptor_2: StepOutputSelector(kind=[NUMPY_ARRAY_KIND]) = Field( + descriptor_2: BatchOfDataSelector(kind=[NUMPY_ARRAY_KIND]) = Field( description="Reference to SIFT descriptors from the second image to compare", examples=["$steps.sift.descriptors"], ) diff --git a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py index 3b401a073..e3ba85b19 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py +++ b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py @@ -17,8 +17,8 @@ INTEGER_KIND, NUMPY_ARRAY_KIND, STRING_KIND, + BatchOfDataSelector, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -52,7 +52,7 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): input_1: Union[ 
WorkflowImageSelector, StepOutputImageSelector, - StepOutputSelector(kind=[NUMPY_ARRAY_KIND]), + BatchOfDataSelector(kind=[NUMPY_ARRAY_KIND]), ] = Field( description="Reference to Image or SIFT descriptors from the first image to compare", examples=["$inputs.image1", "$steps.sift.descriptors"], @@ -60,7 +60,7 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): input_2: Union[ WorkflowImageSelector, StepOutputImageSelector, - StepOutputSelector(kind=[NUMPY_ARRAY_KIND]), + BatchOfDataSelector(kind=[NUMPY_ARRAY_KIND]), ] = Field( description="Reference to Image or SIFT descriptors from the second image to compare", examples=["$inputs.image2", "$steps.sift.descriptors"], diff --git a/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py b/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py index 7ee90ea86..f75ed53ee 100644 --- a/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py @@ -14,7 +14,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -55,7 +55,7 @@ class SizeMeasurementManifest(WorkflowBlockManifest): } ) type: Literal[f"roboflow_core/size_measurement@v1"] - reference_predictions: StepOutputSelector( + reference_predictions: BatchOfDataSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, @@ -64,7 +64,7 @@ class SizeMeasurementManifest(WorkflowBlockManifest): description="Predictions from the reference object model", examples=["$segmentation.reference_predictions"], ) - object_predictions: StepOutputSelector( + object_predictions: BatchOfDataSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/flow_control/continue_if/v1.py b/inference/core/workflows/core_steps/flow_control/continue_if/v1.py index e3a78607b..3eb011e94 100644 --- a/inference/core/workflows/core_steps/flow_control/continue_if/v1.py +++ b/inference/core/workflows/core_steps/flow_control/continue_if/v1.py @@ -10,7 +10,7 @@ ) from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - StepOutputSelector, + BatchOfDataSelector, StepSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -63,7 +63,9 @@ class BlockManifest(WorkflowBlockManifest): ) evaluation_parameters: Dict[ str, - Union[WorkflowImageSelector, WorkflowParameterSelector(), StepOutputSelector()], + Union[ + WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() + ], ] = Field( description="References to additional parameters that may be provided in runtime to parametrise operations", examples=[{"left": "$inputs.some"}], diff --git a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py index 17c03e324..babf16677 100644 --- a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py +++ b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py @@ -5,7 +5,7 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - StepOutputSelector, + BatchOfDataSelector, StepSelector, WorkflowImageSelector, WorkflowParameterSelector, 
@@ -62,7 +62,7 @@ class RateLimiterManifest(WorkflowBlockManifest): ge=0.0, ) depends_on: Union[ - WorkflowImageSelector, WorkflowParameterSelector(), StepOutputSelector() + WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() ] = Field( description="Reference to any output of the the step which immediately preceeds this branch.", examples=["$steps.model"], diff --git a/inference/core/workflows/core_steps/formatters/csv/v1.py b/inference/core/workflows/core_steps/formatters/csv/v1.py index efc1484fd..da13ca5ab 100644 --- a/inference/core/workflows/core_steps/formatters/csv/v1.py +++ b/inference/core/workflows/core_steps/formatters/csv/v1.py @@ -16,10 +16,8 @@ OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BOOLEAN_KIND, - INTEGER_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -142,7 +140,7 @@ class BlockManifest(WorkflowBlockManifest): Union[ WorkflowImageSelector, WorkflowParameterSelector(), - StepOutputSelector(), + BatchOfDataSelector(), str, int, float, diff --git a/inference/core/workflows/core_steps/formatters/expression/v1.py b/inference/core/workflows/core_steps/formatters/expression/v1.py index 7bbc2d7fc..51a951b66 100644 --- a/inference/core/workflows/core_steps/formatters/expression/v1.py +++ b/inference/core/workflows/core_steps/formatters/expression/v1.py @@ -16,7 +16,7 @@ ) from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - StepOutputSelector, + BatchOfDataSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -109,7 +109,9 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/expression@v1", "Expression"] data: Dict[ str, - Union[WorkflowImageSelector, WorkflowParameterSelector(), StepOutputSelector()], + Union[ + WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() + ], ] = Field( description="References data to be used to construct results", examples=[ diff --git a/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py b/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py index b3c05ce53..b50da32cd 100644 --- a/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py +++ b/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py @@ -6,7 +6,7 @@ Batch, OutputDefinition, ) -from inference.core.workflows.execution_engine.entities.types import StepOutputSelector +from inference.core.workflows.execution_engine.entities.types import BatchOfDataSelector from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -35,7 +35,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal[ "roboflow_core/first_non_empty_or_default@v1", "FirstNonEmptyOrDefault" ] - data: List[StepOutputSelector()] = Field( + data: List[BatchOfDataSelector()] = Field( description="Reference data to replace empty values", examples=["$steps.my_step.predictions"], min_items=1, diff --git a/inference/core/workflows/core_steps/formatters/json_parser/v1.py b/inference/core/workflows/core_steps/formatters/json_parser/v1.py index 0e1a0f1b3..a8dec3c80 100644 --- a/inference/core/workflows/core_steps/formatters/json_parser/v1.py +++ b/inference/core/workflows/core_steps/formatters/json_parser/v1.py @@ -10,7 +10,7 @@ from inference.core.workflows.execution_engine.entities.types import ( 
BOOLEAN_KIND, LANGUAGE_MODEL_OUTPUT_KIND, - StepOutputSelector, + BatchOfDataSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -63,7 +63,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/json_parser@v1"] - raw_json: StepOutputSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( + raw_json: BatchOfDataSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( description="The string with raw JSON to parse.", examples=[["$steps.lmm.output"]], ) diff --git a/inference/core/workflows/core_steps/formatters/property_definition/v1.py b/inference/core/workflows/core_steps/formatters/property_definition/v1.py index a5669a220..5551866fc 100644 --- a/inference/core/workflows/core_steps/formatters/property_definition/v1.py +++ b/inference/core/workflows/core_steps/formatters/property_definition/v1.py @@ -14,7 +14,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputSelector, + BatchOfDataSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -60,7 +60,7 @@ class BlockManifest(WorkflowBlockManifest): "PropertyDefinition", "PropertyExtraction", ] - data: StepOutputSelector( + data: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py index ee07cd771..478f776cb 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py @@ -16,8 +16,8 @@ LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, STRING_KIND, + BatchOfDataSelector, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -70,14 +70,14 @@ class BlockManifest(WorkflowBlockManifest): description="The image which was the base to generate VLM prediction", examples=["$inputs.image", "$steps.cropping.crops"], ) - vlm_output: StepOutputSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( + vlm_output: BatchOfDataSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( title="VLM Output", description="The string with raw classification prediction to parse.", examples=[["$steps.lmm.output"]], ) classes: Union[ WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), - StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), + BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), List[str], ] = Field( description="List of all classes used by the model, required to " diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py index 8ebb1c7af..2e527e088 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py @@ -31,8 +31,8 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, + BatchOfDataSelector, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -90,14 +90,15 @@ class BlockManifest(WorkflowBlockManifest): "long_description": LONG_DESCRIPTION, "license": "Apache-2.0", "block_type": "formatter", - } + }, + protected_namespaces=(), ) type: Literal["roboflow_core/vlm_as_detector@v1"] image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( description="The image which was the base to generate VLM prediction", 
examples=["$inputs.image", "$steps.cropping.crops"], ) - vlm_output: StepOutputSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( + vlm_output: BatchOfDataSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( title="VLM Output", description="The string with raw classification prediction to parse.", examples=[["$steps.lmm.output"]], @@ -105,7 +106,7 @@ class BlockManifest(WorkflowBlockManifest): classes: Optional[ Union[ WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), - StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), + BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), List[str], ] ] = Field( diff --git a/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py b/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py index 26bfb287e..7856e7870 100644 --- a/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py @@ -19,7 +19,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputSelector, + BatchOfDataSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -54,7 +54,7 @@ class BlockManifest(WorkflowBlockManifest): "roboflow_core/detections_classes_replacement@v1", "DetectionsClassesReplacement", ] - object_detection_predictions: StepOutputSelector( + object_detection_predictions: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -65,7 +65,7 @@ class BlockManifest(WorkflowBlockManifest): description="The output of a detection model describing the bounding boxes that will have classes replaced.", examples=["$steps.my_object_detection_model.predictions"], ) - classification_predictions: StepOutputSelector( + classification_predictions: BatchOfDataSelector( kind=[CLASSIFICATION_PREDICTION_KIND] ) = Field( title="Classification results for crops", diff --git a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py index c3ef35aff..47f50ec37 100644 --- a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py @@ -35,8 +35,8 @@ KEYPOINT_DETECTION_PREDICTION_KIND, LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, + BatchOfDataSelector, FloatZeroToOne, - StepOutputSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -81,7 +81,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/detections_consensus@v1", "DetectionsConsensus"] predictions_batches: List[ - StepOutputSelector( + BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py index 1221e7f65..81b918cc2 100644 --- a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py @@ -23,9 +23,9 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, + BatchOfDataSelector, FloatZeroToOne, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -63,7 +63,7 @@ class BlockManifest(WorkflowBlockManifest): description="Image that was origin to take 
crops that yielded predictions.", examples=["$inputs.image"], ) - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py index a9932f996..d59364e43 100644 --- a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py +++ b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py @@ -8,7 +8,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( LIST_OF_VALUES_KIND, - StepOutputSelector, + BatchOfDataSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -42,7 +42,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/dimension_collapse@v1", "DimensionCollapse"] - data: StepOutputSelector() = Field( + data: BatchOfDataSelector() = Field( description="Reference to step outputs at depth level n to be concatenated and moved into level n-1.", examples=["$steps.ocr_step.results"], ) diff --git a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py index 3b42cf520..01a3c4171 100644 --- a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py @@ -95,7 +95,8 @@ class BlockManifest(WorkflowBlockManifest): "search_keywords": ["LMM", "VLM", "Claude", "Anthropic"], "is_vlm_block": True, "task_type_property": "task_type", - } + }, + protected_namespaces=(), ) type: Literal["roboflow_core/anthropic_claude@v1"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py index 930977a6a..cedd65eea 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py @@ -22,9 +22,9 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, + BatchOfDataSelector, ImageInputField, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -218,7 +218,7 @@ class BlockManifest(WorkflowBlockManifest): Union[ List[int], List[float], - StepOutputSelector( + BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py index 4f7a6285c..ce6908f42 100644 --- a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py @@ -104,7 +104,8 @@ class BlockManifest(WorkflowBlockManifest): "beta": True, "is_vlm_block": True, "task_type_property": "task_type", - } + }, + protected_namespaces=(), ) type: Literal["roboflow_core/google_gemini@v1"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v2.py b/inference/core/workflows/core_steps/models/foundation/openai/v2.py index 0903d9d3a..b9d9ae379 100644 --- a/inference/core/workflows/core_steps/models/foundation/openai/v2.py +++ 
b/inference/core/workflows/core_steps/models/foundation/openai/v2.py @@ -94,7 +94,8 @@ class BlockManifest(WorkflowBlockManifest): "search_keywords": ["LMM", "VLM", "ChatGPT", "GPT", "OpenAI"], "is_vlm_block": True, "task_type_property": "task_type", - } + }, + protected_namespaces=(), ) type: Literal["roboflow_core/open_ai@v2"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField diff --git a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py index c005c20c6..20f73d630 100644 --- a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py @@ -37,9 +37,9 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, + BatchOfDataSelector, ImageInputField, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -82,7 +82,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/segment_anything@v1"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField boxes: Optional[ - StepOutputSelector( + BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py index d5e5e0db7..1be95d083 100644 --- a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py @@ -19,8 +19,8 @@ IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, STRING_KIND, + BatchOfDataSelector, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -68,7 +68,7 @@ class BlockManifest(WorkflowBlockManifest): description="The image which was the base to generate VLM prediction", examples=["$inputs.image", "$steps.cropping.crops"], ) - segmentation_mask: StepOutputSelector( + segmentation_mask: BatchOfDataSelector( kind=[INSTANCE_SEGMENTATION_PREDICTION_KIND] ) = Field( name="Segmentation Mask", @@ -77,7 +77,7 @@ class BlockManifest(WorkflowBlockManifest): ) prompt: Union[ WorkflowParameterSelector(kind=[STRING_KIND]), - StepOutputSelector(kind=[STRING_KIND]), + BatchOfDataSelector(kind=[STRING_KIND]), str, ] = Field( description="Prompt to inpainting model (what you wish to see)", @@ -86,7 +86,7 @@ class BlockManifest(WorkflowBlockManifest): negative_prompt: Optional[ Union[ WorkflowParameterSelector(kind=[STRING_KIND]), - StepOutputSelector(kind=[STRING_KIND]), + BatchOfDataSelector(kind=[STRING_KIND]), str, ] ] = Field( diff --git a/inference/core/workflows/core_steps/sinks/email_notification/v1.py b/inference/core/workflows/core_steps/sinks/email_notification/v1.py index 954b2fc0f..4473571fe 100644 --- a/inference/core/workflows/core_steps/sinks/email_notification/v1.py +++ b/inference/core/workflows/core_steps/sinks/email_notification/v1.py @@ -28,7 +28,7 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -59,7 +59,7 @@ message using dynamic parameters: ``` -message = "This is example notification. 
Predicted classes: {{ $parameters.predicted_classes }}" +message = "This is example notification. Predicted classes: \{\{ $parameters.predicted_classes \}\}" ``` Message parameters are delivered by Workflows Execution Engine by setting proper data selectors in @@ -174,7 +174,7 @@ class BlockManifest(WorkflowBlockManifest): message: str = Field( description="Content of the message to be send", examples=[ - "During last 5 minutes detected {{ $parameters.num_instances }} instances" + "During last 5 minutes detected \{\{ $parameters.num_instances \}\} instances" ], ) sender_email: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( @@ -213,7 +213,9 @@ class BlockManifest(WorkflowBlockManifest): ) message_parameters: Dict[ str, - Union[WorkflowParameterSelector(), StepOutputSelector(), str, int, float, bool], + Union[ + WorkflowParameterSelector(), BatchOfDataSelector(), str, int, float, bool + ], ] = Field( description="References data to be used to construct each and every column", examples=[ @@ -235,7 +237,7 @@ class BlockManifest(WorkflowBlockManifest): ], default_factory=dict, ) - attachments: Dict[str, StepOutputSelector(kind=[STRING_KIND])] = Field( + attachments: Dict[str, BatchOfDataSelector(kind=[STRING_KIND])] = Field( description="Attachments", default_factory=dict, examples=[{"report.cvs": "$steps.csv_formatter.csv_content"}], diff --git a/inference/core/workflows/core_steps/sinks/local_file/v1.py b/inference/core/workflows/core_steps/sinks/local_file/v1.py index 11b638c4d..a9d22ea94 100644 --- a/inference/core/workflows/core_steps/sinks/local_file/v1.py +++ b/inference/core/workflows/core_steps/sinks/local_file/v1.py @@ -11,7 +11,7 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -77,7 +77,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/local_file_sink@v1"] - content: StepOutputSelector(kind=[STRING_KIND]) = Field( + content: BatchOfDataSelector(kind=[STRING_KIND]) = Field( description="Content of the file to save", examples=["$steps.csv_formatter.csv_content"], ) diff --git a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py index ffc83eaa3..91489515e 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py @@ -20,7 +20,7 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -55,7 +55,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/roboflow_custom_metadata@v1", "RoboflowCustomMetadata"] - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -69,7 +69,7 @@ class BlockManifest(WorkflowBlockManifest): field_value: Union[ str, WorkflowParameterSelector(kind=[STRING_KIND]), - StepOutputSelector(kind=[STRING_KIND]), + BatchOfDataSelector(kind=[STRING_KIND]), ] = Field( description="This is the name of the metadata field you are creating", examples=["toronto", "pass", "fail"], diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py 
b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py index a1ee3e75b..6f353186b 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py @@ -63,9 +63,9 @@ OBJECT_DETECTION_PREDICTION_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, + BatchOfDataSelector, ImageInputField, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -106,7 +106,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/roboflow_dataset_upload@v1", "RoboflowDatasetUpload"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField predictions: Optional[ - StepOutputSelector( + BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py index efc511200..646f8afb9 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py @@ -25,9 +25,9 @@ OBJECT_DETECTION_PREDICTION_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, + BatchOfDataSelector, ImageInputField, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -82,7 +82,7 @@ class BlockManifest(WorkflowBlockManifest): json_schema_extra={"hidden": True}, ) predictions: Optional[ - StepOutputSelector( + BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/sinks/webhook/v1.py b/inference/core/workflows/core_steps/sinks/webhook/v1.py index ef3f3a0d9..26dd197f4 100644 --- a/inference/core/workflows/core_steps/sinks/webhook/v1.py +++ b/inference/core/workflows/core_steps/sinks/webhook/v1.py @@ -27,7 +27,7 @@ ROBOFLOW_PROJECT_KIND, STRING_KIND, TOP_CLASS_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -174,7 +174,7 @@ class BlockManifest(WorkflowBlockManifest): str, Union[ WorkflowParameterSelector(kind=QUERY_PARAMS_KIND), - StepOutputSelector(kind=QUERY_PARAMS_KIND), + BatchOfDataSelector(kind=QUERY_PARAMS_KIND), str, float, bool, @@ -190,7 +190,7 @@ class BlockManifest(WorkflowBlockManifest): str, Union[ WorkflowParameterSelector(kind=HEADER_KIND), - StepOutputSelector(kind=HEADER_KIND), + BatchOfDataSelector(kind=HEADER_KIND), str, float, bool, @@ -205,7 +205,7 @@ class BlockManifest(WorkflowBlockManifest): str, Union[ WorkflowParameterSelector(), - StepOutputSelector(), + BatchOfDataSelector(), str, float, bool, @@ -234,7 +234,7 @@ class BlockManifest(WorkflowBlockManifest): str, Union[ WorkflowParameterSelector(), - StepOutputSelector(), + BatchOfDataSelector(), str, float, bool, @@ -266,7 +266,7 @@ class BlockManifest(WorkflowBlockManifest): str, Union[ WorkflowParameterSelector(), - StepOutputSelector(), + BatchOfDataSelector(), str, float, bool, diff --git a/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py b/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py index 49798474a..6e0312d24 100644 --- a/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py +++ b/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py @@ -14,7 +14,7 @@ from 
inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( INSTANCE_SEGMENTATION_PREDICTION_KIND, - StepOutputSelector, + BatchOfDataSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -47,7 +47,7 @@ class BoundingRectManifest(WorkflowBlockManifest): } ) type: Literal[f"roboflow_core/bounding_rect@v1"] - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py index 3ef4aee75..205e51a9f 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py @@ -12,7 +12,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, WorkflowVideoMetadataSelector, ) @@ -52,7 +52,7 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/byte_tracker@v1"] metadata: WorkflowVideoMetadataSelector - detections: StepOutputSelector( + detections: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py index b6ecfab10..d99a2293d 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py @@ -13,7 +13,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -58,7 +58,7 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/byte_tracker@v2"] image: WorkflowImageSelector - detections: StepOutputSelector( + detections: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py index 9f0e88ddf..4290c3405 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py @@ -14,7 +14,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -73,7 +73,7 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/byte_tracker@v3"] image: WorkflowImageSelector - detections: StepOutputSelector( + detections: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py index 21dbc4f87..0d98c9b68 100644 --- a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py +++ b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py @@ -19,7 +19,7 @@ INTEGER_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, 
OBJECT_DETECTION_PREDICTION_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -51,7 +51,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/detection_offset@v1", "DetectionOffset"] - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py index bae47260b..2ed841f45 100644 --- a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py @@ -18,7 +18,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -71,7 +71,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/detections_filter@v1", "DetectionsFilter"] - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -86,7 +86,9 @@ class BlockManifest(WorkflowBlockManifest): ) operations_parameters: Dict[ str, - Union[WorkflowImageSelector, WorkflowParameterSelector(), StepOutputSelector()], + Union[ + WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() + ], ] = Field( description="References to additional parameters that may be provided in runtime to parametrise operations", examples=[ diff --git a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py index 67e6757fc..0629ca4df 100644 --- a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py @@ -24,7 +24,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -85,7 +85,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal[ "roboflow_core/detections_transformation@v1", "DetectionsTransformation" ] - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -101,7 +101,9 @@ class BlockManifest(WorkflowBlockManifest): ) operations_parameters: Dict[ str, - Union[WorkflowImageSelector, WorkflowParameterSelector(), StepOutputSelector()], + Union[ + WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() + ], ] = Field( description="References to additional parameters that may be provided in runtime to parameterize operations", examples=[ diff --git a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py index 60ce2b988..1b381ab5f 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py @@ -22,8 +22,8 @@ OBJECT_DETECTION_PREDICTION_KIND, RGB_COLOR_KIND, STRING_KIND, + BatchOfDataSelector, StepOutputImageSelector, - 
StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -66,7 +66,7 @@ class BlockManifest(WorkflowBlockManifest): examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("images", "image"), ) - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -98,7 +98,7 @@ class BlockManifest(WorkflowBlockManifest): ) background_color: Union[ WorkflowParameterSelector(kind=[STRING_KIND]), - StepOutputSelector(kind=[RGB_COLOR_KIND]), + BatchOfDataSelector(kind=[RGB_COLOR_KIND]), str, Tuple[int, int, int], ] = Field( diff --git a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py index b2dc378ae..612e80b46 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py @@ -13,7 +13,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -49,7 +49,7 @@ class DynamicZonesManifest(WorkflowBlockManifest): } ) type: Literal[f"{TYPE}", "DynamicZone"] - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py index a2fd515dd..6ccb46dd9 100644 --- a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py +++ b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py @@ -23,8 +23,8 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, + BatchOfDataSelector, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -62,7 +62,7 @@ class PerspectiveCorrectionManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/perspective_correction@v1", "PerspectiveCorrection"] predictions: Optional[ - StepOutputSelector( + BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -79,7 +79,7 @@ class PerspectiveCorrectionManifest(WorkflowBlockManifest): examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("images", "image"), ) - perspective_polygons: Union[list, StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + perspective_polygons: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Perspective polygons (for each batch at least one must be consisting of 4 vertices)", examples=["$steps.perspective_wrap.zones"], ) diff --git a/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py b/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py index 5a96b27be..384c46e68 100644 --- a/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py +++ b/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py @@ -14,7 +14,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputSelector, + BatchOfDataSelector, 
WorkflowImageSelector, WorkflowParameterSelector, ) @@ -47,7 +47,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/stabilize_detections@v1"] image: WorkflowImageSelector - detections: StepOutputSelector( + detections: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/visualizations/common/base.py b/inference/core/workflows/core_steps/visualizations/common/base.py index bdaa6e0aa..77f80af48 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base.py +++ b/inference/core/workflows/core_steps/visualizations/common/base.py @@ -14,8 +14,8 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, + BatchOfDataSelector, StepOutputImageSelector, - StepOutputSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -80,7 +80,7 @@ def run( class PredictionsVisualizationManifest(VisualizationManifest, ABC): - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/visualizations/halo/v1.py b/inference/core/workflows/core_steps/visualizations/halo/v1.py index 7d74b78f8..e2ba6caa7 100644 --- a/inference/core/workflows/core_steps/visualizations/halo/v1.py +++ b/inference/core/workflows/core_steps/visualizations/halo/v1.py @@ -18,8 +18,8 @@ FLOAT_ZERO_TO_ONE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, + BatchOfDataSelector, FloatZeroToOne, - StepOutputSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -46,7 +46,7 @@ class HaloManifest(ColorableVisualizationManifest): } ) - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/visualizations/line_zone/v1.py b/inference/core/workflows/core_steps/visualizations/line_zone/v1.py index eb3800fcc..d68d53865 100644 --- a/inference/core/workflows/core_steps/visualizations/line_zone/v1.py +++ b/inference/core/workflows/core_steps/visualizations/line_zone/v1.py @@ -19,8 +19,8 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, STRING_KIND, + BatchOfDataSelector, FloatZeroToOne, - StepOutputSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -47,7 +47,7 @@ class LineCounterZoneVisualizationManifest(VisualizationManifest): "block_type": "visualization", } ) - zone: Union[list, StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points.", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) @@ -71,13 +71,13 @@ class LineCounterZoneVisualizationManifest(VisualizationManifest): default=1.0, examples=[1.0, "$inputs.text_scale"], ) - count_in: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND]), StepOutputSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + count_in: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND]), BatchOfDataSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Reference to the number of objects that crossed into 
the line zone.", default=0, examples=["$steps.line_counter.count_in"], json_schema_extra={"always_visible": True}, ) - count_out: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND]), StepOutputSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + count_out: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND]), BatchOfDataSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Reference to the number of objects that crossed out of the line zone.", default=0, examples=["$steps.line_counter.count_out"], diff --git a/inference/core/workflows/core_steps/visualizations/mask/v1.py b/inference/core/workflows/core_steps/visualizations/mask/v1.py index 975390ea9..9933d8183 100644 --- a/inference/core/workflows/core_steps/visualizations/mask/v1.py +++ b/inference/core/workflows/core_steps/visualizations/mask/v1.py @@ -14,8 +14,8 @@ from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, + BatchOfDataSelector, FloatZeroToOne, - StepOutputSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -42,7 +42,7 @@ class MaskManifest(ColorableVisualizationManifest): } ) - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py b/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py index d3e2484a1..8bf69451b 100644 --- a/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py +++ b/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py @@ -19,8 +19,8 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, + BatchOfDataSelector, FloatZeroToOne, - StepOutputSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -52,7 +52,7 @@ class ModelComparisonManifest(VisualizationManifest): } ) - predictions_a: StepOutputSelector( + predictions_a: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -69,7 +69,7 @@ class ModelComparisonManifest(VisualizationManifest): examples=["GREEN", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.color_a"], ) - predictions_b: StepOutputSelector( + predictions_b: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/visualizations/polygon/v1.py b/inference/core/workflows/core_steps/visualizations/polygon/v1.py index e93d056cc..8bad2b931 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon/v1.py +++ b/inference/core/workflows/core_steps/visualizations/polygon/v1.py @@ -17,7 +17,7 @@ from inference.core.workflows.execution_engine.entities.types import ( INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -44,7 +44,7 @@ class PolygonManifest(ColorableVisualizationManifest): } ) - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py b/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py index f63cdd1c8..acd9762e0 100644 --- 
a/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py +++ b/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py @@ -17,8 +17,8 @@ FLOAT_ZERO_TO_ONE_KIND, LIST_OF_VALUES_KIND, STRING_KIND, + BatchOfDataSelector, FloatZeroToOne, - StepOutputSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -45,7 +45,7 @@ class PolygonZoneVisualizationManifest(VisualizationManifest): "block_type": "visualization", } ) - zone: Union[list, StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Polygon zones (one for each batch) in a format [[(x1, y1), (x2, y2), (x3, y3), ...], ...];" " each zone must consist of more than 2 points", examples=["$inputs.zones"], diff --git a/inference/core/workflows/core_steps/visualizations/reference_path/v1.py b/inference/core/workflows/core_steps/visualizations/reference_path/v1.py index 619728b07..5bb385e2c 100644 --- a/inference/core/workflows/core_steps/visualizations/reference_path/v1.py +++ b/inference/core/workflows/core_steps/visualizations/reference_path/v1.py @@ -14,7 +14,7 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -45,7 +45,7 @@ class ReferencePathVisualizationManifest(VisualizationManifest): ) reference_path: Union[ list, - StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), + BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), ] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index 02f02e1d4..439f32185 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -1023,6 +1023,7 @@ def __hash__(self) -> int: STEP_AS_SELECTED_ELEMENT = "step" STEP_OUTPUT_AS_SELECTED_ELEMENT = "step_output" +BATCH_OF_DATA_AS_SELECTED_ELEMENT = "batch_of_data" StepSelector = Annotated[ str, @@ -1058,10 +1059,25 @@ def StepOutputSelector(kind: Optional[List[Kind]] = None): KIND_KEY: [k.dict() for k in kind], SELECTOR_POINTS_TO_BATCH_KEY: True, } + return Annotated[ + str, + StringConstraints(pattern=r"^\$steps\.[A-Za-z_\-0-9]+\.[A-Za-z_*0-9\-]+$"), + Field(json_schema_extra=json_schema_extra), + ] + + +def BatchOfDataSelector(kind: Optional[List[Kind]] = None): + if kind is None: + kind = [WILDCARD_KIND] + json_schema_extra = { + REFERENCE_KEY: True, + SELECTED_ELEMENT_KEY: BATCH_OF_DATA_AS_SELECTED_ELEMENT, + KIND_KEY: [k.dict() for k in kind], + SELECTOR_POINTS_TO_BATCH_KEY: True, + } return Annotated[ str, StringConstraints( - # pattern=r"^\$steps\.[A-Za-z_\-0-9]+\.[A-Za-z_*0-9\-]+$" pattern=r"(^\$steps\.[A-Za-z_\-0-9]+\.[A-Za-z_*0-9\-]+$)|(^\$inputs.[A-Za-z_0-9\-]+$)" ), Field(json_schema_extra=json_schema_extra), diff --git a/inference/core/workflows/execution_engine/introspection/connections_discovery.py b/inference/core/workflows/execution_engine/introspection/connections_discovery.py index 7aec6d53a..33ef13f68 100644 --- 
a/inference/core/workflows/execution_engine/introspection/connections_discovery.py +++ b/inference/core/workflows/execution_engine/introspection/connections_discovery.py @@ -2,6 +2,7 @@ from typing import Dict, Generator, List, Set, Tuple, Type from inference.core.workflows.execution_engine.entities.types import ( + BATCH_OF_DATA_AS_SELECTED_ELEMENT, STEP_AS_SELECTED_ELEMENT, STEP_OUTPUT_AS_SELECTED_ELEMENT, WILDCARD_KIND, @@ -40,9 +41,13 @@ def discover_blocks_connections( blocks_description=blocks_description, all_schemas=all_schemas, ) + compatible_elements = { + STEP_OUTPUT_AS_SELECTED_ELEMENT, + BATCH_OF_DATA_AS_SELECTED_ELEMENT, + } coarse_input_kind2schemas = convert_kinds_mapping_to_block_wise_format( detailed_input_kind2schemas=detailed_input_kind2schemas, - compatible_elements={STEP_OUTPUT_AS_SELECTED_ELEMENT}, + compatible_elements=compatible_elements, ) input_property_wise_connections = {} output_property_wise_connections = {} @@ -51,6 +56,7 @@ def discover_blocks_connections( starting_block=block_type, all_schemas=all_schemas, output_kind2schemas=output_kind2schemas, + compatible_elements=compatible_elements, ) manifest_type = block_type2manifest_type[block_type] output_property_wise_connections[block_type] = ( @@ -167,12 +173,13 @@ def discover_block_input_connections( starting_block: Type[WorkflowBlock], all_schemas: Dict[Type[WorkflowBlock], BlockManifestMetadata], output_kind2schemas: Dict[str, Set[Type[WorkflowBlock]]], + compatible_elements: Set[str], ) -> Dict[str, Set[Type[WorkflowBlock]]]: result = {} for selector in all_schemas[starting_block].selectors.values(): blocks_matching_property = set() for allowed_reference in selector.allowed_references: - if allowed_reference.selected_element != STEP_OUTPUT_AS_SELECTED_ELEMENT: + if allowed_reference.selected_element not in compatible_elements: continue for single_kind in allowed_reference.kind: blocks_matching_property.update( diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py index 6355ef7c8..aa06154f5 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py @@ -12,6 +12,7 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( WILDCARD_KIND, + BatchOfDataSelector, Kind, StepOutputImageSelector, StepOutputSelector, @@ -249,6 +250,8 @@ def collect_python_types_for_selectors( result.append(WorkflowParameterSelector(kind=selector_kind)) elif selector_type is SelectorType.STEP_OUTPUT: result.append(StepOutputSelector(kind=selector_kind)) + elif selector_type is SelectorType.BATCH_OF_DATA: + result.append(BatchOfDataSelector(kind=selector_kind)) else: raise DynamicBlockError( public_message=f"Could not recognise selector type `{selector_type}` declared for input `{input_name}` " diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py index 79c52c68a..72f473b20 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py @@ -9,6 +9,7 @@ class SelectorType(Enum): STEP_OUTPUT_IMAGE = "step_output_image" INPUT_PARAMETER = "input_parameter" STEP_OUTPUT = "step_output" + BATCH_OF_DATA = 
"batch_of_data" class ValueType(Enum): diff --git a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py index 5a44ed86e..0d1883994 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py @@ -8,7 +8,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -62,7 +62,7 @@ class MixedInputWithoutBatchesBlockManifest(WorkflowBlockManifest): type: Literal["MixedInputWithoutBatchesBlock"] mixed_parameter: Union[ WorkflowParameterSelector(), - StepOutputSelector(), + BatchOfDataSelector(), Any, ] @@ -98,7 +98,7 @@ class MixedInputWithBatchesBlockManifest(WorkflowBlockManifest): type: Literal["MixedInputWithBatchesBlock"] mixed_parameter: Union[ WorkflowParameterSelector(), - StepOutputSelector(), + BatchOfDataSelector(), Any, ] @@ -138,7 +138,7 @@ class BatchInputBlockProcessingBatchesManifest(WorkflowBlockManifest): } ) type: Literal["BatchInputBlockProcessingBatches"] - batch_parameter: StepOutputSelector() + batch_parameter: BatchOfDataSelector() @classmethod def accepts_batch_input(cls) -> bool: @@ -174,7 +174,7 @@ class BatchInputBlockProcessingNotBatchesManifest(WorkflowBlockManifest): } ) type: Literal["BatchInputBlockNotProcessingBatches"] - batch_parameter: StepOutputSelector() + batch_parameter: BatchOfDataSelector() @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -239,7 +239,7 @@ class CompoundMixedInputBlockManifest(WorkflowBlockManifest): ) type: Literal["CompoundMixedInputBlockManifestBlock"] compound_parameter: Dict[ - str, Union[WorkflowParameterSelector(), StepOutputSelector(), Any] + str, Union[WorkflowParameterSelector(), BatchOfDataSelector(), Any] ] @classmethod @@ -281,7 +281,7 @@ class CompoundStrictBatchBlockManifest(WorkflowBlockManifest): } ) type: Literal["CompoundStrictBatchBlock"] - compound_parameter: Dict[str, Union[StepOutputSelector()]] + compound_parameter: Dict[str, Union[BatchOfDataSelector()]] @classmethod def accepts_batch_input(cls) -> bool: @@ -320,7 +320,7 @@ class CompoundNonStrictBatchBlockManifest(WorkflowBlockManifest): } ) type: Literal["CompoundNonStrictBatchBlock"] - compound_parameter: Dict[str, Union[StepOutputSelector()]] + compound_parameter: Dict[str, Union[BatchOfDataSelector()]] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: From 0a6fff4d2366a5d4f92f4acef48c76e0c5652272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 1 Nov 2024 17:07:29 +0100 Subject: [PATCH 09/67] WIP - add more tests --- .../v1/executor/runtime_input_assembler.py | 25 ++- .../executor/test_runtime_input_assembler.py | 145 +++++++++++++++++- 2 files changed, 162 insertions(+), 8 deletions(-) diff --git a/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py index dae4c33a9..fbe4f5c69 100644 --- a/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/runtime_input_assembler.py @@ -1,4 +1,4 @@ -from typing import Any, 
Callable, Dict, List, Optional, Union +from typing import Any, Callable, Dict, List, Optional, Union, Tuple from inference.core.workflows.errors import AssumptionError, RuntimeInputError from inference.core.workflows.execution_engine.entities.base import InputType @@ -155,12 +155,10 @@ def assemble_single_element_of_batch_oriented_input( ) -> Any: if value is None: return None - matching_deserializers = [] - for kind in defined_input.kind: - kind_name = _get_kind_name(kind=kind) - if kind_name not in kinds_deserializers: - continue - matching_deserializers.append((kind_name, kinds_deserializers[kind_name])) + matching_deserializers = _get_matching_deserializers( + defined_input=defined_input, + kinds_deserializers=kinds_deserializers, + ) if not matching_deserializers: return value parameter_identifier = defined_input.name @@ -192,6 +190,19 @@ def assemble_single_element_of_batch_oriented_input( ) +def _get_matching_deserializers( + defined_input: InputType, + kinds_deserializers: Dict[str, Callable[[str, Any], Any]], +) -> List[Tuple[str, Callable[[str, Any], Any]]]: + matching_deserializers = [] + for kind in defined_input.kind: + kind_name = _get_kind_name(kind=kind) + if kind_name not in kinds_deserializers: + continue + matching_deserializers.append((kind_name, kinds_deserializers[kind_name])) + return matching_deserializers + + def _get_kind_name(kind: Union[Kind, str]) -> str: if isinstance(kind, Kind): return kind.name diff --git a/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py b/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py index 2b866b98e..fe9f47a61 100644 --- a/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py +++ b/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py @@ -14,8 +14,10 @@ VideoMetadata, WorkflowImage, WorkflowParameter, - WorkflowVideoMetadata, + WorkflowVideoMetadata, WorkflowDataBatch, WorkflowImageData, ) +from inference.core.workflows.execution_engine.entities.types import IMAGE_KIND, STRING_KIND, INTEGER_KIND, \ + LIST_OF_VALUES_KIND, BOOLEAN_KIND, DICTIONARY_KIND, FLOAT_KIND from inference.core.workflows.execution_engine.v1.executor.runtime_input_assembler import ( assemble_runtime_parameters, ) @@ -296,10 +298,12 @@ def test_assemble_runtime_parameters_when_images_with_different_matching_batch_s }, ], "image2": np.zeros((192, 168, 3), dtype=np.uint8), + "image3": [np.zeros((192, 168, 3), dtype=np.uint8)], } defined_inputs = [ WorkflowImage(type="WorkflowImage", name="image1"), WorkflowImage(type="WorkflowImage", name="image2"), + WorkflowImage(type="WorkflowImage", name="image3"), ] # when @@ -322,6 +326,12 @@ def test_assemble_runtime_parameters_when_images_with_different_matching_batch_s assert np.allclose( result["image2"][1].numpy_image, np.zeros((192, 168, 3), dtype=np.uint8) ), "Empty image expected" + assert np.allclose( + result["image3"][0].numpy_image, np.zeros((192, 168, 3), dtype=np.uint8) + ), "Empty image expected" + assert np.allclose( + result["image3"][1].numpy_image, np.zeros((192, 168, 3), dtype=np.uint8) + ), "Empty image expected" def test_assemble_runtime_parameters_when_images_with_different_and_not_matching_batch_sizes_provided() -> ( @@ -522,3 +532,136 @@ def test_assemble_runtime_parameters_when_video_metadata_with_different_and_not_ defined_inputs=defined_inputs, kinds_deserializers=KINDS_DESERIALIZERS, ) + + +def 
test_assemble_runtime_parameters_when_parameters_at_different_dimensionality_depth_emerge() -> None: # given runtime_parameters = { "image1": [ np.zeros((192, 168, 3), dtype=np.uint8), np.zeros((192, 168, 3), dtype=np.uint8), ], "image2": [ [np.zeros((192, 168, 3), dtype=np.uint8), np.zeros((192, 168, 3), dtype=np.uint8)], [np.zeros((192, 168, 3), dtype=np.uint8),] ], "image3": [ [ [np.zeros((192, 168, 3), dtype=np.uint8)], [np.zeros((192, 168, 3), dtype=np.uint8), np.zeros((192, 168, 3), dtype=np.uint8)], ], [ [np.zeros((192, 168, 3), dtype=np.uint8)], [np.zeros((192, 168, 3), dtype=np.uint8), np.zeros((192, 168, 3), dtype=np.uint8)], [np.zeros((192, 168, 3), dtype=np.uint8)], ], ], } defined_inputs = [ WorkflowDataBatch(type="WorkflowDataBatch", name="image1", kind=["image"]), WorkflowDataBatch(type="WorkflowDataBatch", name="image2", kind=[IMAGE_KIND], dimensionality=2), WorkflowDataBatch(type="WorkflowDataBatch", name="image3", kind=["image"], dimensionality=3), ] # when result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, kinds_deserializers=KINDS_DESERIALIZERS, ) # then assert len(result["image1"]) == 2, "image1 is 1D batch of size (2, )" assert all(isinstance(e, WorkflowImageData) for e in result["image1"]), \ "Expected deserialized image data at the bottom level of batch" # then sizes_of_image2 = [len(e) for e in result["image2"]] assert sizes_of_image2 == [2, 1], "image2 is 2D batch of size [(2, ), (1, )]" assert all(isinstance(e, WorkflowImageData) for nested_batch in result["image2"] for e in nested_batch), \ "Expected deserialized image data at the bottom level of batch" sizes_of_image3 = [[len(e) for e in inner_batch] for inner_batch in result["image3"]] assert sizes_of_image3 == [[1, 2], [1, 2, 1]], "image3 is 3D batch of size [[(1, ), (2, )], [(1, ), (2, ), (1, )]]" assert all( isinstance(e, WorkflowImageData) for nested_batch in result["image3"] for inner_batch in nested_batch for e in inner_batch ), "Expected deserialized image data at the bottom level of batch" + + +def test_assemble_runtime_parameters_when_basic_types_are_passed_as_batch_oriented_inputs() -> None: # given runtime_parameters = { "string_param": ["a", "b"], "float_param": [1.0, 2.0], "int_param": [3, 4], "list_param": [["some", "list"], ["other", "list"]], "boolean_param": [False, True], "dict_param": [{"some": "dict"}, {"other": "dict"}] } defined_inputs = [ WorkflowDataBatch(type="WorkflowDataBatch", name="string_param", kind=[STRING_KIND.name]), WorkflowDataBatch(type="WorkflowDataBatch", name="float_param", kind=[FLOAT_KIND.name]), WorkflowDataBatch(type="WorkflowDataBatch", name="int_param", kind=[INTEGER_KIND]), WorkflowDataBatch(type="WorkflowDataBatch", name="list_param", kind=[LIST_OF_VALUES_KIND]), WorkflowDataBatch(type="WorkflowDataBatch", name="boolean_param", kind=[BOOLEAN_KIND]), WorkflowDataBatch(type="WorkflowDataBatch", name="dict_param", kind=[DICTIONARY_KIND]), ] # when result = assemble_runtime_parameters( runtime_parameters=runtime_parameters, defined_inputs=defined_inputs, kinds_deserializers=KINDS_DESERIALIZERS, ) # then assert result == { "string_param": ["a", "b"], "float_param": [1.0, 2.0], "int_param": [3, 4], "list_param": [["some", "list"], ["other", "list"]], "boolean_param": [False, True], "dict_param": [{"some": "dict"}, {"other": "dict"}] },
"Expected values not to be changed" + + +def test_assemble_runtime_parameters_when_input_batch_shallower_than_declared() -> None: + # given + runtime_parameters = { + "string_param": ["a", "b"], + "float_param": [1.0, 2.0], + } + defined_inputs = [ + WorkflowDataBatch(type="WorkflowDataBatch", name="string_param", kind=[STRING_KIND.name]), + WorkflowDataBatch(type="WorkflowDataBatch", name="float_param", kind=[FLOAT_KIND.name], dimensionality=2), + ] + + # when + with pytest.raises(RuntimeInputError): + _ = assemble_runtime_parameters( + runtime_parameters=runtime_parameters, + defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, + ) + + +def test_assemble_runtime_parameters_when_input_batch_deeper_than_declared() -> None: + # given + runtime_parameters = { + "string_param": ["a", "b"], + "float_param": [[1.0], [2.0]], + } + defined_inputs = [ + WorkflowDataBatch(type="WorkflowDataBatch", name="string_param", kind=[STRING_KIND.name]), + WorkflowDataBatch(type="WorkflowDataBatch", name="float_param", kind=[FLOAT_KIND.name]), + ] + + # when + result = assemble_runtime_parameters( + runtime_parameters=runtime_parameters, + defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, + ) + + pass \ No newline at end of file From 2c5a13aa56a7ec7cfd3873420f44cbcb868a11c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 4 Nov 2024 09:12:29 +0100 Subject: [PATCH 10/67] Add deserialization for more kinds to ensure ability to properly validate inputs --- .../core_steps/common/deserializers.py | 210 +++++++++++++++++- inference/core/workflows/core_steps/loader.py | 32 +++ .../stitch_ocr_detections/v1.py | 4 +- .../core_steps/visualizations/keypoint/v1.py | 4 +- .../executor/test_runtime_input_assembler.py | 13 +- 5 files changed, 251 insertions(+), 12 deletions(-) diff --git a/inference/core/workflows/core_steps/common/deserializers.py b/inference/core/workflows/core_steps/common/deserializers.py index 8d1285887..8345914cc 100644 --- a/inference/core/workflows/core_steps/common/deserializers.py +++ b/inference/core/workflows/core_steps/common/deserializers.py @@ -1,9 +1,10 @@ import os -from typing import Any, List +from typing import Any, List, Optional, Tuple, Union from uuid import uuid4 import cv2 import numpy as np +import pybase64 import supervision as sv from pydantic import ValidationError @@ -40,6 +41,8 @@ WorkflowImageData, ) +AnyNumber = Union[int, float] + def deserialize_image_kind( parameter: str, @@ -225,3 +228,208 @@ def deserialize_numpy_array(parameter: str, raw_array: Any) -> np.ndarray: if isinstance(raw_array, np.ndarray): return raw_array return np.array(raw_array) + + +def deserialize_optional_string_kind(parameter: str, value: Any) -> Optional[str]: + if value is None: + return None + return deserialize_string_kind(parameter=parameter, value=value) + + +def deserialize_string_kind(parameter: str, value: Any) -> str: + if not isinstance(value, str): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"string value, but invalid type of data found (`{type(value).__name__}`).", + context="workflow_execution | runtime_input_validation", + ) + return value + + +def deserialize_float_zero_to_one_kind(parameter: str, value: Any) -> float: + value = deserialize_float_kind(parameter=parameter, value=value) + if not (0.0 <= value <= 1.0): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"float value in range 
[0.0, 1.0], but value out of range detected.", + context="workflow_execution | runtime_input_validation", + ) + return value + + +def deserialize_float_kind(parameter: str, value: Any) -> float: + if not isinstance(value, float) and not isinstance(value, int): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"float value, but invalid type of data found (`{type(value).__name__}`).", + context="workflow_execution | runtime_input_validation", + ) + return float(value) + + +def deserialize_list_of_values_kind(parameter: str, value: Any) -> list: + if ( + not isinstance(value, list) + and not isinstance(value, set) + and not isinstance(value, tuple) + ): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"list, but invalid type of data found (`{type(value).__name__}`).", + context="workflow_execution | runtime_input_validation", + ) + if not isinstance(value, list): + return list(value) + return value + + +def deserialize_boolean_kind(parameter: str, value: Any) -> bool: + if not isinstance(value, bool): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"boolean value, but invalid type of data found (`{type(value).__name__}`).", + context="workflow_execution | runtime_input_validation", + ) + return value + + +def deserialize_integer_kind(parameter: str, value: Any) -> int: + if not isinstance(value, int): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"integer value, but invalid type of data found (`{type(value).__name__}`).", + context="workflow_execution | runtime_input_validation", + ) + return value + + +REQUIRED_CLASSIFICATION_PREDICTION_KEYS = { + "image", + "predictions", +} + + +def deserialize_classification_prediction_kind(parameter: str, value: Any) -> dict: + value = deserialize_dictionary_kind(parameter=parameter, value=value) + if any(k not in value for k in REQUIRED_CLASSIFICATION_PREDICTION_KEYS): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"classification prediction value, but found that one of required keys " + f"({list(REQUIRED_CLASSIFICATION_PREDICTION_KEYS)}) " + f"is missing.", + context="workflow_execution | runtime_input_validation", + ) + if "predicted_classes" not in value and ( + "top" not in value or "confidence" not in value + ): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"classification prediction value, but found that passed value misses " + f"prediction details.", + context="workflow_execution | runtime_input_validation", + ) + if "prediction_type" not in value: + value["prediction_type"] = "classification" + if "inference_id" not in value: + value["inference_id"] = str(uuid4()) + if "parent_id" not in value: + value["parent_id"] = parameter + if "root_parent_id" not in value: + value["root_parent_id"] = parameter + return value + + +def deserialize_dictionary_kind(parameter: str, value: Any) -> dict: + if not isinstance(value, dict): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"dict value, but invalid type of data found (`{type(value).__name__}`).", + context="workflow_execution | runtime_input_validation", + ) + return value + + +def deserialize_point_kind(parameter: str, value: Any) -> Tuple[AnyNumber, AnyNumber]: + if not 
isinstance(value, list) and not isinstance(value, tuple): raise RuntimeInputError( public_message=f"Detected runtime parameter `{parameter}` declared to hold " f"point coordinates, but invalid type of data found (`{type(value).__name__}`).", context="workflow_execution | runtime_input_validation", ) if len(value) < 2: raise RuntimeInputError( public_message=f"Detected runtime parameter `{parameter}` declared to hold " f"point coordinates, but missing point coordinates detected.", context="workflow_execution | runtime_input_validation", ) value = tuple(value[:2]) if any(not _is_number(e) for e in value): raise RuntimeInputError( public_message=f"Detected runtime parameter `{parameter}` declared to hold " f"point coordinates, but at least one of the coordinates is not a number", context="workflow_execution | runtime_input_validation", ) return value + + +def deserialize_zone_kind( parameter: str, value: Any ) -> List[List[Tuple[AnyNumber, AnyNumber]]]: if not isinstance(value, list) or len(value) < 3: raise RuntimeInputError( public_message=f"Detected runtime parameter `{parameter}` declared to hold " f"zone coordinates, but defined zone is not a list with at least 3 point coordinates.", context="workflow_execution | runtime_input_validation", ) if any( (not isinstance(e, list) and not isinstance(e, tuple)) or len(e) != 2 for e in value ): raise RuntimeInputError( public_message=f"Detected runtime parameter `{parameter}` declared to hold " f"zone coordinates, but defined zone contains at least one element which is not a point with " f"exactly two coordinates (x, y).", context="workflow_execution | runtime_input_validation", ) if any(not _is_number(e[0]) or not _is_number(e[1]) for e in value): raise RuntimeInputError( public_message=f"Detected runtime parameter `{parameter}` declared to hold " f"zone coordinates, but defined zone contains at least one element which is not a point with " f"exactly two coordinates (x, y) being numbers.", context="workflow_execution | runtime_input_validation", ) return value + + +def deserialize_rgb_color_kind( parameter: str, value: Any ) -> Union[Tuple[int, int, int], str]: if ( not isinstance(value, list) and not isinstance(value, tuple) and not isinstance(value, str) ): raise RuntimeInputError( public_message=f"Detected runtime parameter `{parameter}` declared to hold " f"RGB color, but invalid type of data found (`{type(value).__name__}`).", context="workflow_execution | runtime_input_validation", ) if isinstance(value, str): return value return tuple(value[:3]) + + +def deserialize_bytes_kind(parameter: str, value: Any) -> bytes: if not isinstance(value, str) and not isinstance(value, bytes): raise RuntimeInputError( public_message=f"Detected runtime parameter `{parameter}` declared to hold " f"bytes string, but invalid type of data found (`{type(value).__name__}`).", context="workflow_execution | runtime_input_validation", ) if isinstance(value, bytes): return value return pybase64.b64decode(value) + + +def _is_number(value: Any) -> bool: return isinstance(value, int) or isinstance(value, float) diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 57b40d4d7..82e54bea7 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -69,10 +69,23 @@ ImageThresholdBlockV1, ) from
inference.core.workflows.core_steps.common.deserializers import ( + deserialize_boolean_kind, + deserialize_bytes_kind, + deserialize_classification_prediction_kind, deserialize_detections_kind, + deserialize_dictionary_kind, + deserialize_float_kind, + deserialize_float_zero_to_one_kind, deserialize_image_kind, + deserialize_integer_kind, + deserialize_list_of_values_kind, deserialize_numpy_array, + deserialize_optional_string_kind, + deserialize_point_kind, + deserialize_rgb_color_kind, + deserialize_string_kind, deserialize_video_metadata_kind, + deserialize_zone_kind, ) from inference.core.workflows.core_steps.common.entities import StepExecutionMode from inference.core.workflows.core_steps.common.serializers import ( @@ -374,6 +387,25 @@ QR_CODE_DETECTION_KIND.name: deserialize_detections_kind, BAR_CODE_DETECTION_KIND.name: deserialize_detections_kind, NUMPY_ARRAY_KIND.name: deserialize_numpy_array, + ROBOFLOW_MODEL_ID_KIND.name: deserialize_string_kind, + ROBOFLOW_PROJECT_KIND.name: deserialize_string_kind, + ROBOFLOW_API_KEY_KIND.name: deserialize_optional_string_kind, + FLOAT_ZERO_TO_ONE_KIND.name: deserialize_float_zero_to_one_kind, + LIST_OF_VALUES_KIND.name: deserialize_list_of_values_kind, + BOOLEAN_KIND.name: deserialize_boolean_kind, + INTEGER_KIND.name: deserialize_integer_kind, + STRING_KIND.name: deserialize_string_kind, + TOP_CLASS_KIND.name: deserialize_string_kind, + FLOAT_KIND.name: deserialize_float_kind, + DICTIONARY_KIND.name: deserialize_dictionary_kind, + CLASSIFICATION_PREDICTION_KIND.name: deserialize_classification_prediction_kind, + POINT_KIND.name: deserialize_point_kind, + ZONE_KIND.name: deserialize_zone_kind, + RGB_COLOR_KIND.name: deserialize_rgb_color_kind, + LANGUAGE_MODEL_OUTPUT_KIND.name: deserialize_string_kind, + PREDICTION_TYPE_KIND.name: deserialize_string_kind, + PARENT_ID_KIND.name: deserialize_string_kind, + BYTES_KIND.name: deserialize_bytes_kind, } diff --git a/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py b/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py index 4141f8de0..f34636e1e 100644 --- a/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py +++ b/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py @@ -13,7 +13,7 @@ INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -96,7 +96,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/stitch_ocr_detections@v1"] - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py index 624a2e30b..dc44f94f1 100644 --- a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py +++ b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py @@ -16,7 +16,7 @@ INTEGER_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, STRING_KIND, - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -48,7 +48,7 @@ class KeypointManifest(VisualizationManifest): } ) - predictions: StepOutputSelector( + predictions: BatchOfDataSelector( kind=[ KEYPOINT_DETECTION_PREDICTION_KIND, ] diff --git 
a/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py b/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py index b84a74eb7..56ca839a5 100644 --- a/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py +++ b/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py @@ -718,10 +718,9 @@ def test_assemble_runtime_parameters_when_input_batch_deeper_than_declared() -> ] # when - result = assemble_runtime_parameters( - runtime_parameters=runtime_parameters, - defined_inputs=defined_inputs, - kinds_deserializers=KINDS_DESERIALIZERS, - ) - - pass + with pytest.raises(RuntimeInputError): + _ = assemble_runtime_parameters( + runtime_parameters=runtime_parameters, + defined_inputs=defined_inputs, + kinds_deserializers=KINDS_DESERIALIZERS, + ) From 7bdcce8250d988458fdf98124c67779417b00d07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 4 Nov 2024 10:28:33 +0100 Subject: [PATCH 11/67] Add tests for deserialization --- .../core_steps/common/deserializers.py | 20 +- .../core_steps/common/test_deserializers.py | 683 ++++++++++++++++++ 2 files changed, 697 insertions(+), 6 deletions(-) create mode 100644 tests/workflows/unit_tests/core_steps/common/test_deserializers.py diff --git a/inference/core/workflows/core_steps/common/deserializers.py b/inference/core/workflows/core_steps/common/deserializers.py index 8345914cc..9f6ab1c62 100644 --- a/inference/core/workflows/core_steps/common/deserializers.py +++ b/inference/core/workflows/core_steps/common/deserializers.py @@ -162,7 +162,7 @@ def deserialize_detections_kind( detection.get(PARENT_ID_KEY, parameter) for detection in detections["predictions"] ] - detections[PARENT_ID_KEY] = np.array(parent_ids) + parsed_detections[PARENT_ID_KEY] = np.array(parent_ids) optional_elements_keys = [ (PATH_DEVIATION_KEY_IN_INFERENCE_RESPONSE, PATH_DEVIATION_KEY_IN_SV_DETECTIONS), (TIME_IN_ZONE_KEY_IN_INFERENCE_RESPONSE, TIME_IN_ZONE_KEY_IN_SV_DETECTIONS), @@ -227,6 +227,12 @@ def _attach_optional_key_points_detections( def deserialize_numpy_array(parameter: str, raw_array: Any) -> np.ndarray: if isinstance(raw_array, np.ndarray): return raw_array + if not isinstance(raw_array, list): + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"numpy array value, but invalid type of data found (`{type(raw_array).__name__}`).", + context="workflow_execution | runtime_input_validation", + ) return np.array(raw_array) @@ -268,11 +274,7 @@ def deserialize_float_kind(parameter: str, value: Any) -> float: def deserialize_list_of_values_kind(parameter: str, value: Any) -> list: - if ( - not isinstance(value, list) - and not isinstance(value, set) - and not isinstance(value, tuple) - ): + if not isinstance(value, list) and not isinstance(value, tuple): raise RuntimeInputError( public_message=f"Detected runtime parameter `{parameter}` declared to hold " f"list, but invalid type of data found (`{type(value).__name__}`).", @@ -416,6 +418,12 @@ def deserialize_rgb_color_kind( ) if isinstance(value, str): return value + if len(value) < 3: + raise RuntimeInputError( + public_message=f"Detected runtime parameter `{parameter}` declared to hold " + f"RGB color, but not all colors defined.", + context="workflow_execution | runtime_input_validation", + ) return tuple(value[:3]) diff --git a/tests/workflows/unit_tests/core_steps/common/test_deserializers.py 
b/tests/workflows/unit_tests/core_steps/common/test_deserializers.py new file mode 100644 index 000000000..d9e98c9a3 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/common/test_deserializers.py @@ -0,0 +1,683 @@ +import base64 + +import numpy as np +import pytest +import supervision as sv + +from inference.core.workflows.core_steps.common.deserializers import ( + deserialize_boolean_kind, + deserialize_bytes_kind, + deserialize_classification_prediction_kind, + deserialize_detections_kind, + deserialize_float_zero_to_one_kind, + deserialize_integer_kind, + deserialize_list_of_values_kind, + deserialize_numpy_array, + deserialize_optional_string_kind, + deserialize_point_kind, + deserialize_rgb_color_kind, + deserialize_zone_kind, +) +from inference.core.workflows.errors import RuntimeInputError + + +def test_deserialize_detections_kind_when_sv_detections_given() -> None: + # given + detections = sv.Detections.empty() + + # when + result = deserialize_detections_kind( + parameter="my_param", + detections=detections, + ) + + # then + assert result is detections, "Expected object not to be touched" + + +def test_deserialize_detections_kind_when_invalid_data_type_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_detections_kind( + parameter="my_param", + detections="INVALID", + ) + + +def test_deserialize_detections_kind_when_malformed_data_type_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_detections_kind( + parameter="my_param", + detections={ + "image": {"height": 100, "width": 300}, + # lack of predictions + }, + ) + + +def test_deserialize_detections_kind_when_serialized_empty_detections_given() -> None: + # given + detections = { + "image": {"height": 100, "width": 300}, + "predictions": [], + } + + # when + result = deserialize_detections_kind( + parameter="my_param", + detections=detections, + ) + + # then + assert isinstance(result, sv.Detections) + assert len(result) == 0 + + +def test_deserialize_detections_kind_when_serialized_non_empty_object_detections_given() -> ( + None +): + # given + detections = { + "image": { + "width": 168, + "height": 192, + }, + "predictions": [ + { + "data": "some", + "width": 1.0, + "height": 1.0, + "x": 1.5, + "y": 1.5, + "confidence": 0.1, + "class_id": 1, + "tracker_id": 1, + "class": "cat", + "detection_id": "first", + "parent_id": "image", + }, + ], + } + + # when + result = deserialize_detections_kind( + parameter="my_param", + detections=detections, + ) + + # then + assert isinstance(result, sv.Detections) + assert len(result) == 1 + assert np.allclose(result.xyxy, np.array([[1, 1, 2, 2]])) + assert result.data["class_name"] == np.array(["cat"]) + assert result.data["detection_id"] == np.array(["first"]) + assert result.data["parent_id"] == np.array(["image"]) + assert result.data["detection_id"] == np.array(["first"]) + assert np.allclose(result.data["image_dimensions"], np.array([[192, 168]])) + + +def test_deserialize_detections_kind_when_serialized_non_empty_instance_segmentations_given() -> ( + None +): + # given + detections = { + "image": { + "width": 168, + "height": 192, + }, + "predictions": [ + { + "data": "some", + "width": 1.0, + "height": 1.0, + "x": 1.5, + "y": 1.5, + "confidence": 0.1, + "class_id": 1, + "tracker_id": 1, + "class": "cat", + "detection_id": "first", + "parent_id": "image", + "points": [ + {"x": 1.0, "y": 1.0}, + {"x": 1.0, "y": 10.0}, + {"x": 10.0, "y": 10.0}, + {"x": 10.0, "y": 1.0}, + ], + }, + ], + } + + # when + result = 
deserialize_detections_kind( + parameter="my_param", + detections=detections, + ) + + # then + assert isinstance(result, sv.Detections) + assert len(result) == 1 + assert np.allclose(result.xyxy, np.array([[1, 1, 2, 2]])) + assert result.data["class_name"] == np.array(["cat"]) + assert result.data["detection_id"] == np.array(["first"]) + assert result.data["parent_id"] == np.array(["image"]) + assert result.data["detection_id"] == np.array(["first"]) + assert np.allclose(result.data["image_dimensions"], np.array([[192, 168]])) + assert result.mask.shape == (1, 192, 168) + + +def test_deserialize_detections_kind_when_serialized_non_empty_keypoints_detections_given() -> ( + None +): + # given + detections = { + "image": { + "width": 168, + "height": 192, + }, + "predictions": [ + { + "data": "some", + "width": 1.0, + "height": 1.0, + "x": 1.5, + "y": 1.5, + "confidence": 0.1, + "class_id": 1, + "tracker_id": 1, + "class": "cat", + "detection_id": "first", + "parent_id": "image", + "keypoints": [ + { + "class_id": 1, + "class_name": "nose", + "confidence": 0.1, + "x": 11.0, + "y": 11.0, + }, + { + "class_id": 2, + "class_name": "ear", + "confidence": 0.2, + "x": 12.0, + "y": 13.0, + }, + { + "class_id": 3, + "class_name": "eye", + "confidence": 0.3, + "x": 14.0, + "y": 15.0, + }, + ], + }, + ], + } + + # when + result = deserialize_detections_kind( + parameter="my_param", + detections=detections, + ) + + # then + assert isinstance(result, sv.Detections) + assert len(result) == 1 + assert np.allclose(result.xyxy, np.array([[1, 1, 2, 2]])) + assert result.data["class_name"] == np.array(["cat"]) + assert result.data["detection_id"] == np.array(["first"]) + assert result.data["parent_id"] == np.array(["image"]) + assert result.data["detection_id"] == np.array(["first"]) + assert np.allclose(result.data["image_dimensions"], np.array([[192, 168]])) + assert ( + result.data["keypoints_class_id"] + == np.array( + [np.array([1, 2, 3])], + dtype="object", + ) + ).all() + assert ( + result.data["keypoints_class_name"] + == np.array( + np.array(["nose", "ear", "eye"]), + dtype="object", + ) + ).all() + assert np.allclose( + result.data["keypoints_confidence"].astype(np.float64), + np.array([[0.1, 0.2, 0.3]], dtype=np.float64), + ) + + +def test_deserialize_numpy_array_when_numpy_array_is_given() -> None: + # given + raw_array = np.array([1, 2, 3]) + + # when + result = deserialize_numpy_array(parameter="some", raw_array=raw_array) + + # then + assert result is raw_array + + +def test_deserialize_numpy_array_when_serialized_array_is_given() -> None: + # given + raw_array = [1, 2, 3] + + # when + result = deserialize_numpy_array(parameter="some", raw_array=raw_array) + + # then + assert np.allclose(result, np.array([1, 2, 3])) + + +def test_deserialize_numpy_array_when_invalid_value_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_numpy_array(parameter="some", raw_array="invalid") + + +def test_deserialize_optional_string_kind_when_empty_value_given() -> None: + # when + result = deserialize_optional_string_kind(parameter="some", value=None) + + # then + assert result is None + + +def test_deserialize_optional_string_kind_when_string_given() -> None: + # when + result = deserialize_optional_string_kind(parameter="some", value="some") + + # then + assert result == "some" + + +def test_deserialize_optional_string_kind_when_invalid_value_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_optional_string_kind(parameter="some", value=b"some") 
+ + +def test_deserialize_float_zero_to_one_kind_when_not_a_number_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_float_zero_to_one_kind(parameter="some", value="some") + + +def test_deserialize_float_zero_to_one_kind_when_integer_given() -> None: + # when + result = deserialize_float_zero_to_one_kind(parameter="some", value=1) + + # then + assert abs(result - 1.0) < 1e-5 + assert isinstance(result, float) + + +def test_deserialize_float_zero_to_one_kind_when_float_given() -> None: + # when + result = deserialize_float_zero_to_one_kind(parameter="some", value=0.5) + + # then + assert abs(result - 0.5) < 1e-5 + assert isinstance(result, float) + + +def test_deserialize_float_zero_to_one_kind_when_value_out_of_range_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_float_zero_to_one_kind(parameter="some", value=1.5) + + +def test_deserialize_list_of_values_kind_when_invalid_value_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_list_of_values_kind(parameter="some", value=1.5) + + +def test_deserialize_list_of_values_kind_when_list_given() -> None: + # when + result = deserialize_list_of_values_kind(parameter="some", value=[1, 2, 3]) + + # then + assert result == [1, 2, 3] + + +def test_deserialize_list_of_values_kind_when_tuple_given() -> None: + # when + result = deserialize_list_of_values_kind(parameter="some", value=(1, 2, 3)) + + # then + assert result == [1, 2, 3] + + +def test_deserialize_boolean_kind_when_boolean_given() -> None: + # when + result = deserialize_boolean_kind(parameter="some", value=True) + + # then + assert result is True + + +def test_deserialize_boolean_kind_when_invalid_value_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_boolean_kind(parameter="some", value="True") + + +def test_deserialize_integer_kind_when_integer_given() -> None: + # when + result = deserialize_integer_kind(parameter="some", value=3) + + # then + assert result == 3 + + +def test_deserialize_integer_kind_when_invalid_value_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_integer_kind(parameter="some", value=3.0) + + +def test_deserialize_classification_prediction_kind_when_valid_multi_class_prediction_given() -> ( + None +): + # given + prediction = { + "image": {"height": 128, "width": 256}, + "predictions": [{"class_name": "A", "class_id": 0, "confidence": 0.3}], + "top": "A", + "confidence": 0.3, + "parent_id": "some", + "prediction_type": "classification", + "inference_id": "some", + "root_parent_id": "some", + } + + # when + result = deserialize_classification_prediction_kind( + parameter="some", + value=prediction, + ) + + # then + assert result is prediction + + +def test_deserialize_classification_prediction_kind_when_valid_multi_label_prediction_given() -> ( + None +): + # given + prediction = { + "image": {"height": 128, "width": 256}, + "predictions": { + "a": {"confidence": 0.3, "class_id": 0}, + "b": {"confidence": 0.3, "class_id": 1}, + }, + "predicted_classes": ["a", "b"], + "parent_id": "some", + "prediction_type": "classification", + "inference_id": "some", + "root_parent_id": "some", + } + + # when + result = deserialize_classification_prediction_kind( + parameter="some", + value=prediction, + ) + + # then + assert result is prediction + + +def test_deserialize_classification_prediction_kind_when_not_a_dictionary_given() -> ( + None +): + # when + with pytest.raises(RuntimeInputError): + _ = 
deserialize_classification_prediction_kind( + parameter="some", + value="invalid", + ) + + +@pytest.mark.parametrize( + "to_delete", + [ + ["image"], + ["predictions"], + ["top", "predicted_classes"], + ["confidence", "predicted_classes"], + ], +) +def test_deserialize_classification_prediction_kind_when_required_keys_not_given( + to_delete: list, +) -> None: + # given + prediction = { + "image": {"height": 128, "width": 256}, + "predictions": [{"class_name": "A", "class_id": 0, "confidence": 0.3}], + "top": "A", + "confidence": 0.3, + "predicted_classes": ["a", "b"], + "parent_id": "some", + "prediction_type": "classification", + "inference_id": "some", + "root_parent_id": "some", + } + for field in to_delete: + del prediction[field] + + with pytest.raises(RuntimeInputError): + _ = deserialize_classification_prediction_kind( + parameter="some", + value=prediction, + ) + + +def test_deserialize_zone_kind_when_valid_input_given() -> None: + # given + zone = [ + (1, 2), + [3, 4], + (5, 6), + ] + + # when + result = deserialize_zone_kind(parameter="some", value=zone) + + # then + assert result == [ + (1, 2), + [3, 4], + (5, 6), + ] + + +def test_deserialize_zone_kind_when_zone_misses_points() -> None: + # given + zone = [ + [3, 4], + (5, 6), + ] + + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_zone_kind(parameter="some", value=zone) + + +def test_deserialize_zone_kind_when_zone_has_invalid_elements() -> None: + # given + zone = [[3, 4], (5, 6), "invalid"] + + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_zone_kind(parameter="some", value=zone) + + +def test_deserialize_zone_kind_when_zone_defines_invalid_points() -> None: + # given + zone = [ + [3, 4], + (5, 6, 3), + (1, 2), + ] + + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_zone_kind(parameter="some", value=zone) + + +def test_deserialize_zone_kind_when_zone_defines_points_not_being_numbers() -> None: + # given + zone = [ + [3, 4], + (5, 6), + (1, "invalid"), + ] + + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_zone_kind(parameter="some", value=zone) + + +def test_deserialize_rgb_color_kind_when_invalid_value_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_rgb_color_kind(parameter="some", value=1) + + +def test_deserialize_rgb_color_kind_when_string_given() -> None: + # when + result = deserialize_rgb_color_kind(parameter="some", value="#fff") + + # then + assert result == "#fff" + + +def test_deserialize_rgb_color_kind_when_valid_tuple_given() -> None: + # when + result = deserialize_rgb_color_kind(parameter="some", value=(1, 2, 3)) + + # then + assert result == (1, 2, 3) + + +def test_deserialize_rgb_color_kind_when_to_short_tuple_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_rgb_color_kind(parameter="some", value=(1, 2)) + + +def test_deserialize_rgb_color_kind_when_to_long_tuple_given() -> None: + # when + result = deserialize_rgb_color_kind(parameter="some", value=(1, 2, 3, 4)) + + # then + assert result == (1, 2, 3) + + +def test_deserialize_rgb_color_kind_when_valid_list_given() -> None: + # when + result = deserialize_rgb_color_kind(parameter="some", value=[1, 2, 3]) + + # then + assert result == (1, 2, 3) + + +def test_deserialize_rgb_color_kind_when_to_short_list_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_rgb_color_kind(parameter="some", value=[1, 2]) + + +def test_deserialize_rgb_color_kind_when_to_long_list_given() -> None: + # 
when + result = deserialize_rgb_color_kind(parameter="some", value=[1, 2, 3, 4]) + + # then + assert result == (1, 2, 3) + + +def test_deserialize_point_kind_when_invalid_value_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_point_kind(parameter="some", value=1) + + +def test_deserialize_point_kind_when_valid_tuple_given() -> None: + # when + result = deserialize_point_kind(parameter="some", value=(1, 2)) + + # then + assert result == (1, 2) + + +def test_deserialize_point_kind_when_to_short_tuple_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_point_kind(parameter="some", value=(1,)) + + +def test_deserialize_point_kind_when_to_long_tuple_given() -> None: + # when + result = deserialize_point_kind(parameter="some", value=(1, 2, 3, 4)) + + # then + assert result == (1, 2) + + +def test_deserialize_point_kind_when_valid_list_given() -> None: + # when + result = deserialize_point_kind(parameter="some", value=[1, 2]) + + # then + assert result == (1, 2) + + +def test_deserialize_point_kind_when_to_short_list_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_point_kind(parameter="some", value=[1]) + + +def test_deserialize_point_kind_when_to_long_list_given() -> None: + # when + result = deserialize_point_kind(parameter="some", value=[1, 2, 3, 4]) + + # then + assert result == (1, 2) + + +def test_deserialize_point_kind_when_point_element_is_not_number() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_point_kind(parameter="some", value=[1, "invalid"]) + + +def test_deserialize_bytes_kind_when_invalid_value_given() -> None: + # when + with pytest.raises(RuntimeInputError): + _ = deserialize_bytes_kind(parameter="some", value=1) + + +def test_deserialize_bytes_kind_when_bytes_given() -> None: + # when + result = deserialize_bytes_kind(parameter="some", value=b"abcd") + + # then + assert result == b"abcd" + + +def test_deserialize_bytes_kind_when_base64_string_given() -> None: + # given + data = base64.b64encode(b"data").decode("utf-8") + + # when + result = deserialize_bytes_kind(parameter="some", value=data) + + # then + assert result == b"data" From 34773e31905eb3afd57b332cf47e3bef04d6a493 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 4 Nov 2024 10:35:16 +0100 Subject: [PATCH 12/67] Add tests for filtering of workflow results --- .../core/interfaces/http/handlers/__init__.py | 0 .../http/handlers/test_workflows.py | 85 +++++++++++++++++++ 2 files changed, 85 insertions(+) create mode 100644 tests/inference/unit_tests/core/interfaces/http/handlers/__init__.py create mode 100644 tests/inference/unit_tests/core/interfaces/http/handlers/test_workflows.py diff --git a/tests/inference/unit_tests/core/interfaces/http/handlers/__init__.py b/tests/inference/unit_tests/core/interfaces/http/handlers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/inference/unit_tests/core/interfaces/http/handlers/test_workflows.py b/tests/inference/unit_tests/core/interfaces/http/handlers/test_workflows.py new file mode 100644 index 000000000..181a56ac3 --- /dev/null +++ b/tests/inference/unit_tests/core/interfaces/http/handlers/test_workflows.py @@ -0,0 +1,85 @@ +from inference.core.interfaces.http.handlers.workflows import ( + filter_out_unwanted_workflow_outputs, +) + + +def test_filter_out_unwanted_workflow_outputs_when_nothing_to_filter() -> None: + # given + workflow_results = [ + {"a": 1, "b": 2}, + {"a": 3, "b": 
4}, + ] + + # when + result = filter_out_unwanted_workflow_outputs( + workflow_results=workflow_results, + excluded_fields=None, + ) + + # then + assert result == [ + {"a": 1, "b": 2}, + {"a": 3, "b": 4}, + ] + + +def test_filter_out_unwanted_workflow_outputs_when_empty_filter() -> None: + # given + workflow_results = [ + {"a": 1, "b": 2}, + {"a": 3, "b": 4}, + ] + + # when + result = filter_out_unwanted_workflow_outputs( + workflow_results=workflow_results, + excluded_fields=[], + ) + + # then + assert result == [ + {"a": 1, "b": 2}, + {"a": 3, "b": 4}, + ] + + +def test_filter_out_unwanted_workflow_outputs_when_fields_to_be_filtered() -> None: + # given + workflow_results = [ + {"a": 1, "b": 2}, + {"a": 3, "b": 4}, + ] + + # when + result = filter_out_unwanted_workflow_outputs( + workflow_results=workflow_results, + excluded_fields=["a"], + ) + + # then + assert result == [ + {"b": 2}, + {"b": 4}, + ] + + +def test_filter_out_unwanted_workflow_outputs_when_filter_defines_non_existing_fields() -> ( + None +): + # given + workflow_results = [ + {"a": 1, "b": 2}, + {"a": 3, "b": 4}, + ] + + # when + result = filter_out_unwanted_workflow_outputs( + workflow_results=workflow_results, + excluded_fields=["non-existing"], + ) + + # then + assert result == [ + {"a": 1, "b": 2}, + {"a": 3, "b": 4}, + ] From f58a6d1f1d4997f3cd1cc53e22579e0ddbbc14f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 4 Nov 2024 15:49:08 +0100 Subject: [PATCH 13/67] Add documentation - part 1 --- docs/workflows/execution_engine_changelog.md | 171 +++++++++++++++ docs/workflows/kinds.md | 57 +++-- docs/workflows/workflow_execution.md | 3 +- docs/workflows/workflows_compiler.md | 1 + .../core_steps/common/serializers.py | 18 +- .../v1/executor/output_constructor.py | 4 +- .../core_steps/common/test_serializers.py | 146 +++++++++++++ .../executor/test_output_constructor.py | 194 ++++++++++++++++++ .../plugin_with_kinds_serializers/__init__.py | 33 +++ .../introspection/test_blocks_loader.py | 46 +++++ 10 files changed, 641 insertions(+), 32 deletions(-) create mode 100644 tests/workflows/unit_tests/execution_engine/introspection/plugin_with_kinds_serializers/__init__.py diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index c3510700d..647a00bc5 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -38,3 +38,174 @@ include a new `video_metadata` property. This property can be optionally set in a default value with reasonable defaults will be used. To simplify metadata manipulation within blocks, we have introduced two new class methods: `WorkflowImageData.copy_and_replace(...)` and `WorkflowImageData.create_crop(...)`. For more details, refer to the updated [`WoorkflowImageData` usage guide](/workflows/internal_data_types/#workflowimagedata). + + +## Execution Engine `v1.3.0` | inference `v0.26.0` + +* Introduced the change that let each kind have serializer and deserializer defined. The change decouples Workflows +plugins with Execution Engine and make it possible to integrate the ecosystem with external systems that +require data transfer through the wire. [Blocks bundling](/workflows/blocks_bundling/) page was updated to reflect +that change. 
+ +* *Kinds* defined in `roboflow_core` plugin were provided with suitable serializers and deserializers + +* Workflows Compiler and Execution Engine were enhanced to **support batch-oriented inputs of +any *kind***, contrary to versions prior to `v1.3.0`, which could only take `image` and `video_metadata` kinds +as batch-oriented inputs (as a result of unfortunate and not-needed coupling of kind to internal data +format introduced **at the level of Execution Engine**). As a result of the change: + + * **new input type was introduced:** `WorkflowDataBatch` should be used from now on to denote + batch-oriented inputs (and clearly separate them from `WorkflowParameters`). `WorkflowDataBatch` + lets users define both the *[kind](/workflows/kinds/)* of the data and its + *[dimensionality](/workflows/workflow_execution/#steps-interactions-with-data)*. + The new input type is effectively a superset of all previous batch-oriented inputs: `WorkflowImage` and + `WorkflowVideoMetadata`, which **remain supported**, but **will be removed in Execution Engine `v2`**. + We advise adjusting to the new input format, yet the requirement is not strict at the moment - the + Execution Engine now requires an explicit definition of the input data *kind* to select the data deserializer + properly. This may not be the case in the future, as in most cases the batch-oriented data *kind* may + be inferred by the compiler (yet this feature is not implemented for now). + + * **new selector type annotation was introduced** - `BatchOfDataSelector`, which is supposed to + replace `StepOutputSelector`, `WorkflowImageSelector`, `StepOutputImageSelector` and `WorkflowVideoMetadataSelector` + in block manifests, allowing batch-oriented data to be used as block input, regardless of whether it comes + from user inputs or outputs of other blocks. The old annotation types **should be assumed deprecated** - + we advise migrating to `BatchOfDataSelector`, but that is not a hard requirement. + +* As a result of the changes, it is now possible to **split any arbitrary workflow into multiple ones executing +subsets of steps**, enabling building tools such as debuggers. + +!!! warning "Breaking change planned - Execution Engine `v2.0.0`" + + * `WorkflowImage` and `WorkflowVideoMetadata` inputs will be removed from the Workflows ecosystem. + + * `StepOutputSelector`, `WorkflowImageSelector`, `StepOutputImageSelector` and `WorkflowVideoMetadataSelector` + type annotations used in block manifests will be removed from the Workflows ecosystem. + + +### Migration guide + +??? Hint "Kinds' serializers and deserializers" + + When creating your Workflows plugin, you may introduce custom serializers and deserializers + for Workflows *kinds*. To achieve that, simply place the following dictionaries + in the main module of the plugin (the same module where you place the `load_blocks(...)` function): + + ```python + from typing import Any + + def serialize_kind(value: Any) -> Any: + # place here the code that will be used to + # transform internal Workflows data representation into + # the external one (that can be sent through the wire in JSON, using + # default JSON encoder for Python). + pass + + + def deserialize_kind(parameter_name: str, value: Any) -> Any: + # place here the code that will be used to decode + # data sent through the wire into the Execution Engine + # and transform it into proper internal Workflows data representation + # which is understood by the blocks.
+ pass + + + KINDS_SERIALIZERS = { + "name_of_the_kind": serialize_kind, + } + KINDS_DESERIALIZERS = { + "name_of_the_kind": deserialize_kind, + } + ``` + +??? Hint "New type annotation for selectors" + + Blocks manifest may **optionally** be updated to use `BatchOfDataSelector` in the following way: + + ```python + from typing import Union + from inference.core.workflows.prototypes.block import WorkflowBlockManifest + from inference.core.workflows.execution_engine.entities.types import ( + INSTANCE_SEGMENTATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + WorkflowImageSelector, + StepOutputImageSelector, + StepOutputSelector, + ) + + + class BlockManifest(WorkflowBlockManifest): + + reference_image: Union[WorkflowImageSelector, StepOutputImageSelector] + predictions: StepOutputSelector( + kind=[ + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + ] + ) + ``` + + should just be changed into: + + ```{ .py linenums="1" hl_lines="5 11 12"} + from inference.core.workflows.prototypes.block import WorkflowBlockManifest + from inference.core.workflows.execution_engine.entities.types import ( + INSTANCE_SEGMENTATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + BatchOfDataSelector, + IMAGE_KIND, + ) + + + class BlockManifest(WorkflowBlockManifest): + reference_image: BatchOfDataSelector(kind=[IMAGE_KIND]) + predictions: BatchOfDataSelector( + kind=[ + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + ] + ) + ``` + + +??? Hint "New inputs in Workflows definitions" + + Anyone that used either `WorkflowImage` or `WorkflowVideoMetadata` inputs in their + Workflows definition may **optionally** migrate into `WorkflowDataBatch`. The transition + is illustrated below: + + ```json + { + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowVideoMetadata", "name": "video_metadata"} + ] + } + ``` + + should be changed into: + ```json + { + "inputs": [ + { + "type": "WorkflowDataBatch", + "name": "image", + "kind": ["image"] + }, + { + "type": "WorkflowDataBatch", + "name": "video_metadata", + "kind": ["video_metadata"] + } + ] + } + ``` + + **Leaving `kind` field empty may prevent some data - like images - from being deserialized properly.** + + + !!! Note + + If you do not like the way how data is serialized in `roboflow_core` plugin, + feel free to alter the serialization methods for *kinds*, simply registering + the function in your plugin and loading it to the Execution Engine - the + serializer/deserializer defined as the last one will be in use. diff --git a/docs/workflows/kinds.md b/docs/workflows/kinds.md index 37950a6eb..c81c92905 100644 --- a/docs/workflows/kinds.md +++ b/docs/workflows/kinds.md @@ -1,19 +1,38 @@ -# Workflows kinds +# Kinds -In Workflows, some values cannot be defined when the Workflow Definition is created. To address this, the Execution -Engine supports selectors, which are references to step outputs or workflow inputs. To help the Execution Engine -understand what type of data will be provided once a reference is resolved, we use a simple type system known as -`kinds`. +In Workflows, some values can’t be set in advance and are only determined during execution. +This is similar to writing a function where you don’t know the exact input values upfront — they’re only +provided at runtime, either from user inputs or from other function outputs. -`Kinds` are used to represent the semantic meaning of the underlying data. 
When a step outputs data of a specific -`kind` and another step requires input of that same `kind`, the system assumes that the data will be compatible. -This reduces the need for extensive type-compatibility checks. +To manage this, Workflows use *selectors*, which act like references, pointing to data without containing it directly. + +!!! Example *selectors* + + Selectors might refer to a named input - for example input image - like `$inputs.image` + or predictions generated by a previous step - like `$steps.my_model.predictions` + +In the Workflows ecosystem, users focus on data purpose (e.g., “image”) without worrying about its exact format. +Meanwhile, developers building workflow blocks need precise data formats. **Kinds** serve both needs - +they simplify data handling for users while ensuring developers work with the correct data structure. + + +## What are the **Kinds**? + +**Kinds** is Workflows type system with each **kind** defining: + +* **name** - expressing **semantic meaning** of the underlying data - like `image` or `point`; + +* **Python data representation** - the data type and format that blocks creators should expect when handling +the data within blocks; + +* optional **serialized data representation** - defining what is the format of the kind that +external systems should use to integrate with Workflows ecosystem - when needed, custom kinds serializers +and deserializers are provided to ensure seamless translation; + +Using kinds streamlines compatibility: when a step outputs data of a certain *kind* and another step requires that +same *kind*, the workflow engine assumes they’ll be compatible, reducing the need for compatibility checks and +providing compile-time verification of Workflows definitions. -For example, we have different kinds to distinguish between predictions from `object detection` and -`instance segmentation` models, even though representation of those `kinds` is -[`sv.Detections(...)`](https://supervision.roboflow.com/latest/detection/core/). This distinction ensures that each -block that needs a segmentation mask clearly indicates this requirement, avoiding the need to repeatedly check -for the presence of a mask in the input. !!! Note @@ -33,8 +52,20 @@ for the presence of a mask in the input. never existed in the ecosystem and fixed all blocks from `roboflow_core` plugin. If there is anyone impacted by the change - here is the [migration guide](https://github.com/roboflow/inference/releases/tag/v0.18.0). + + This warning **will be removed end of Q1 2025**. +!!! Warning + + Support for proper serialization and deserialization of any arbitrary *kind* was + introduced in Execution Engine `v1.3.0` (released with inference `0.26.0`). Workflows + plugins created prior that change may be updated - see refreshed + [Blocks Bundling](/workflows/blocks_bundling/) page. + + This warning **will be removed end of Q1 2025**. + + ## Kinds declared in Roboflow plugins * [`roboflow_project`](/workflows/kinds/roboflow_project): Roboflow project name diff --git a/docs/workflows/workflow_execution.md b/docs/workflows/workflow_execution.md index e7ee00fed..96eefe419 100644 --- a/docs/workflows/workflow_execution.md +++ b/docs/workflows/workflow_execution.md @@ -53,7 +53,8 @@ actual data values. It simply tells the Execution Engine how to direct and handl Input data in a Workflow can be divided into two types: -- Data to be processed: This can be submitted as a batch of data points. 
+- Batch-Oriented Data to be processed: Main data to be processed, which you expect to derive results +from (for instance: making inference with your model) - Parameters: These are single values used for specific settings or configurations. diff --git a/docs/workflows/workflows_compiler.md b/docs/workflows/workflows_compiler.md index 3e0fb6145..1bb8b58c9 100644 --- a/docs/workflows/workflows_compiler.md +++ b/docs/workflows/workflows_compiler.md @@ -232,6 +232,7 @@ is a batch of data - all batch elements are affected. * **The flow-control step operates on batch-oriented inputs with compatible lineage** - here, the flow-control step can decide separately for each element in the batch which ones will proceed and which ones will be stopped. +#### Batch-processing compatibility ## Initializing Workflow steps from blocks diff --git a/inference/core/workflows/core_steps/common/serializers.py b/inference/core/workflows/core_steps/common/serializers.py index 512df70b3..1a014cdc3 100644 --- a/inference/core/workflows/core_steps/common/serializers.py +++ b/inference/core/workflows/core_steps/common/serializers.py @@ -169,14 +169,7 @@ def serialize_wildcard_kind(value: Any) -> Any: def serialise_list(elements: List[Any]) -> List[Any]: result = [] for element in elements: - if isinstance(element, WorkflowImageData): - element = serialise_image(image=element) - elif isinstance(element, dict): - element = serialise_dict(elements=element) - elif isinstance(element, list): - element = serialise_list(elements=element) - elif isinstance(element, sv.Detections): - element = serialise_sv_detections(detections=element) + element = serialize_wildcard_kind(value=element) result.append(element) return result @@ -184,13 +177,6 @@ def serialise_list(elements: List[Any]) -> List[Any]: def serialise_dict(elements: Dict[str, Any]) -> Dict[str, Any]: serialised_result = {} for key, value in elements.items(): - if isinstance(value, WorkflowImageData): - value = serialise_image(image=value) - elif isinstance(value, dict): - value = serialise_dict(elements=value) - elif isinstance(value, list): - value = serialise_list(elements=value) - elif isinstance(value, sv.Detections): - value = serialise_sv_detections(detections=value) + value = serialize_wildcard_kind(value=value) serialised_result[key] = value return serialised_result diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index d44b9e247..883a3f541 100644 --- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -11,7 +11,7 @@ from inference.core.workflows.core_steps.common.utils import ( sv_detections_to_root_coordinates, ) -from inference.core.workflows.errors import ExecutionEngineRuntimeError +from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.constants import ( WORKFLOW_INPUT_BATCH_LINEAGE_ID, ) @@ -184,7 +184,7 @@ def serialize_data_piece( ) -> Any: if isinstance(kind, dict): if not isinstance(data_piece, dict): - raise ExecutionEngineRuntimeError( + raise AssumptionError( public_message=f"Could not serialize Workflow output `{output_name}` - expected the " f"output to be dictionary containing all outputs of the step, which is not the case." f"This is most likely a bug. 
Contact Roboflow team through github issues " diff --git a/tests/workflows/unit_tests/core_steps/common/test_serializers.py b/tests/workflows/unit_tests/core_steps/common/test_serializers.py index af77d536c..219a513d6 100644 --- a/tests/workflows/unit_tests/core_steps/common/test_serializers.py +++ b/tests/workflows/unit_tests/core_steps/common/test_serializers.py @@ -7,6 +7,7 @@ from inference.core.workflows.core_steps.common.serializers import ( serialise_image, serialise_sv_detections, + serialize_wildcard_kind, ) from inference.core.workflows.execution_engine.entities.base import ( ImageParentMetadata, @@ -210,3 +211,148 @@ def test_serialise_image() -> None: assert ( recovered_image == np_image ).all(), "Recovered image should be equal to input image" + + +def test_serialize_wildcard_kind_when_workflow_image_data_is_given() -> None: + # given + np_image = np.zeros((192, 168, 3), dtype=np.uint8) + value = WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np_image, + ) + + # when + result = serialize_wildcard_kind(value=value) + + # then + assert ( + result["type"] == "base64" + ), "Type of third element must be changed into base64" + decoded = base64.b64decode(result["value"]) + recovered_image = cv2.imdecode( + np.fromstring(decoded, dtype=np.uint8), + cv2.IMREAD_UNCHANGED, + ) + assert ( + recovered_image == np_image + ).all(), "Recovered image should be equal to input image" + + +def test_serialize_wildcard_kind_when_dictionary_is_given() -> None: + # given + np_image = np.zeros((192, 168, 3), dtype=np.uint8) + elements = { + "a": 3, + "b": "some", + "c": WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np_image, + ), + } + + # when + result = serialize_wildcard_kind(value=elements) + + # then + assert len(result) == 3, "The same number of elements must be returned" + assert result["a"] == 3, "First element of list must be untouched" + assert result["b"] == "some", "Second element of list must be untouched" + assert ( + result["c"]["type"] == "base64" + ), "Type of third element must be changed into base64" + decoded = base64.b64decode(result["c"]["value"]) + recovered_image = cv2.imdecode( + np.fromstring(decoded, dtype=np.uint8), + cv2.IMREAD_UNCHANGED, + ) + assert ( + recovered_image == np_image + ).all(), "Recovered image should be equal to input image" + + +def test_serialize_wildcard_kind_when_list_is_given() -> None: + # given + np_image = np.zeros((192, 168, 3), dtype=np.uint8) + elements = [ + 3, + "some", + WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=np_image, + ), + ] + + # when + result = serialize_wildcard_kind(value=elements) + + # then + assert len(result) == 3, "The same number of elements must be returned" + assert result[0] == 3, "First element of list must be untouched" + assert result[1] == "some", "Second element of list must be untouched" + assert ( + result[2]["type"] == "base64" + ), "Type of third element must be changed into base64" + decoded = base64.b64decode(result[2]["value"]) + recovered_image = cv2.imdecode( + np.fromstring(decoded, dtype=np.uint8), + cv2.IMREAD_UNCHANGED, + ) + assert ( + recovered_image == np_image + ).all(), "Recovered image should be equal to input image" + + +def test_serialize_wildcard_kind_when_compound_input_is_given() -> None: + # given + np_image = np.zeros((192, 168, 3), dtype=np.uint8) + elements = [ + 3, + "some", + WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + 
numpy_image=np_image, + ), + { + "nested": [ + WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="other"), + numpy_image=np_image, + ) + ] + }, + ] + + # when + result = serialize_wildcard_kind(value=elements) + + # then + assert len(result) == 4, "The same number of elements must be returned" + assert result[0] == 3, "First element of list must be untouched" + assert result[1] == "some", "Second element of list must be untouched" + assert ( + result[2]["type"] == "base64" + ), "Type of third element must be changed into base64" + decoded = base64.b64decode(result[2]["value"]) + recovered_image = cv2.imdecode( + np.fromstring(decoded, dtype=np.uint8), + cv2.IMREAD_UNCHANGED, + ) + assert ( + recovered_image == np_image + ).all(), "Recovered image should be equal to input image" + nested_dict = result[3] + assert len(nested_dict["nested"]) == 1, "Expected one element in nested list" + assert ( + nested_dict["nested"][0]["type"] == "base64" + ), "Expected image serialized to base64" + assert ( + "video_metadata" in nested_dict["nested"][0] + ), "Expected video metadata attached" + decoded = base64.b64decode(nested_dict["nested"][0]["value"]) + recovered_image = cv2.imdecode( + np.fromstring(decoded, dtype=np.uint8), + cv2.IMREAD_UNCHANGED, + ) + assert ( + recovered_image == np_image + ).all(), "Recovered image should be equal to input image" diff --git a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py index df51aec52..7b452748a 100644 --- a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py +++ b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py @@ -7,7 +7,13 @@ from networkx import DiGraph from inference.core.workflows.core_steps.loader import KINDS_SERIALIZERS +from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.entities.base import JsonField +from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, + INTEGER_KIND, + STRING_KIND, +) from inference.core.workflows.execution_engine.v1.compiler.entities import ( NodeCategory, OutputNode, @@ -18,6 +24,7 @@ create_array, data_contains_sv_detections, place_data_in_array, + serialize_data_piece, ) @@ -561,3 +568,190 @@ def get_batch_data(selector: str, indices: List[tuple]) -> List[Any]: "b_empty": None, "b_empty_nested": [[]], } + + +def test_serialize_data_piece_for_wildcard_output_when_serializer_not_found() -> None: + # when + result = serialize_data_piece( + output_name="my_output", + data_piece={"some": "data", "other": "another"}, + kind={"some": [STRING_KIND], "other": [STRING_KIND]}, + kinds_serializers={}, + ) + + # then + assert result == {"some": "data", "other": "another"}, "Expected data not t0 change" + + +def test_serialize_data_piece_for_wildcard_output_when_missmatch_in_input_detected() -> ( + None +): + # when + with pytest.raises(AssumptionError): + _ = serialize_data_piece( + output_name="my_output", + data_piece="not a dict", + kind={"some": [STRING_KIND], "other": [STRING_KIND]}, + kinds_serializers={}, + ) + + +def test_serialize_data_piece_for_wildcard_output_when_serializers_found_but_all_failing() -> ( + None +): + # given + def _faulty_serializer(value: Any) -> Any: + raise Exception() + + # when + with pytest.raises(ExecutionEngineRuntimeError): + _ = serialize_data_piece( + output_name="my_output", + data_piece={"some": 
"data", "other": "another"}, + kind={"some": [STRING_KIND, INTEGER_KIND], "other": STRING_KIND}, + kinds_serializers={ + STRING_KIND.name: _faulty_serializer, + INTEGER_KIND.name: _faulty_serializer, + }, + ) + + +def test_serialize_data_piece_for_wildcard_output_when_serializers_found_with_one_failing_and_one_successful() -> ( + None +): + # given + faulty_calls = [] + + def _faulty_serializer(value: Any) -> Any: + faulty_calls.append(1) + raise Exception() + + def _valid_serializer(value: Any) -> Any: + return "serialized", value + + # when + result = serialize_data_piece( + output_name="my_output", + data_piece={"some": "data", "other": "another"}, + kind={"some": [INTEGER_KIND, STRING_KIND], "other": [STRING_KIND]}, + kinds_serializers={ + STRING_KIND.name: _valid_serializer, + INTEGER_KIND.name: _faulty_serializer, + }, + ) + + # then + assert len(faulty_calls) == 1, "Expected faulty serializer attempted" + assert result == { + "some": ("serialized", "data"), + "other": ("serialized", "another"), + } + + +def test_serialize_data_piece_for_wildcard_output_when_serializers_found_and_successful() -> ( + None +): + # given + def _valid_serializer(value: Any) -> Any: + return "serialized", value + + # when + result = serialize_data_piece( + output_name="my_output", + data_piece={"some": "data", "other": "another"}, + kind={"some": [INTEGER_KIND, STRING_KIND], "other": [STRING_KIND]}, + kinds_serializers={ + STRING_KIND.name: _valid_serializer, + INTEGER_KIND.name: _valid_serializer, + }, + ) + + # then + assert result == { + "some": ("serialized", "data"), + "other": ("serialized", "another"), + } + + +def test_serialize_data_piece_for_specific_output_when_serializer_not_found() -> None: + # when + result = serialize_data_piece( + output_name="my_output", + data_piece="data", + kind=[STRING_KIND], + kinds_serializers={}, + ) + + # then + assert result == "data", "Expected data not to change" + + +def test_serialize_data_piece_for_specific_output_when_serializers_found_but_all_failing() -> ( + None +): + # given + def _faulty_serializer(value: Any) -> Any: + raise Exception() + + # when + with pytest.raises(ExecutionEngineRuntimeError): + _ = serialize_data_piece( + output_name="my_output", + data_piece="data", + kind=[STRING_KIND, INTEGER_KIND], + kinds_serializers={ + STRING_KIND.name: _faulty_serializer, + INTEGER_KIND.name: _faulty_serializer, + }, + ) + + +def test_serialize_data_piece_for_specific_output_when_serializers_found_with_one_failing_and_one_successful() -> ( + None +): + # given + faulty_calls = [] + + def _faulty_serializer(value: Any) -> Any: + faulty_calls.append(1) + raise Exception() + + def _valid_serializer(value: Any) -> Any: + return "serialized", value + + # when + result = serialize_data_piece( + output_name="my_output", + data_piece="data", + kind=[INTEGER_KIND, STRING_KIND], + kinds_serializers={ + STRING_KIND.name: _valid_serializer, + INTEGER_KIND.name: _faulty_serializer, + }, + ) + + # then + assert len(faulty_calls) == 1, "Expected faulty serializer attempted" + assert result == ("serialized", "data") + + +def test_serialize_data_piece_for_specific_output_when_serializers_found_and_successful() -> ( + None +): + # given + def _valid_serializer(value: Any) -> Any: + return "serialized", value + + # when + result = serialize_data_piece( + output_name="my_output", + data_piece="data", + kind=[INTEGER_KIND, STRING_KIND], + kinds_serializers={ + STRING_KIND.name: _valid_serializer, + INTEGER_KIND.name: _valid_serializer, + }, + ) + + # then + assert result == 
("serialized", "data") diff --git a/tests/workflows/unit_tests/execution_engine/introspection/plugin_with_kinds_serializers/__init__.py b/tests/workflows/unit_tests/execution_engine/introspection/plugin_with_kinds_serializers/__init__.py new file mode 100644 index 000000000..ae3a1b0fc --- /dev/null +++ b/tests/workflows/unit_tests/execution_engine/introspection/plugin_with_kinds_serializers/__init__.py @@ -0,0 +1,33 @@ +from typing import List, Type + +from inference.core.workflows.execution_engine.entities.types import Kind +from inference.core.workflows.prototypes.block import WorkflowBlock + +MY_KIND_1 = Kind(name="1") +MY_KIND_2 = Kind(name="2") +MY_KIND_3 = Kind(name="3") + + +def load_blocks() -> List[Type[WorkflowBlock]]: + return [] + + +def load_kinds() -> List[Kind]: + return [ + MY_KIND_1, + MY_KIND_2, + MY_KIND_3, + ] + + +KINDS_SERIALIZERS = { + "1": lambda value: "1", + "2": lambda value: "2", + "3": lambda value: "3", +} + +KINDS_DESERIALIZERS = { + "1": lambda name, value: "1", + "2": lambda name, value: "2", + "3": lambda name, value: "3", +} diff --git a/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py b/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py index ed12760fe..7de1b6553 100644 --- a/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py +++ b/tests/workflows/unit_tests/execution_engine/introspection/test_blocks_loader.py @@ -19,6 +19,8 @@ load_blocks_from_plugin, load_initializers, load_initializers_from_plugin, + load_kinds_deserializers, + load_kinds_serializers, load_workflow_blocks, ) from tests.workflows.unit_tests.execution_engine.introspection import ( @@ -426,3 +428,47 @@ def test_is_block_compatible_with_execution_engine_when_block_execution_engine_c block_source="workflows_core", block_identifier="some", ) + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_load_kinds_serializers( + get_plugin_modules_mock: MagicMock, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.unit_tests.execution_engine.introspection.plugin_with_kinds_serializers" + ] + + # when + result = load_kinds_serializers() + + # then + assert len(result) > 0 + assert result["1"]("some") == "1", "Expected hardcoded value from serializer" + assert result["2"]("some") == "2", "Expected hardcoded value from serializer" + assert result["3"]("some") == "3", "Expected hardcoded value from serializer" + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_load_kinds_deserializers( + get_plugin_modules_mock: MagicMock, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.unit_tests.execution_engine.introspection.plugin_with_kinds_serializers" + ] + + # when + result = load_kinds_deserializers() + + # then + assert len(result) > 0 + assert ( + result["1"]("some", "value") == "1" + ), "Expected hardcoded value from deserializer" + assert ( + result["2"]("some", "value") == "2" + ), "Expected hardcoded value from deserializer" + assert ( + result["3"]("some", "value") == "3" + ), "Expected hardcoded value from deserializer" From aba0aaf318b83d0c81c6f8735ef0828b6eb93ad9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 4 Nov 2024 17:51:53 +0100 Subject: [PATCH 14/67] Add docs - part 2 --- docs/workflows/blocks_bundling.md | 59 +++++++++++++++++++++++++ docs/workflows/create_workflow_block.md | 21 +++++---- docs/workflows/definitions.md | 53 ++++++++++++++++++++-- 
docs/workflows/workflows_compiler.md | 13 +++++- 4 files changed, 130 insertions(+), 16 deletions(-) diff --git a/docs/workflows/blocks_bundling.md b/docs/workflows/blocks_bundling.md index fd565b792..d03e2c53f 100644 --- a/docs/workflows/blocks_bundling.md +++ b/docs/workflows/blocks_bundling.md @@ -108,6 +108,65 @@ REGISTERED_INITIALIZERS = { } ``` +## Serializers and deserializers for *Kinds* + +Support for custom serializers and deserializers was introduced in Execution Engine `v1.3.0`. +From that version onward it is possible to register custom functions that +the Execution Engine should use to serialize and deserialize any *[kind](/workflows/kinds/)*. + +Deserializers determine how to decode inputs sent through the wire +into the internal data representation used by blocks. Serializers, on the other hand, +are useful when Workflow results are to be sent through the wire. + +Below you may find an example of how to add a serializer and deserializer +for an arbitrary kind. The code should be placed in the main `__init__.py` of +your plugin: + +```python +from typing import Any + +def serialize_kind(value: Any) -> Any: + # place here the code that will be used to + # transform internal Workflows data representation into + # the external one (that can be sent through the wire in JSON, using + # default JSON encoder for Python). + pass + + +def deserialize_kind(parameter_name: str, value: Any) -> Any: + # place here the code that will be used to decode + # data sent through the wire into the Execution Engine + # and transform it into proper internal Workflows data representation + # which is understood by the blocks. + pass + + +KINDS_SERIALIZERS = { + "name_of_the_kind": serialize_kind, +} +KINDS_DESERIALIZERS = { + "name_of_the_kind": deserialize_kind, +} +``` + +### Tips And Tricks + +* Each serializer must be a function taking the value to serialize +and returning the serialized value (accepted by the default Python JSON encoder) + +* Each deserializer must be a function accepting two parameters - the name of the +Workflow input to be deserialized and the value to be deserialized - the goal +of the function is to align input data with the expected internal representation + +* *Kinds* from the `roboflow_core` plugin already have reasonable serializers and +deserializers + +* If you do not like the way data is serialized in the `roboflow_core` plugin, +feel free to alter the serialization methods for *kinds* by simply registering +the function in your plugin and loading it into the Execution Engine - the +serializer/deserializer defined last will be the one in use. + + ## Enabling plugin in your Workflows ecosystem To load a plugin you must: diff --git a/docs/workflows/create_workflow_block.md b/docs/workflows/create_workflow_block.md index eaf01d1c2..0edfe945b 100644 --- a/docs/workflows/create_workflow_block.md +++ b/docs/workflows/create_workflow_block.md @@ -464,23 +464,22 @@ or alternatively: ??? hint "LEARN MORE: Selecting step outputs" Our siplified example showcased declaration of properties that accept selectors to - images produced by other steps via `StepOutputImageSelector`. + images produced by other steps via `BatchOfDataSelector`.
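The serializer/deserializer contract described in the `blocks_bundling.md` section above (a one-argument function for serialization, a two-argument function for deserialization, both registered in module-level dictionaries) can be made concrete with a short sketch. The kind name `my_plugin/numpy_array` and the use of a plain `ValueError` are illustrative assumptions and not part of this patch; a real plugin would follow its own naming and error conventions.

```python
from typing import Any, List, Union

import numpy as np


def serialize_numpy_array_kind(value: np.ndarray) -> List[Any]:
    # Internal representation (np.ndarray) -> structure accepted by the default JSON encoder.
    return value.tolist()


def deserialize_numpy_array_kind(parameter_name: str, value: Union[np.ndarray, list]) -> np.ndarray:
    # Accept data already in the internal representation, rebuild it from the
    # wire format otherwise, and fail loudly on anything else.
    if isinstance(value, np.ndarray):
        return value
    if isinstance(value, list):
        return np.array(value)
    raise ValueError(
        f"Parameter `{parameter_name}` could not be deserialized into a numpy array."
    )


# Registered in the plugin's main __init__.py, mirroring the template above.
KINDS_SERIALIZERS = {"my_plugin/numpy_array": serialize_numpy_array_kind}
KINDS_DESERIALIZERS = {"my_plugin/numpy_array": deserialize_numpy_array_kind}
```

Letting the deserializer pass through values that are already in the internal representation keeps in-process execution (where no wire transfer happens) working unchanged, which mirrors the behaviour exercised by the deserializer tests earlier in this patch series.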
- You can use function `StepOutputSelector(...)` creating field annotations dynamically - to express the that block accepts batch-oriented outputs from other steps of specified - kinds + You can use function `BatchOfDataSelector(...)` creating field annotations dynamically + to express the that block accepts batch-oriented outputs from other steps or Workflow inputs + (of specified kinds). - ```{ .py linenums="1" hl_lines="9 10 25"} + ```{ .py linenums="1" hl_lines="7 18 21 24"} from typing import Literal, Union from pydantic import Field from inference.core.workflows.prototypes.block import ( WorkflowBlockManifest, ) from inference.core.workflows.execution_engine.entities.types import ( - StepOutputImageSelector, - WorkflowImageSelector, - StepOutputSelector, + BatchOfDataSelector, NUMPY_ARRAY_KIND, + IMAGE_KIND, ) class ImagesSimilarityManifest(WorkflowBlockManifest): @@ -489,13 +488,13 @@ or alternatively: # all properties apart from `type` and `name` are treated as either # definitions of batch-oriented data to be processed by block or its # parameters that influence execution of steps created based on block - image_1: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) - example: StepOutputSelector(kind=[NUMPY_ARRAY_KIND]) + example: BatchOfDataSelector(kind=[NUMPY_ARRAY_KIND]) ``` ### Declaring block outputs diff --git a/docs/workflows/definitions.md b/docs/workflows/definitions.md index edb1cc799..1bd3eed46 100644 --- a/docs/workflows/definitions.md +++ b/docs/workflows/definitions.md @@ -14,7 +14,7 @@ analyse it step by step. "version": "1.0", "inputs": [ { - "type": "InferenceImage", + "type": "WorkflowImage", "name": "image" }, { @@ -96,7 +96,7 @@ Our example workflow specifies two inputs: ```json [ { - "type": "InferenceImage", "name": "image" + "type": "WorkflowImage", "name": "image" }, { "type": "WorkflowParameter", "name": "model", "default_value": "yolov8n-640" @@ -105,9 +105,9 @@ Our example workflow specifies two inputs: ``` This entry in definition creates two placeholders that can be filled with data while running workflow. -The first placeholder is named `image` and is of type `InferenceImage`. This special input type is batch-oriented, +The first placeholder is named `image` and is of type `WorkflowImage`. This special input type is batch-oriented, meaning it can accept one or more images at runtime to be processed as a single batch. You can add multiple inputs -of the type `InferenceImage`, and it is expected that the data provided to these placeholders will contain +of the type `WorkflowImage`, and it is expected that the data provided to these placeholders will contain the same number of elements. Alternatively, you can mix inputs of sizes `N` and 1, where `N` represents the number of elements in the batch. @@ -119,6 +119,51 @@ elements, rather than batch of elements, each to be processed individually. More details about the nature of batch-oriented data processing in workflows can be found [here](/workflows/workflow_execution). 
+ +### Generic batch-oriented inputs + +Since Execution Engine `v1.3.0` (inference release `v0.26.0`), Workflows support +batch-oriented inputs of any *[kind](/workflows/kinds/)* and +*[dimensionality](/workflows/workflow_execution/#steps-interactions-with-data)*. +These inputs are **not enforced for now**, but we expect that as the ecosystem grows, they will +become more and more useful. + +??? Tip "Defining generic batch-oriented inputs" + + If you wanted to replace the `WorkflowImage` input with a generic batch-oriented input, + use the following construction: + + ```json + { + "inputs": [ + { + "type": "WorkflowDataBatch", + "name": "image", + "kind": ["image"] + } + ] + } + ``` + + Additionally, if your image is supposed to sit at a higher *dimensionality level*, + add the `dimensionality` property: + + ```{ .json linenums="1" hl_lines="7" } + { + "inputs": [ + { + "type": "WorkflowDataBatch", + "name": "image", + "kind": ["image"], + "dimensionality": 2 + } + ] + } + ``` + + This will alter the expected format of `image` data in a Workflow run - + `dimensionality=2` enforces `image` to be a nested batch of images - namely a list + of lists of images. + ## Steps diff --git a/docs/workflows/workflows_compiler.md b/docs/workflows/workflows_compiler.md index 1bb8b58c9..2276eafe1 100644 --- a/docs/workflows/workflows_compiler.md +++ b/docs/workflows/workflows_compiler.md @@ -232,7 +232,18 @@ is a batch of data - all batch elements are affected. * **The flow-control step operates on batch-oriented inputs with compatible lineage** - here, the flow-control step can decide separately for each element in the batch which ones will proceed and which ones will be stopped. -#### Batch-processing compatibility +#### Batch-orientation compatibility + +As outlined above, Workflows define batch-oriented data and parameters. +Some blocks may require batch-oriented inputs, but that is not always the case. When a +block does not require batch-oriented input, it will be fed only with parameters and +will produce a single result. Such outputs can be used as inputs to other steps, +but only if the block class returns `False` from the `block.accepts_batch_input(...)` method. This +constraint is introduced to ensure stability of the blocks interface **for now, and we plan to fix +this in future releases**. + +On the other hand, batch-oriented outputs are prevented from being fed into +inputs that expect non-batch parameters. ## Initializing Workflow steps from blocks From 51d5e6fda452e3e9d8601cda1d3657e7fef2127e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 4 Nov 2024 22:32:26 +0100 Subject: [PATCH 15/67] Adjust docs to changes --- docs/workflows/create_workflow_block.md | 258 ++++++++++-------------- 1 file changed, 108 insertions(+), 150 deletions(-) diff --git a/docs/workflows/create_workflow_block.md b/docs/workflows/create_workflow_block.md index 0edfe945b..58647516b 100644 --- a/docs/workflows/create_workflow_block.md +++ b/docs/workflows/create_workflow_block.md @@ -313,37 +313,38 @@ we will be creating SIMD block.
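The batch-orientation compatibility rules added to `workflows_compiler.md` above can be illustrated with a minimal, self-contained sketch. The classes below are hypothetical stand-ins rather than real Workflow blocks (they do not inherit from the actual block base classes); only the `accepts_batch_input(...)` hook named in the documentation and the batch versus non-batch shapes of `run(...)` are meant to mirror the described behaviour.

```python
from typing import Dict, List


class NonBatchThresholdCheck:
    # Default behaviour assumed by the compiler: the step is fed scalar
    # parameters only and produces a single result per invocation.
    @classmethod
    def accepts_batch_input(cls) -> bool:
        return False

    def run(self, value: float, threshold: float) -> Dict[str, bool]:
        return {"above_threshold": value > threshold}


class BatchThresholdCheck:
    # Declaring batch support: the Execution Engine may hand a whole batch of
    # elements to a single run(...) call, which returns one result per element.
    @classmethod
    def accepts_batch_input(cls) -> bool:
        return True

    def run(self, values: List[float], threshold: float) -> List[Dict[str, bool]]:
        return [{"above_threshold": value > threshold} for value in values]
```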
Let's see how to add definitions of those inputs to manifest: - ```{ .py linenums="1" hl_lines="2 6 7 8 9 17 18 19 20 21 22"} + ```{ .py linenums="1" hl_lines="2 6-9 18-23"} from typing import Literal, Union from pydantic import Field from inference.core.workflows.prototypes.block import ( WorkflowBlockManifest, ) from inference.core.workflows.execution_engine.entities.types import ( - StepOutputImageSelector, - WorkflowImageSelector, + BatchOfDataSelector, + IMAGE_KIND, ) + class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str # all properties apart from `type` and `name` are treated as either # definitions of batch-oriented data to be processed by block or its # parameters that influence execution of steps created based on block - image_1: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) ``` * in the lines `2-9`, we've added a couple of imports to ensure that we have everything needed - * line `17` defines `image_1` parameter - as manifest is prototype for Workflow Definition, + * line `18` defines `image_1` parameter - as manifest is prototype for Workflow Definition, the only way to tell about image to be used by step is to provide selector - we have - two specialised types in core library that can be used - `WorkflowImageSelector` and `StepOutputImageSelector`. - If you look deeper into codebase, you will discover those are type aliases - telling `pydantic` + a specialised type in core library that can be used - `BatchOfDataSelector`. + If you look deeper into codebase, you will discover this is type alias constructor function - telling `pydantic` to expect string matching `$inputs.{name}` and `$steps.{name}.*` patterns respectively, additionally providing extra schema field metadata that tells Workflows ecosystem components that the `kind` of data behind selector is [image](/workflows/kinds/image/). @@ -351,7 +352,7 @@ we will be creating SIMD block. * denoting `pydantic` `Field(...)` attribute in the last parts of line `17` is optional, yet appreciated, especially for blocks intended to cooperate with Workflows UI - * starting in line `20`, you can find definition of `image_2` parameter which is very similar to `image_1`. + * starting in line `21`, you can find definition of `image_2` parameter which is very similar to `image_1`. Such definition of manifest can handle the following step declaration in Workflow definition: @@ -385,30 +386,31 @@ batch-oriented and will affect all batch elements passed to the step. ??? 
example "Adding parameter to the manifest" - ```{ .py linenums="1" hl_lines="9 10 11 26 27 28 29 30 31 32"} + ```{ .py linenums="1" hl_lines="9-11 27-33"} from typing import Literal, Union from pydantic import Field from inference.core.workflows.prototypes.block import ( WorkflowBlockManifest, ) from inference.core.workflows.execution_engine.entities.types import ( - StepOutputImageSelector, - WorkflowImageSelector, + BatchOfDataSelector, + IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, FLOAT_ZERO_TO_ONE_KIND, ) + class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str # all properties apart from `type` and `name` are treated as either # definitions of batch-oriented data to be processed by block or its # parameters that influence execution of steps created based on block - image_1: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -430,7 +432,7 @@ batch-oriented and will affect all batch elements passed to the step. * line `11` imports [`float_zero_to_one`](/workflows/kinds/float_zero_to_one) `kind` definition which will be used later - * in line `26` we start defining parameter called `similarity_threshold`. Manifest will accept + * in line `27` we start defining parameter called `similarity_threshold`. Manifest will accept either float values (in range `[0.0-1.0]`) or selector to workflow input of `kind` [`float_zero_to_one`](/workflows/kinds/float_zero_to_one). Please point out on how function creating type annotation (`WorkflowParameterSelector(...)`) is used - @@ -461,42 +463,6 @@ or alternatively: } ``` -??? hint "LEARN MORE: Selecting step outputs" - - Our siplified example showcased declaration of properties that accept selectors to - images produced by other steps via `BatchOfDataSelector`. - - You can use function `BatchOfDataSelector(...)` creating field annotations dynamically - to express the that block accepts batch-oriented outputs from other steps or Workflow inputs - (of specified kinds). - - ```{ .py linenums="1" hl_lines="7 18 21 24"} - from typing import Literal, Union - from pydantic import Field - from inference.core.workflows.prototypes.block import ( - WorkflowBlockManifest, - ) - from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, - NUMPY_ARRAY_KIND, - IMAGE_KIND, - ) - - class ImagesSimilarityManifest(WorkflowBlockManifest): - type: Literal["my_plugin/images_similarity@v1"] - name: str - # all properties apart from `type` and `name` are treated as either - # definitions of batch-oriented data to be processed by block or its - # parameters that influence execution of steps created based on block - image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( - description="First image to calculate similarity", - ) - image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( - description="Second image to calculate similarity", - ) - example: BatchOfDataSelector(kind=[NUMPY_ARRAY_KIND]) - ``` - ### Declaring block outputs Our manifest is ready regarding properties that can be declared in Workflow definitions, @@ -509,7 +475,7 @@ run the block. 
to increase block stability, we advise to provide information about execution engine compatibility. - ```{ .py linenums="1" hl_lines="1 5 13 33-40 42-44"} + ```{ .py linenums="1" hl_lines="1 5 13 34-41 43-45"} from typing import Literal, Union, List, Optional from pydantic import Field from inference.core.workflows.prototypes.block import ( @@ -517,21 +483,22 @@ run the block. OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - StepOutputImageSelector, - WorkflowImageSelector, + BatchOfDataSelector, + IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, ) + class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -563,11 +530,11 @@ run the block. * line `13` imports [`boolean`](/workflows/kinds/boolean) `kind` to be used in outputs definitions - * lines `33-40` declare class method to specify outputs from the block - + * lines `34-41` declare class method to specify outputs from the block - each entry in list declare one return property for each batch element and its `kind`. Our block will return boolean flag `images_match` for each pair of images. - * lines `42-44` declare compatibility of the block with Execution Engine - + * lines `43-45` declare compatibility of the block with Execution Engine - see [versioning page](/workflows/versioning/) for more details As a result of those changes: @@ -590,7 +557,7 @@ in their inputs * additionally, block manifest should implement instance method `get_actual_outputs(...)` that provides list of actual outputs that can be generated based on filled manifest data - ```{ .py linenums="1" hl_lines="14 35-42 44-49"} + ```{ .py linenums="1" hl_lines="14 36-43 45-50"} from typing import Literal, Union, List, Optional from pydantic import Field from inference.core.workflows.prototypes.block import ( @@ -598,8 +565,8 @@ in their inputs OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - StepOutputImageSelector, - WorkflowImageSelector, + BatchOfDataSelector, + IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, FLOAT_ZERO_TO_ONE_KIND, @@ -607,13 +574,14 @@ in their inputs WILDCARD_KIND, ) + class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -656,7 +624,7 @@ block. ??? example "Block scaffolding" - ```{ .py linenums="1" hl_lines="1 5 6 8-11 56-68"} + ```{ .py linenums="1" hl_lines="1 5 6 8-11 54-56 58-64"} from typing import Literal, Union, List, Optional, Type from pydantic import Field from inference.core.workflows.prototypes.block import ( @@ -669,8 +637,8 @@ block. 
WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - StepOutputImageSelector, - WorkflowImageSelector, + BatchOfDataSelector, + IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, FLOAT_ZERO_TO_ONE_KIND, @@ -680,10 +648,10 @@ block. class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -727,10 +695,10 @@ block. provide additional symbols required to properly define block class and all of its methods signatures - * line `59` defines class method `get_manifest(...)` to simply return + * lines `54-56` defines class method `get_manifest(...)` to simply return the manifest class we cretaed earlier - * lines `62-68` define `run(...)` function, which Execution Engine + * lines `58-64` define `run(...)` function, which Execution Engine will invoke with data to get desired results ### Providing implementation for block logic @@ -761,8 +729,8 @@ it can produce meaningful results. WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - StepOutputImageSelector, - WorkflowImageSelector, + BatchOfDataSelector, + IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, FLOAT_ZERO_TO_ONE_KIND, @@ -772,10 +740,10 @@ it can produce meaningful results. class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -910,8 +878,8 @@ on how to use it for your block. Batch, ) from inference.core.workflows.execution_engine.entities.types import ( - StepOutputImageSelector, - WorkflowImageSelector, + BatchOfDataSelector, + IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, FLOAT_ZERO_TO_ONE_KIND, @@ -921,10 +889,10 @@ on how to use it for your block. 
class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -1028,8 +996,8 @@ batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-con ) from inference.core.workflows.execution_engine.entities.types import ( StepSelector, - WorkflowImageSelector, - StepOutputImageSelector, + BatchOfDataSelector, + IMAGE_KIND, ) from inference.core.workflows.execution_engine.v1.entities import FlowControl from inference.core.workflows.prototypes.block import ( @@ -1043,7 +1011,7 @@ batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-con class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/random_continue@v1"] name: str - image: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + image: BatchOfDataSelector(kind=[IMAGE_KIND]) = ImageInputField probability: float next_steps: List[StepSelector] = Field( description="Reference to step which shall be executed if expression evaluates to true", @@ -1195,7 +1163,7 @@ def run(self, predictions: List[dict]) -> BlockResult: OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - StepOutputSelector, + BatchOfDataSelector, OBJECT_DETECTION_PREDICTION_KIND, ) from inference.core.workflows.prototypes.block import ( @@ -1209,7 +1177,7 @@ def run(self, predictions: List[dict]) -> BlockResult: class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/fusion_of_predictions@v1"] name: str - predictions: List[StepOutputSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND])] = Field( + predictions: List[BatchOfDataSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND])] = Field( description="Selectors to step outputs", examples=[["$steps.model_1.predictions", "$steps.model_2.predictions"]], ) @@ -1280,7 +1248,7 @@ keys serve as names for those selectors. OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - StepOutputSelector, + BatchOfDataSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -1294,7 +1262,7 @@ keys serve as names for those selectors. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/named_selectors_example@v1"] name: str - data: Dict[str, StepOutputSelector(), WorkflowParameterSelector()] = Field( + data: Dict[str, BatchOfDataSelector(), WorkflowParameterSelector()] = Field( description="Selectors to step outputs", examples=[{"a": $steps.model_1.predictions", "b": "$Inputs.data"}], ) @@ -1325,7 +1293,7 @@ keys serve as names for those selectors. ``` * lines `23-26` depict how to define manifest field capable of accepting - list of selectors + dictionary of selectors - providing mapping between selector name and value * line `47` shows what to expect as input to block's `run(...)` method - dict of objects which are reffered with selectors. If the block accepted @@ -1386,7 +1354,7 @@ the method signatures. In this example, we perform dynamic crop of image based on predictions. 
- ```{ .py linenums="1" hl_lines="30-32 65 66-67"} + ```{ .py linenums="1" hl_lines="28-30 63 64-65"} from typing import Dict, List, Literal, Optional, Type, Union from uuid import uuid4 @@ -1399,9 +1367,7 @@ the method signatures. from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputImageSelector, - StepOutputSelector, - WorkflowImageSelector, + BatchOfDataSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1411,8 +1377,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_block/dynamic_crop@v1"] - image: Union[WorkflowImageSelector, StepOutputImageSelector] - predictions: StepOutputSelector( + image: BatchOfDataSelector(kind=[IMAGE_KIND]) + predictions: BatchOfDataSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @@ -1456,16 +1422,16 @@ the method signatures. return crops ``` - * in lines `30-32` manifest class declares output dimensionality + * in lines `28-30` manifest class declares output dimensionality offset - value `1` should be understood as adding `1` to dimensionality level - * point out, that in line `65`, block eliminates empty images from further processing but + * point out, that in line `63`, block eliminates empty images from further processing but placing `None` instead of dictionatry with outputs. This would utilise the same Execution Engine behaviour that is used for conditional execution - datapoint will be eliminated from downstream processing (unless steps requesting empty inputs are present down the line). - * in lines `66-67` results for single input `image` and `predictions` are collected - + * in lines `64-65` results for single input `image` and `predictions` are collected - it is meant to be list of dictionares containing all registered outputs as keys. Execution engine will understand that the step returns batch of elements for each input element and create nested sturcures of indices to keep track of during execution of downstream steps. @@ -1475,7 +1441,7 @@ the method signatures. In this example, the block visualises crops predictions and creates tiles presenting all crops predictions in single output image. - ```{ .py linenums="1" hl_lines="31-33 50-51 61-62"} + ```{ .py linenums="1" hl_lines="29-31 48-49 59-60"} from typing import List, Literal, Type, Union import supervision as sv @@ -1488,9 +1454,7 @@ the method signatures. from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputImageSelector, - StepOutputSelector, - WorkflowImageSelector, + BatchOfDataSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1501,8 +1465,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/tile_detections@v1"] - crops: Union[WorkflowImageSelector, StepOutputImageSelector] - crops_predictions: StepOutputSelector( + crops: BatchOfDataSelector(kind=[IMAGE_KIND]) + crops_predictions: BatchOfDataSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND] ) @@ -1540,10 +1504,10 @@ the method signatures. 
return {"visualisations": tile} ``` - * in lines `31-33` manifest class declares output dimensionality + * in lines `29-31` manifest class declares output dimensionality offset - value `-1` should be understood as decreasing dimensionality level by `1` - * in lines `50-51` you can see the impact of output dimensionality decrease + * in lines `48-49` you can see the impact of output dimensionality decrease on the method signature. Both inputs are artificially wrapped in `Batch[]` container. This is done by Execution Engine automatically on output dimensionality decrease when all inputs have the same dimensionality to enable access to all elements occupying @@ -1551,7 +1515,7 @@ the method signatures. from top-level batch will be grouped. For instance, if you had two input images that you cropped - crops from those two different images will be grouped separately. - * lines `61-62` illustrate how output is constructed - single value is returned and that value + * lines `59-60` illustrate how output is constructed - single value is returned and that value will be indexed by Execution Engine in output batch with reduced dimensionality === "different input dimensionalities" @@ -1560,7 +1524,7 @@ the method signatures. crops of original image - result is to provide single detections with all partial ones being merged. - ```{ .py linenums="1" hl_lines="32-37 39-41 63-64 70"} + ```{ .py linenums="1" hl_lines="31-36 38-40 62-63 69"} from copy import deepcopy from typing import Dict, List, Literal, Optional, Type, Union @@ -1574,9 +1538,8 @@ the method signatures. ) from inference.core.workflows.execution_engine.entities.types import ( OBJECT_DETECTION_PREDICTION_KIND, - StepOutputImageSelector, - StepOutputSelector, - WorkflowImageSelector, + BatchOfDataSelector, + IMAGE_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1587,8 +1550,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/stitch@v1"] - image: Union[WorkflowImageSelector, StepOutputImageSelector] - image_predictions: StepOutputSelector( + image: BatchOfDataSelector(kind=[IMAGE_KIND]) + image_predictions: BatchOfDataSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @@ -1634,11 +1597,11 @@ the method signatures. ``` - * in lines `32-37` manifest class declares input dimensionalities offset, indicating + * in lines `31-36` manifest class declares input dimensionalities offset, indicating `image` parameter being top-level and `image_predictions` being nested batch of predictions * whenever different input dimensionalities are declared, dimensionality reference property - must be pointed (see lines `39-41`) - this dimensionality level would be used to calculate + must be pointed (see lines `38-40`) - this dimensionality level would be used to calculate output dimensionality - in this particular case, we specify `image`. This choice has an implication in the expected format of result - in the chosen scenario we are supposed to return single dictionary with all registered outputs keys. If our choice is `image_predictions`, @@ -1646,11 +1609,11 @@ the method signatures. `get_dimensionality_reference_property(...)` which dimensionality level should be associated to the output. - * lines `63-64` present impact of dimensionality offsets specified in lines `32-37`. It is clearly + * lines `63-64` present impact of dimensionality offsets specified in lines `31-36`. It is clearly visible that `image_predictions` is a nested batch regarding `image`. 
Obviously, only nested predictions relevant for the specific `images` are grouped in batch and provided to the method in runtime. - * as mentioned earlier, line `70` construct output being single dictionary, as we register output + * as mentioned earlier, line `69` construct output being single dictionary, as we register output at dimensionality level of `image` (which was also shipped as single element) @@ -1660,7 +1623,7 @@ the method signatures. In this example, we perform dynamic crop of image based on predictions. - ```{ .py linenums="1" hl_lines="31-33 35-37 57-58 72 73-75"} + ```{ .py linenums="1" hl_lines="29-31 33-35 55-56 70 71-73"} from typing import Dict, List, Literal, Optional, Type, Union from uuid import uuid4 @@ -1674,9 +1637,7 @@ the method signatures. from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputImageSelector, - StepOutputSelector, - WorkflowImageSelector, + BatchOfDataSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1686,8 +1647,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_block/dynamic_crop@v1"] - image: Union[WorkflowImageSelector, StepOutputImageSelector] - predictions: StepOutputSelector( + image: BatchOfDataSelector(kind=[IMAGE_KIND]) + predictions: BatchOfDataSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @@ -1738,21 +1699,21 @@ the method signatures. return results ``` - * in lines `31-33` manifest declares that block accepts batches of inputs + * in lines `29-31` manifest declares that block accepts batches of inputs - * in lines `35-37` manifest class declares output dimensionality + * in lines `33-35` manifest class declares output dimensionality offset - value `1` should be understood as adding `1` to dimensionality level - * in lines `57-68`, signature of input parameters reflects that the `run(...)` method + * in lines `55-66`, signature of input parameters reflects that the `run(...)` method runs against inputs of the same dimensionality and those inputs are provided in batches - * point out, that in line `72`, block eliminates empty images from further processing but + * point out, that in line `70`, block eliminates empty images from further processing but placing `None` instead of dictionatry with outputs. This would utilise the same Execution Engine behaviour that is used for conditional execution - datapoint will be eliminated from downstream processing (unless steps requesting empty inputs are present down the line). - * construction of the output, presented in lines `73-75` indicates two levels of nesting. + * construction of the output, presented in lines `71-73` indicates two levels of nesting. First of all, block operates on batches, so it is expected to return list of outputs, one output for each input batch element. Additionally, this output element for each input batch element turns out to be nested batch - hence for each input iage and prediction, block @@ -1764,7 +1725,7 @@ the method signatures. In this example, the block visualises crops predictions and creates tiles presenting all crops predictions in single output image. - ```{ .py linenums="1" hl_lines="31-33 35-37 54-55 68-69"} + ```{ .py linenums="1" hl_lines="29-31 33-35 52-53 66-67"} from typing import List, Literal, Type, Union import supervision as sv @@ -1777,9 +1738,7 @@ the method signatures. 
from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - StepOutputImageSelector, - StepOutputSelector, - WorkflowImageSelector, + BatchOfDataSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1790,8 +1749,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/tile_detections@v1"] - images_crops: Union[WorkflowImageSelector, StepOutputImageSelector] - crops_predictions: StepOutputSelector( + images_crops: BatchOfDataSelector(kind=[IMAGE_KIND]) + crops_predictions: BatchOfDataSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND] ) @@ -1836,19 +1795,19 @@ the method signatures. return visualisations ``` - * lines `31-33` manifest that block is expected to take batches as input + * lines `29-31` manifest that block is expected to take batches as input - * in lines `35-37` manifest class declares output dimensionality + * in lines `33-35` manifest class declares output dimensionality offset - value `-1` should be understood as decreasing dimensionality level by `1` - * in lines `54-55` you can see the impact of output dimensionality decrease + * in lines `52-53` you can see the impact of output dimensionality decrease and batch processing on the method signature. First "layer" of `Batch[]` is a side effect of the fact that manifest declared that block accepts batches of inputs. The second "layer" comes from output dimensionality decrease. Execution Engine wrapps up the dimension to be reduced into additional `Batch[]` container porvided in inputs, such that programmer is able to collect all nested batches elements that belong to specific top-level batch element. - * lines `68-69` illustrate how output is constructed - for each top-level batch element, block + * lines `66-67` illustrate how output is constructed - for each top-level batch element, block aggregates all crops and predictions and creates a single tile. As block accepts batches of inputs, this procedure end up with one tile for each top-level batch element - hence list of dictionaries is expected to be returned. @@ -1859,7 +1818,7 @@ the method signatures. crops of original image - result is to provide single detections with all partial ones being merged. - ```{ .py linenums="1" hl_lines="32-34 36-41 43-45 67-68 77-78"} + ```{ .py linenums="1" hl_lines="31-33 35-40 42-44 66-67 76-77"} from copy import deepcopy from typing import Dict, List, Literal, Optional, Type, Union @@ -1873,9 +1832,8 @@ the method signatures. ) from inference.core.workflows.execution_engine.entities.types import ( OBJECT_DETECTION_PREDICTION_KIND, - StepOutputImageSelector, - StepOutputSelector, - WorkflowImageSelector, + BatchOfDataSelector, + IMAGE_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1886,8 +1844,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/stitch@v1"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] - images_predictions: StepOutputSelector( + images: BatchOfDataSelector(kind=[IMAGE_KIND]) + images_predictions: BatchOfDataSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @@ -1940,26 +1898,26 @@ the method signatures. 
return result ``` - * lines `32-34` manifest that block is expected to take batches as input + * lines `31-33` manifest that block is expected to take batches as input - * in lines `36-41` manifest class declares input dimensionalities offset, indicating + * in lines `35-40` manifest class declares input dimensionalities offset, indicating `image` parameter being top-level and `image_predictions` being nested batch of predictions * whenever different input dimensionalities are declared, dimensionality reference property - must be pointed (see lines `43-45`) - this dimensionality level would be used to calculate + must be pointed (see lines `42-44`) - this dimensionality level would be used to calculate output dimensionality - in this particular case, we specify `image`. This choice has an implication in the expected format of result - in the chosen scenario we are supposed to return single dictionary for each element of `image` batch. If our choice is `image_predictions`, we would return list of dictionaries (of size equal to length of nested `image_predictions` batch) for each input `image` batch element. - * lines `67-68` present impact of dimensionality offsets specified in lines `36-41` as well as + * lines `66-67` present impact of dimensionality offsets specified in lines `35-40` as well as the declararion of batch processing from lines `32-34`. First "layer" of `Batch[]` container comes from the latter, nested `Batch[Batch[]]` for `images_predictions` comes from the definition of input dimensionality offset. It is clearly visible that `image_predictions` holds batch of predictions relevant for specific elements of `image` batch. - * as mentioned earlier, lines `77-78` construct output being single dictionary for each element of `image` + * as mentioned earlier, lines `76-77` construct output being single dictionary for each element of `image` batch @@ -1988,7 +1946,7 @@ that even if some elements are empty, the output lacks missing elements making i Batch, OutputDefinition, ) - from inference.core.workflows.execution_engine.entities.types import StepOutputSelector + from inference.core.workflows.execution_engine.entities.types import BatchOfDataSelector from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -1998,7 +1956,7 @@ that even if some elements are empty, the output lacks missing elements making i class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/first_non_empty_or_default@v1"] - data: List[StepOutputSelector()] + data: List[BatchOfDataSelector()] default: Any @classmethod @@ -2071,7 +2029,7 @@ Let's see how to request init parameters while defining block. Batch, OutputDefinition, ) - from inference.core.workflows.execution_engine.entities.types import StepOutputSelector + from inference.core.workflows.execution_engine.entities.types import BatchOfDataSelector from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -2081,7 +2039,7 @@ Let's see how to request init parameters while defining block. 
class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/example@v1"] - data: List[StepOutputSelector()] + data: List[BatchOfDataSelector()] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: From 272e7ef90d2448c5ccec7dbba5dc070f7b2634c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Tue, 5 Nov 2024 10:46:40 +0100 Subject: [PATCH 16/67] Add extension to inference_sdk to handle nested batches of input parameters --- inference_sdk/http/client.py | 6 +- inference_sdk/http/utils/loaders.py | 23 ++++ inference_sdk/http/utils/requests.py | 29 ++++- .../unit_tests/http/test_client.py | 113 +++++++++++++++--- .../unit_tests/http/utils/test_loaders.py | 62 +++++++++- .../unit_tests/http/utils/test_requests.py | 48 +++++++- 6 files changed, 260 insertions(+), 21 deletions(-) diff --git a/inference_sdk/http/client.py b/inference_sdk/http/client.py index c87af9e59..085ce24d8 100644 --- a/inference_sdk/http/client.py +++ b/inference_sdk/http/client.py @@ -42,6 +42,7 @@ ) from inference_sdk.http.utils.iterables import unwrap_single_element_list from inference_sdk.http.utils.loaders import ( + load_nested_batches_of_inference_input, load_static_inference_input, load_static_inference_input_async, load_stream_inference_input, @@ -65,6 +66,7 @@ api_key_safe_raise_for_status, deduct_api_key_from_string, inject_images_into_payload, + inject_nested_batches_of_images_into_payload, ) from inference_sdk.utils.decorators import deprecated, experimental @@ -1156,10 +1158,10 @@ def _run_workflow( } inputs = {} for image_name, image in images.items(): - loaded_image = load_static_inference_input( + loaded_image = load_nested_batches_of_inference_input( inference_input=image, ) - inject_images_into_payload( + inject_nested_batches_of_images_into_payload( payload=inputs, encoded_images=loaded_image, key=image_name, diff --git a/inference_sdk/http/utils/loaders.py b/inference_sdk/http/utils/loaders.py index 9e398803f..24721da2a 100644 --- a/inference_sdk/http/utils/loaders.py +++ b/inference_sdk/http/utils/loaders.py @@ -52,6 +52,29 @@ def load_directory_inference_input( yield path, cv2.imread(path) +def load_nested_batches_of_inference_input( + inference_input: Union[list, ImagesReference], + max_height: Optional[int] = None, + max_width: Optional[int] = None, +) -> Union[Tuple[str, Optional[float]], list]: + if not isinstance(inference_input, list): + return load_static_inference_input( + inference_input=inference_input, + max_height=max_height, + max_width=max_width, + )[0] + result = [] + for element in inference_input: + result.append( + load_nested_batches_of_inference_input( + inference_input=element, + max_height=max_height, + max_width=max_width, + ) + ) + return result + + def load_static_inference_input( inference_input: Union[ImagesReference, List[ImagesReference]], max_height: Optional[int] = None, diff --git a/inference_sdk/http/utils/requests.py b/inference_sdk/http/utils/requests.py index b38b1f9e5..e2b607f96 100644 --- a/inference_sdk/http/utils/requests.py +++ b/inference_sdk/http/utils/requests.py @@ -1,5 +1,5 @@ import re -from typing import List, Optional, Tuple +from typing import List, Optional, Tuple, Union from requests import Response @@ -44,3 +44,30 @@ def inject_images_into_payload( else: payload[key] = {"type": "base64", "value": encoded_images[0][0]} return payload + + +def inject_nested_batches_of_images_into_payload( + payload: dict, + encoded_images: Union[list, Tuple[str, Optional[float]]], + key: str = "image", +) -> dict: + 
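+    # Mirror the nesting of `encoded_images` when building the payload: a single
+    # (base64_value, scale) tuple becomes one {"type": "base64", ...} entry, while
+    # (possibly nested) lists are converted element-wise so the batch structure
+    # is preserved under `key`.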
payload_value = _batch_of_images_into_inference_format( + encoded_images=encoded_images, + ) + payload[key] = payload_value + return payload + + +def _batch_of_images_into_inference_format( + encoded_images: Union[list, Tuple[str, Optional[float]]], +) -> Union[dict, list]: + if not isinstance(encoded_images, list): + return {"type": "base64", "value": encoded_images[0]} + result = [] + for element in encoded_images: + result.append( + _batch_of_images_into_inference_format( + encoded_images=element, + ) + ) + return result diff --git a/tests/inference_sdk/unit_tests/http/test_client.py b/tests/inference_sdk/unit_tests/http/test_client.py index a92fe07da..bbbbbc347 100644 --- a/tests/inference_sdk/unit_tests/http/test_client.py +++ b/tests/inference_sdk/unit_tests/http/test_client.py @@ -3575,7 +3575,7 @@ def test_infer_from_workflow_when_no_parameters_given( }, "Request payload must contain api key and inputs" -@mock.patch.object(client, "load_static_inference_input") +@mock.patch.object(client, "load_nested_batches_of_inference_input") @pytest.mark.parametrize( "legacy_endpoints, endpoint_to_use, parameter_name", [ @@ -3584,7 +3584,7 @@ def test_infer_from_workflow_when_no_parameters_given( ], ) def test_infer_from_workflow_when_parameters_and_excluded_fields_given( - load_static_inference_input_mock: MagicMock, + load_nested_batches_of_inference_input_mock: MagicMock, requests_mock: Mocker, legacy_endpoints: bool, endpoint_to_use: str, @@ -3599,8 +3599,8 @@ def test_infer_from_workflow_when_parameters_and_excluded_fields_given( "outputs": [{"some": 3}], }, ) - load_static_inference_input_mock.side_effect = [ - [("base64_image_1", 0.5)], + load_nested_batches_of_inference_input_mock.side_effect = [ + ("base64_image_1", 0.5), [("base64_image_2", 0.5), ("base64_image_3", 0.5)], ] method = ( @@ -3647,7 +3647,7 @@ def test_infer_from_workflow_when_parameters_and_excluded_fields_given( }, "Request payload must contain api key and inputs" -@mock.patch.object(client, "load_static_inference_input") +@mock.patch.object(client, "load_nested_batches_of_inference_input") @pytest.mark.parametrize( "legacy_endpoints, endpoint_to_use, parameter_name", [ @@ -3656,7 +3656,7 @@ def test_infer_from_workflow_when_parameters_and_excluded_fields_given( ], ) def test_infer_from_workflow_when_usage_of_cache_disabled( - load_static_inference_input_mock: MagicMock, + load_nested_batches_of_inference_input_mock: MagicMock, requests_mock: Mocker, legacy_endpoints: bool, endpoint_to_use: str, @@ -3671,8 +3671,8 @@ def test_infer_from_workflow_when_usage_of_cache_disabled( "outputs": [{"some": 3}], }, ) - load_static_inference_input_mock.side_effect = [ - [("base64_image_1", 0.5)], + load_nested_batches_of_inference_input_mock.side_effect = [ + ("base64_image_1", 0.5), [("base64_image_2", 0.5), ("base64_image_3", 0.5)], ] method = ( @@ -3714,7 +3714,7 @@ def test_infer_from_workflow_when_usage_of_cache_disabled( }, "Request payload must contain api key, inputs and no cache flag" -@mock.patch.object(client, "load_static_inference_input") +@mock.patch.object(client, "load_nested_batches_of_inference_input") @pytest.mark.parametrize( "legacy_endpoints, endpoint_to_use, parameter_name", [ @@ -3723,7 +3723,7 @@ def test_infer_from_workflow_when_usage_of_cache_disabled( ], ) def test_infer_from_workflow_when_usage_of_profiler_enabled( - load_static_inference_input_mock: MagicMock, + load_nested_batches_of_inference_input_mock: MagicMock, requests_mock: Mocker, legacy_endpoints: bool, endpoint_to_use: str, @@ -3742,8 
+3742,8 @@ def test_infer_from_workflow_when_usage_of_profiler_enabled( "profiler_trace": [{"my": "trace"}] }, ) - load_static_inference_input_mock.side_effect = [ - [("base64_image_1", 0.5)], + load_nested_batches_of_inference_input_mock.side_effect = [ + ("base64_image_1", 0.5), [("base64_image_2", 0.5), ("base64_image_3", 0.5)], ] method = ( @@ -3790,6 +3790,87 @@ def test_infer_from_workflow_when_usage_of_profiler_enabled( assert data == [{"my": "trace"}], "Trace content must be fully saved" +@mock.patch.object(client, "load_nested_batches_of_inference_input") +@pytest.mark.parametrize( + "legacy_endpoints, endpoint_to_use, parameter_name", + [ + (True, "/infer/workflows/my_workspace/my_workflow", "workflow_name"), + (False, "/my_workspace/workflows/my_workflow", "workflow_id"), + ], +) +def test_infer_from_workflow_when_nested_batch_of_inputs_provided( + load_nested_batches_of_inference_input_mock: MagicMock, + requests_mock: Mocker, + legacy_endpoints: bool, + endpoint_to_use: str, + parameter_name: str, +) -> None: + # given + api_url = "http://some.com" + http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) + requests_mock.post( + f"{api_url}{endpoint_to_use}", + json={ + "outputs": [{"some": 3}], + }, + ) + load_nested_batches_of_inference_input_mock.side_effect = [ + [ + [("base64_image_1", 0.5), ("base64_image_2", 0.5)], + [("base64_image_3", 0.5), ("base64_image_4", 0.5), ("base64_image_5", 0.5)], + [("base64_image_6", 0.5)], + ], + ] + method = ( + http_client.infer_from_workflow + if legacy_endpoints + else http_client.run_workflow + ) + + # when + result = method( + workspace_name="my_workspace", + images={"image_1": [["1", "2"], ["3", "4", "5"], ["6"]]}, + parameters={ + "batch_oriented_param": [ + ["a", "b"], + ["c", "d", "e"], + ["f"] + ] + }, + **{parameter_name: "my_workflow"}, + ) + + # then + assert result == [{"some": 3}], "Response from API must be properly decoded" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key", + "use_cache": True, + "enable_profiling": False, + "inputs": { + "image_1": [ + [ + {"type": "base64", "value": "base64_image_1"}, + {"type": "base64", "value": "base64_image_2"}, + ], + [ + {"type": "base64", "value": "base64_image_3"}, + {"type": "base64", "value": "base64_image_4"}, + {"type": "base64", "value": "base64_image_5"}, + ], + [ + {"type": "base64", "value": "base64_image_6"}, + ], + ], + "batch_oriented_param": [ + ["a", "b"], + ["c", "d", "e"], + ["f"], + ], + }, + }, "Request payload must contain api key, inputs and no cache flag" + + @pytest.mark.parametrize( "legacy_endpoints, endpoint_to_use, parameter_name", [ @@ -3849,13 +3930,13 @@ def test_infer_from_workflow_when_both_workflow_name_and_specs_given() -> None: ) -@mock.patch.object(client, "load_static_inference_input") +@mock.patch.object(client, "load_nested_batches_of_inference_input") @pytest.mark.parametrize( "legacy_endpoints, endpoint_to_use", [(True, "/infer/workflows"), (False, "/workflows/run")], ) def test_infer_from_workflow_when_custom_workflow_with_both_parameters_and_excluded_fields_given( - load_static_inference_input_mock: MagicMock, + load_nested_batches_of_inference_input_mock: MagicMock, requests_mock: Mocker, legacy_endpoints: bool, endpoint_to_use: str, @@ -3869,8 +3950,8 @@ def test_infer_from_workflow_when_custom_workflow_with_both_parameters_and_exclu "outputs": [{"some": 3}], }, ) - load_static_inference_input_mock.side_effect = [ - [("base64_image_1", 0.5)], + 
load_nested_batches_of_inference_input_mock.side_effect = [ + ("base64_image_1", 0.5), [("base64_image_2", 0.5), ("base64_image_3", 0.5)], ] method = ( diff --git a/tests/inference_sdk/unit_tests/http/utils/test_loaders.py b/tests/inference_sdk/unit_tests/http/utils/test_loaders.py index 2d83ec9c0..55dc9d21d 100644 --- a/tests/inference_sdk/unit_tests/http/utils/test_loaders.py +++ b/tests/inference_sdk/unit_tests/http/utils/test_loaders.py @@ -22,7 +22,7 @@ load_static_inference_input, load_static_inference_input_async, load_stream_inference_input, - uri_is_http_link, + uri_is_http_link, load_nested_batches_of_inference_input, ) @@ -650,3 +650,63 @@ def test_load_stream_inference_input( get_video_frames_generator_mock.assert_called_once_with( source_path="/some/video.mp4" ) + + +@mock.patch.object(loaders, "load_static_inference_input") +def test_load_nested_batches_of_inference_input_when_single_element_is_given( + load_static_inference_input_mock: MagicMock, +) -> None: + # given + load_static_inference_input_mock.side_effect = [ + ["image_1"] + ] + + # when + result = load_nested_batches_of_inference_input( + inference_input="my_image", + ) + + # then + assert result == "image_1", "Expected direct result from load_static_inference_input()" + + +@mock.patch.object(loaders, "load_static_inference_input") +def test_load_nested_batches_of_inference_input_when_1d_batch_is_given( + load_static_inference_input_mock: MagicMock, +) -> None: + # given + load_static_inference_input_mock.side_effect = [ + ["image_1"], + ["image_2"], + ["image_3"] + ] + + # when + result = load_nested_batches_of_inference_input( + inference_input=["1", "2", "3"], + ) + + # then + assert result == ["image_1", "image_2", "image_3"], "Expected direct result from load_static_inference_input()" + + +@mock.patch.object(loaders, "load_static_inference_input") +def test_load_nested_batches_of_inference_input_when_nested_batch_is_given( + load_static_inference_input_mock: MagicMock, +) -> None: + # given + load_static_inference_input_mock.side_effect = [ + ["image_1"], + ["image_2"], + ["image_3"], + ["image_4"], + ["image_5"], + ] + + # when + result = load_nested_batches_of_inference_input( + inference_input=[["1", "2"], ["3"], [["4", "5"]]], + ) + + # then + assert result == [["image_1", "image_2"], ["image_3"], [["image_4", "image_5"]]] diff --git a/tests/inference_sdk/unit_tests/http/utils/test_requests.py b/tests/inference_sdk/unit_tests/http/utils/test_requests.py index b4d131895..59ff545be 100644 --- a/tests/inference_sdk/unit_tests/http/utils/test_requests.py +++ b/tests/inference_sdk/unit_tests/http/utils/test_requests.py @@ -5,7 +5,7 @@ API_KEY_PATTERN, api_key_safe_raise_for_status, deduct_api_key, - inject_images_into_payload, + inject_images_into_payload, inject_nested_batches_of_images_into_payload, ) @@ -146,3 +146,49 @@ def test_inject_images_into_payload_when_payload_key_is_specified() -> None: "my": "payload", "prompt": {"type": "base64", "value": "image_payload_1"}, }, "Payload is expected to be extended with the content of only single image under `prompt` key" + + +def test_inject_nested_batches_of_images_into_payload_when_single_image_given() -> None: + # when + result = inject_nested_batches_of_images_into_payload( + payload={}, + encoded_images=("img1", None), + ) + + # then + assert result == {"image": {"type": "base64", "value": "img1"}} + + +def test_inject_nested_batches_of_images_into_payload_when_1d_batch_of_images_given() -> None: + # when + result = 
inject_nested_batches_of_images_into_payload( + payload={}, + encoded_images=[("img1", None), ("img2", None)], + ) + + # then + assert result == { + "image": [ + {"type": "base64", "value": "img1"}, + {"type": "base64", "value": "img2"}, + ] + } + + +def test_inject_nested_batches_of_images_into_payload_when_nested_batch_of_images_given() -> None: + # when + result = inject_nested_batches_of_images_into_payload( + payload={}, + encoded_images=[[("img1", None)], [("img2", None), ("img3", None)]], + ) + + # then + assert result == { + "image": [ + [{"type": "base64", "value": "img1"}], + [ + {"type": "base64", "value": "img2"}, + {"type": "base64", "value": "img3"}, + ], + ] + } From 52ddb9f6224e3b3e77946dc12207b7a72facd4a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Tue, 5 Nov 2024 13:15:03 +0100 Subject: [PATCH 17/67] Add changes to align batches and scalars regarding their place in ecosystem --- docs/workflows/create_workflow_block.md | 100 +++++----- docs/workflows/definitions.md | 4 +- docs/workflows/execution_engine_changelog.md | 22 +-- .../analytics/data_aggregator/v1.py | 6 +- .../core_steps/analytics/line_counter/v1.py | 6 +- .../core_steps/analytics/line_counter/v2.py | 6 +- .../core_steps/analytics/path_deviation/v1.py | 6 +- .../core_steps/analytics/path_deviation/v2.py | 6 +- .../core_steps/analytics/time_in_zone/v1.py | 6 +- .../core_steps/analytics/time_in_zone/v2.py | 6 +- .../classical_cv/distance_measurement/v1.py | 4 +- .../classical_cv/pixel_color_count/v1.py | 4 +- .../classical_cv/sift_comparison/v1.py | 6 +- .../classical_cv/sift_comparison/v2.py | 6 +- .../classical_cv/size_measurement/v1.py | 6 +- .../core_steps/flow_control/continue_if/v1.py | 6 +- .../flow_control/rate_limiter/v1.py | 4 +- .../workflows/core_steps/formatters/csv/v1.py | 4 +- .../core_steps/formatters/expression/v1.py | 6 +- .../first_non_empty_or_default/v1.py | 4 +- .../core_steps/formatters/json_parser/v1.py | 4 +- .../formatters/property_definition/v1.py | 4 +- .../formatters/vlm_as_classifier/v1.py | 6 +- .../formatters/vlm_as_detector/v1.py | 6 +- .../detections_classes_replacement/v1.py | 16 +- .../fusion/detections_consensus/v1.py | 4 +- .../core_steps/fusion/detections_stitch/v1.py | 4 +- .../fusion/dimension_collapse/v1.py | 4 +- .../models/foundation/florence2/v1.py | 4 +- .../models/foundation/segment_anything2/v1.py | 4 +- .../foundation/stability_ai/inpainting/v1.py | 18 +- .../core_steps/sinks/email_notification/v1.py | 8 +- .../core_steps/sinks/local_file/v1.py | 4 +- .../sinks/roboflow/custom_metadata/v1.py | 6 +- .../sinks/roboflow/dataset_upload/v1.py | 4 +- .../sinks/roboflow/dataset_upload/v2.py | 4 +- .../workflows/core_steps/sinks/webhook/v1.py | 12 +- .../transformations/bounding_rect/v1.py | 4 +- .../transformations/byte_tracker/v1.py | 4 +- .../transformations/byte_tracker/v2.py | 4 +- .../transformations/byte_tracker/v3.py | 4 +- .../transformations/detection_offset/v1.py | 4 +- .../transformations/detections_filter/v1.py | 8 +- .../detections_transformation/v1.py | 8 +- .../transformations/dynamic_crop/v1.py | 6 +- .../transformations/dynamic_zones/v1.py | 4 +- .../perspective_correction/v1.py | 6 +- .../stabilize_detections/v1.py | 4 +- .../stitch_ocr_detections/v1.py | 4 +- .../core_steps/visualizations/common/base.py | 4 +- .../core_steps/visualizations/halo/v1.py | 4 +- .../core_steps/visualizations/keypoint/v1.py | 4 +- .../core_steps/visualizations/line_zone/v1.py | 8 +- .../core_steps/visualizations/mask/v1.py | 4 +- 
.../visualizations/model_comparison/v1.py | 6 +- .../core_steps/visualizations/polygon/v1.py | 4 +- .../visualizations/polygon_zone/v1.py | 4 +- .../visualizations/reference_path/v1.py | 4 +- .../execution_engine/entities/base.py | 12 +- .../execution_engine/entities/types.py | 24 ++- .../introspection/connections_discovery.py | 4 +- .../v1/compiler/graph_constructor.py | 8 +- .../v1/dynamic_blocks/block_assembler.py | 4 +- .../step_input_assembler.py | 13 +- .../__init__.py | 16 +- .../secret_store_plugin/__init__.py | 144 +++++++++++++++ ...st_workflow_with_arbitrary_batch_inputs.py | 62 ++++--- .../test_workflow_with_scalar_selectors.py | 174 ++++++++++++++++++ .../executor/test_runtime_input_assembler.py | 55 +++--- 69 files changed, 652 insertions(+), 296 deletions(-) create mode 100644 tests/workflows/integration_tests/execution/stub_plugins/secret_store_plugin/__init__.py create mode 100644 tests/workflows/integration_tests/execution/test_workflow_with_scalar_selectors.py diff --git a/docs/workflows/create_workflow_block.md b/docs/workflows/create_workflow_block.md index 58647516b..70639df6e 100644 --- a/docs/workflows/create_workflow_block.md +++ b/docs/workflows/create_workflow_block.md @@ -320,7 +320,7 @@ we will be creating SIMD block. WorkflowBlockManifest, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, IMAGE_KIND, ) @@ -331,10 +331,10 @@ we will be creating SIMD block. # all properties apart from `type` and `name` are treated as either # definitions of batch-oriented data to be processed by block or its # parameters that influence execution of steps created based on block - image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) ``` @@ -343,7 +343,7 @@ we will be creating SIMD block. * line `18` defines `image_1` parameter - as manifest is prototype for Workflow Definition, the only way to tell about image to be used by step is to provide selector - we have - a specialised type in core library that can be used - `BatchOfDataSelector`. + a specialised type in core library that can be used - `BatchSelector`. If you look deeper into codebase, you will discover this is type alias constructor function - telling `pydantic` to expect string matching `$inputs.{name}` and `$steps.{name}.*` patterns respectively, additionally providing extra schema field metadata that tells Workflows ecosystem components that the `kind` of data behind selector is @@ -393,7 +393,7 @@ batch-oriented and will affect all batch elements passed to the step. WorkflowBlockManifest, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, @@ -407,10 +407,10 @@ batch-oriented and will affect all batch elements passed to the step. 
# all properties apart from `type` and `name` are treated as either # definitions of batch-oriented data to be processed by block or its # parameters that influence execution of steps created based on block - image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -483,7 +483,7 @@ run the block. OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, @@ -495,10 +495,10 @@ run the block. class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -565,7 +565,7 @@ in their inputs OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, @@ -578,10 +578,10 @@ in their inputs class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -637,7 +637,7 @@ block. WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, @@ -648,10 +648,10 @@ block. class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -729,7 +729,7 @@ it can produce meaningful results. WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, @@ -740,10 +740,10 @@ it can produce meaningful results. 
class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -878,7 +878,7 @@ on how to use it for your block. Batch, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, IMAGE_KIND, FloatZeroToOne, WorkflowParameterSelector, @@ -889,10 +889,10 @@ on how to use it for your block. class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchOfDataSelector(kind=[IMAGE_KIND]) = Field( + image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ @@ -996,7 +996,7 @@ batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-con ) from inference.core.workflows.execution_engine.entities.types import ( StepSelector, - BatchOfDataSelector, + BatchSelector, IMAGE_KIND, ) from inference.core.workflows.execution_engine.v1.entities import FlowControl @@ -1011,7 +1011,7 @@ batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-con class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/random_continue@v1"] name: str - image: BatchOfDataSelector(kind=[IMAGE_KIND]) = ImageInputField + image: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField probability: float next_steps: List[StepSelector] = Field( description="Reference to step which shall be executed if expression evaluates to true", @@ -1163,7 +1163,7 @@ def run(self, predictions: List[dict]) -> BlockResult: OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, OBJECT_DETECTION_PREDICTION_KIND, ) from inference.core.workflows.prototypes.block import ( @@ -1177,7 +1177,7 @@ def run(self, predictions: List[dict]) -> BlockResult: class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/fusion_of_predictions@v1"] name: str - predictions: List[BatchOfDataSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND])] = Field( + predictions: List[BatchSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND])] = Field( description="Selectors to step outputs", examples=[["$steps.model_1.predictions", "$steps.model_2.predictions"]], ) @@ -1248,7 +1248,7 @@ keys serve as names for those selectors. OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -1262,7 +1262,7 @@ keys serve as names for those selectors. 
class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/named_selectors_example@v1"] name: str - data: Dict[str, BatchOfDataSelector(), WorkflowParameterSelector()] = Field( + data: Dict[str, BatchSelector(), WorkflowParameterSelector()] = Field( description="Selectors to step outputs", examples=[{"a": $steps.model_1.predictions", "b": "$Inputs.data"}], ) @@ -1367,7 +1367,7 @@ the method signatures. from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1377,8 +1377,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_block/dynamic_crop@v1"] - image: BatchOfDataSelector(kind=[IMAGE_KIND]) - predictions: BatchOfDataSelector( + image: BatchSelector(kind=[IMAGE_KIND]) + predictions: BatchSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @@ -1454,7 +1454,7 @@ the method signatures. from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1465,8 +1465,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/tile_detections@v1"] - crops: BatchOfDataSelector(kind=[IMAGE_KIND]) - crops_predictions: BatchOfDataSelector( + crops: BatchSelector(kind=[IMAGE_KIND]) + crops_predictions: BatchSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND] ) @@ -1538,7 +1538,7 @@ the method signatures. ) from inference.core.workflows.execution_engine.entities.types import ( OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, IMAGE_KIND, ) from inference.core.workflows.prototypes.block import ( @@ -1550,8 +1550,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/stitch@v1"] - image: BatchOfDataSelector(kind=[IMAGE_KIND]) - image_predictions: BatchOfDataSelector( + image: BatchSelector(kind=[IMAGE_KIND]) + image_predictions: BatchSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @@ -1637,7 +1637,7 @@ the method signatures. from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1647,8 +1647,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_block/dynamic_crop@v1"] - image: BatchOfDataSelector(kind=[IMAGE_KIND]) - predictions: BatchOfDataSelector( + image: BatchSelector(kind=[IMAGE_KIND]) + predictions: BatchSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @@ -1738,7 +1738,7 @@ the method signatures. from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1749,8 +1749,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/tile_detections@v1"] - images_crops: BatchOfDataSelector(kind=[IMAGE_KIND]) - crops_predictions: BatchOfDataSelector( + images_crops: BatchSelector(kind=[IMAGE_KIND]) + crops_predictions: BatchSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND] ) @@ -1832,7 +1832,7 @@ the method signatures. 
) from inference.core.workflows.execution_engine.entities.types import ( OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, IMAGE_KIND, ) from inference.core.workflows.prototypes.block import ( @@ -1844,8 +1844,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/stitch@v1"] - images: BatchOfDataSelector(kind=[IMAGE_KIND]) - images_predictions: BatchOfDataSelector( + images: BatchSelector(kind=[IMAGE_KIND]) + images_predictions: BatchSelector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @@ -1946,7 +1946,7 @@ that even if some elements are empty, the output lacks missing elements making i Batch, OutputDefinition, ) - from inference.core.workflows.execution_engine.entities.types import BatchOfDataSelector + from inference.core.workflows.execution_engine.entities.types import BatchSelector from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -1956,7 +1956,7 @@ that even if some elements are empty, the output lacks missing elements making i class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/first_non_empty_or_default@v1"] - data: List[BatchOfDataSelector()] + data: List[BatchSelector()] default: Any @classmethod @@ -2029,7 +2029,7 @@ Let's see how to request init parameters while defining block. Batch, OutputDefinition, ) - from inference.core.workflows.execution_engine.entities.types import BatchOfDataSelector + from inference.core.workflows.execution_engine.entities.types import BatchSelector from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -2039,7 +2039,7 @@ Let's see how to request init parameters while defining block. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/example@v1"] - data: List[BatchOfDataSelector()] + data: List[BatchSelector()] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/docs/workflows/definitions.md b/docs/workflows/definitions.md index 1bd3eed46..70e97d2a5 100644 --- a/docs/workflows/definitions.md +++ b/docs/workflows/definitions.md @@ -136,7 +136,7 @@ be more and more useful. { "inputs": [ { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "image", "kind": ["image"] } @@ -151,7 +151,7 @@ be more and more useful. { "inputs": [ { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "image", "kind": ["image"], "dimensionality": 2 diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index 647a00bc5..8553d83c3 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -54,8 +54,8 @@ any *kind***, contrary to versions prior `v1.3.0`, which could only take `image` as batch-oriented inputs (as a result of unfortunate and not-needed coupling of kind to internal data format introduced **at the level of Execution Engine**). As a result of the change: - * **new input type was introduced:** `WorkflowDataBatch` should be used from now one to denote - batch-oriented inputs (and clearly separate them from `WorkflowParameters`). `WorkflowDataBatch` + * **new input type was introduced:** `WorkflowBatchInput` should be used from now one to denote + batch-oriented inputs (and clearly separate them from `WorkflowParameters`). `WorkflowBatchInput` let users define both *[kind](/workflows/kinds/)* of the data and it's *[dimensionality](/workflows/workflow_execution/#steps-interactions-with-data)*. 
New input type is effectively a superset of all previous batch-oriented inputs: `WorkflowImage` and @@ -65,11 +65,11 @@ format introduced **at the level of Execution Engine**). As a result of the chan properly. This may not be the case in the future, as in most cases batch-oriented data *kind* may be inferred by compiler (yet this feature is not implemented for now). - * **new selector type annotation was introduced** - `BatchOfDataSelector` which is supposed to + * **new selector type annotation was introduced** - `BatchSelector` which is supposed to replace `StepOutputSelector`, `WorkflowImageSelector`, `StepOutputImageSelector` and `WorkflowVideoMetadataSelector` in block manifests, allowing batch-oriented data to be used as block input, regardless of whether it comes from user inputs or outputs of other blocks. Mentioned old annotation types **should be assumed deprecated**, - we advise to migrate into `BatchOfDataSelector`, but that is not hard requirement. + we advise to migrate into `BatchSelector`, but that is not hard requirement. * As a result of the changes, it is now possible to **split any arbitrary workflows into multiple ones executing subsets of steps**, enabling building such tools as debuggers. @@ -119,7 +119,7 @@ subsets of steps**, enabling building such tools as debuggers. ??? Hint "New type annotation for selectors" - Blocks manifest may **optionally** be updated to use `BatchOfDataSelector` in the following way: + Blocks manifest may **optionally** be updated to use `BatchSelector` in the following way: ```python from typing import Union @@ -151,14 +151,14 @@ subsets of steps**, enabling building such tools as debuggers. from inference.core.workflows.execution_engine.entities.types import ( INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, IMAGE_KIND, ) class BlockManifest(WorkflowBlockManifest): - reference_image: BatchOfDataSelector(kind=[IMAGE_KIND]) - predictions: BatchOfDataSelector( + reference_image: BatchSelector(kind=[IMAGE_KIND]) + predictions: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -170,7 +170,7 @@ subsets of steps**, enabling building such tools as debuggers. ??? Hint "New inputs in Workflows definitions" Anyone that used either `WorkflowImage` or `WorkflowVideoMetadata` inputs in their - Workflows definition may **optionally** migrate into `WorkflowDataBatch`. The transition + Workflows definition may **optionally** migrate into `WorkflowBatchInput`. The transition is illustrated below: ```json @@ -187,12 +187,12 @@ subsets of steps**, enabling building such tools as debuggers. 
{ "inputs": [ { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "image", "kind": ["image"] }, { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "video_metadata", "kind": ["video_metadata"] } diff --git a/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py b/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py index d8d852658..dc3dd3f3a 100644 --- a/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py +++ b/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py @@ -18,7 +18,7 @@ FLOAT_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -194,9 +194,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/data_aggregator@v1"] data: Dict[ str, - Union[ - WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() - ], + Union[WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector()], ] = Field( description="References data to be used to construct each and every column", examples=[ diff --git a/inference/core/workflows/core_steps/analytics/line_counter/v1.py b/inference/core/workflows/core_steps/analytics/line_counter/v1.py index b31b41195..f2cd9eeb8 100644 --- a/inference/core/workflows/core_steps/analytics/line_counter/v1.py +++ b/inference/core/workflows/core_steps/analytics/line_counter/v1.py @@ -14,7 +14,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, WorkflowVideoMetadataSelector, ) @@ -50,7 +50,7 @@ class LineCounterManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/line_counter@v1"] metadata: WorkflowVideoMetadataSelector - detections: BatchOfDataSelector( + detections: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -60,7 +60,7 @@ class LineCounterManifest(WorkflowBlockManifest): examples=["$steps.object_detection_model.predictions"], ) - line_segment: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + line_segment: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points. 
For line [[0, 100], [100, 100]] line will count objects entering from the bottom as IN", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) diff --git a/inference/core/workflows/core_steps/analytics/line_counter/v2.py b/inference/core/workflows/core_steps/analytics/line_counter/v2.py index e54edfc52..09984112c 100644 --- a/inference/core/workflows/core_steps/analytics/line_counter/v2.py +++ b/inference/core/workflows/core_steps/analytics/line_counter/v2.py @@ -14,7 +14,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -55,7 +55,7 @@ class LineCounterManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/line_counter@v2"] image: WorkflowImageSelector - detections: BatchOfDataSelector( + detections: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -65,7 +65,7 @@ class LineCounterManifest(WorkflowBlockManifest): examples=["$steps.object_detection_model.predictions"], ) - line_segment: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + line_segment: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points. For line [[0, 100], [100, 100]] line will count objects entering from the bottom as IN", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) diff --git a/inference/core/workflows/core_steps/analytics/path_deviation/v1.py b/inference/core/workflows/core_steps/analytics/path_deviation/v1.py index c846b9b46..91f340683 100644 --- a/inference/core/workflows/core_steps/analytics/path_deviation/v1.py +++ b/inference/core/workflows/core_steps/analytics/path_deviation/v1.py @@ -17,7 +17,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, WorkflowVideoMetadataSelector, ) @@ -53,7 +53,7 @@ class PathDeviationManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/path_deviation_analytics@v1"] metadata: WorkflowVideoMetadataSelector - detections: BatchOfDataSelector( + detections: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -67,7 +67,7 @@ class PathDeviationManifest(WorkflowBlockManifest): default="CENTER", examples=["CENTER"], ) - reference_path: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + reference_path: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.expected_path"], ) diff --git a/inference/core/workflows/core_steps/analytics/path_deviation/v2.py b/inference/core/workflows/core_steps/analytics/path_deviation/v2.py index 06c2611ff..48632f2a8 100644 --- a/inference/core/workflows/core_steps/analytics/path_deviation/v2.py +++ b/inference/core/workflows/core_steps/analytics/path_deviation/v2.py @@ -17,7 +17,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -54,7 +54,7 @@ class PathDeviationManifest(WorkflowBlockManifest): ) type: 
Literal["roboflow_core/path_deviation_analytics@v2"] image: WorkflowImageSelector - detections: BatchOfDataSelector( + detections: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -68,7 +68,7 @@ class PathDeviationManifest(WorkflowBlockManifest): default="CENTER", examples=["CENTER"], ) - reference_path: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + reference_path: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.expected_path"], ) diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py index f5cc36f92..e6e09d8a4 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py @@ -19,7 +19,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, StepOutputImageSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -58,7 +58,7 @@ class TimeInZoneManifest(WorkflowBlockManifest): examples=["$inputs.image", "$steps.cropping.crops"], ) metadata: WorkflowVideoMetadataSelector - detections: BatchOfDataSelector( + detections: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -67,7 +67,7 @@ class TimeInZoneManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - zone: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Zones (one for each batch) in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.zones"], ) diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py index 623a88c28..a5bcbe551 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py @@ -18,7 +18,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -59,7 +59,7 @@ class TimeInZoneManifest(WorkflowBlockManifest): description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], ) - detections: BatchOfDataSelector( + detections: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -68,7 +68,7 @@ class TimeInZoneManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - zone: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Zones (one for each batch) in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.zones"], ) diff --git 
a/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py b/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py index 7422d63f5..002af0c76 100644 --- a/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py @@ -10,7 +10,7 @@ INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -46,7 +46,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/distance_measurement@v1"] - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py index e803b171f..56a8c2c34 100644 --- a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py @@ -12,7 +12,7 @@ INTEGER_KIND, RGB_COLOR_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, StepOutputImageSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -48,7 +48,7 @@ class ColorPixelCountManifest(WorkflowBlockManifest): ) target_color: Union[ WorkflowParameterSelector(kind=[STRING_KIND]), - BatchOfDataSelector(kind=[RGB_COLOR_KIND]), + BatchSelector(kind=[RGB_COLOR_KIND]), str, Tuple[int, int, int], ] = Field( diff --git a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py index 1bca70f36..dbabbfe5a 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py @@ -9,7 +9,7 @@ BOOLEAN_KIND, INTEGER_KIND, NUMPY_ARRAY_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -39,11 +39,11 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/sift_comparison@v1"] - descriptor_1: BatchOfDataSelector(kind=[NUMPY_ARRAY_KIND]) = Field( + descriptor_1: BatchSelector(kind=[NUMPY_ARRAY_KIND]) = Field( description="Reference to SIFT descriptors from the first image to compare", examples=["$steps.sift.descriptors"], ) - descriptor_2: BatchOfDataSelector(kind=[NUMPY_ARRAY_KIND]) = Field( + descriptor_2: BatchSelector(kind=[NUMPY_ARRAY_KIND]) = Field( description="Reference to SIFT descriptors from the second image to compare", examples=["$steps.sift.descriptors"], ) diff --git a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py index e3ba85b19..c2717bd01 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py +++ b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py @@ -17,7 +17,7 @@ INTEGER_KIND, NUMPY_ARRAY_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, StepOutputImageSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -52,7 +52,7 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): input_1: Union[ WorkflowImageSelector, StepOutputImageSelector, - BatchOfDataSelector(kind=[NUMPY_ARRAY_KIND]), + BatchSelector(kind=[NUMPY_ARRAY_KIND]), ] = Field( 
description="Reference to Image or SIFT descriptors from the first image to compare", examples=["$inputs.image1", "$steps.sift.descriptors"], @@ -60,7 +60,7 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): input_2: Union[ WorkflowImageSelector, StepOutputImageSelector, - BatchOfDataSelector(kind=[NUMPY_ARRAY_KIND]), + BatchSelector(kind=[NUMPY_ARRAY_KIND]), ] = Field( description="Reference to Image or SIFT descriptors from the second image to compare", examples=["$inputs.image2", "$steps.sift.descriptors"], diff --git a/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py b/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py index f75ed53ee..e8807fb5e 100644 --- a/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py @@ -14,7 +14,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -55,7 +55,7 @@ class SizeMeasurementManifest(WorkflowBlockManifest): } ) type: Literal[f"roboflow_core/size_measurement@v1"] - reference_predictions: BatchOfDataSelector( + reference_predictions: BatchSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, @@ -64,7 +64,7 @@ class SizeMeasurementManifest(WorkflowBlockManifest): description="Predictions from the reference object model", examples=["$segmentation.reference_predictions"], ) - object_predictions: BatchOfDataSelector( + object_predictions: BatchSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/flow_control/continue_if/v1.py b/inference/core/workflows/core_steps/flow_control/continue_if/v1.py index 3eb011e94..8cc596166 100644 --- a/inference/core/workflows/core_steps/flow_control/continue_if/v1.py +++ b/inference/core/workflows/core_steps/flow_control/continue_if/v1.py @@ -10,7 +10,7 @@ ) from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, StepSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -63,9 +63,7 @@ class BlockManifest(WorkflowBlockManifest): ) evaluation_parameters: Dict[ str, - Union[ - WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() - ], + Union[WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector()], ] = Field( description="References to additional parameters that may be provided in runtime to parametrise operations", examples=[{"left": "$inputs.some"}], diff --git a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py index babf16677..a8c4a2436 100644 --- a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py +++ b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py @@ -5,7 +5,7 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, StepSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -62,7 +62,7 @@ class RateLimiterManifest(WorkflowBlockManifest): ge=0.0, ) depends_on: Union[ - WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() + 
WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector() ] = Field( description="Reference to any output of the the step which immediately preceeds this branch.", examples=["$steps.model"], diff --git a/inference/core/workflows/core_steps/formatters/csv/v1.py b/inference/core/workflows/core_steps/formatters/csv/v1.py index da13ca5ab..330c3477d 100644 --- a/inference/core/workflows/core_steps/formatters/csv/v1.py +++ b/inference/core/workflows/core_steps/formatters/csv/v1.py @@ -17,7 +17,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -140,7 +140,7 @@ class BlockManifest(WorkflowBlockManifest): Union[ WorkflowImageSelector, WorkflowParameterSelector(), - BatchOfDataSelector(), + BatchSelector(), str, int, float, diff --git a/inference/core/workflows/core_steps/formatters/expression/v1.py b/inference/core/workflows/core_steps/formatters/expression/v1.py index 51a951b66..b9764feaf 100644 --- a/inference/core/workflows/core_steps/formatters/expression/v1.py +++ b/inference/core/workflows/core_steps/formatters/expression/v1.py @@ -16,7 +16,7 @@ ) from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -109,9 +109,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/expression@v1", "Expression"] data: Dict[ str, - Union[ - WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() - ], + Union[WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector()], ] = Field( description="References data to be used to construct results", examples=[ diff --git a/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py b/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py index b50da32cd..74716ea59 100644 --- a/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py +++ b/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py @@ -6,7 +6,7 @@ Batch, OutputDefinition, ) -from inference.core.workflows.execution_engine.entities.types import BatchOfDataSelector +from inference.core.workflows.execution_engine.entities.types import BatchSelector from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -35,7 +35,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal[ "roboflow_core/first_non_empty_or_default@v1", "FirstNonEmptyOrDefault" ] - data: List[BatchOfDataSelector()] = Field( + data: List[BatchSelector()] = Field( description="Reference data to replace empty values", examples=["$steps.my_step.predictions"], min_items=1, diff --git a/inference/core/workflows/core_steps/formatters/json_parser/v1.py b/inference/core/workflows/core_steps/formatters/json_parser/v1.py index a8dec3c80..23c82a41f 100644 --- a/inference/core/workflows/core_steps/formatters/json_parser/v1.py +++ b/inference/core/workflows/core_steps/formatters/json_parser/v1.py @@ -10,7 +10,7 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, LANGUAGE_MODEL_OUTPUT_KIND, - BatchOfDataSelector, + BatchSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -63,7 +63,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/json_parser@v1"] - 
raw_json: BatchOfDataSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( + raw_json: BatchSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( description="The string with raw JSON to parse.", examples=[["$steps.lmm.output"]], ) diff --git a/inference/core/workflows/core_steps/formatters/property_definition/v1.py b/inference/core/workflows/core_steps/formatters/property_definition/v1.py index 88ddde4ed..f8f8eeef7 100644 --- a/inference/core/workflows/core_steps/formatters/property_definition/v1.py +++ b/inference/core/workflows/core_steps/formatters/property_definition/v1.py @@ -10,7 +10,7 @@ ) from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( @@ -57,7 +57,7 @@ class BlockManifest(WorkflowBlockManifest): "PropertyDefinition", "PropertyExtraction", ] - data: Union[WorkflowImageSelector, BatchOfDataSelector()] = Field( + data: Union[WorkflowImageSelector, BatchSelector()] = Field( description="Reference data to extract property from", examples=["$steps.my_step.predictions"], ) diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py index 478f776cb..4f236a4c9 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py @@ -16,7 +16,7 @@ LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, StepOutputImageSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -70,14 +70,14 @@ class BlockManifest(WorkflowBlockManifest): description="The image which was the base to generate VLM prediction", examples=["$inputs.image", "$steps.cropping.crops"], ) - vlm_output: BatchOfDataSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( + vlm_output: BatchSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( title="VLM Output", description="The string with raw classification prediction to parse.", examples=[["$steps.lmm.output"]], ) classes: Union[ WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), - BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), + BatchSelector(kind=[LIST_OF_VALUES_KIND]), List[str], ] = Field( description="List of all classes used by the model, required to " diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py index 2e527e088..b0fde2d21 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py @@ -31,7 +31,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, StepOutputImageSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -98,7 +98,7 @@ class BlockManifest(WorkflowBlockManifest): description="The image which was the base to generate VLM prediction", examples=["$inputs.image", "$steps.cropping.crops"], ) - vlm_output: BatchOfDataSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( + vlm_output: BatchSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( title="VLM Output", description="The string with raw classification prediction to parse.", examples=[["$steps.lmm.output"]], @@ -106,7 +106,7 @@ class BlockManifest(WorkflowBlockManifest): classes: 
Optional[ Union[ WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), - BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), + BatchSelector(kind=[LIST_OF_VALUES_KIND]), List[str], ] ] = Field( diff --git a/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py b/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py index 7856e7870..06138a7ab 100644 --- a/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py @@ -19,7 +19,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -54,7 +54,7 @@ class BlockManifest(WorkflowBlockManifest): "roboflow_core/detections_classes_replacement@v1", "DetectionsClassesReplacement", ] - object_detection_predictions: BatchOfDataSelector( + object_detection_predictions: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -65,12 +65,12 @@ class BlockManifest(WorkflowBlockManifest): description="The output of a detection model describing the bounding boxes that will have classes replaced.", examples=["$steps.my_object_detection_model.predictions"], ) - classification_predictions: BatchOfDataSelector( - kind=[CLASSIFICATION_PREDICTION_KIND] - ) = Field( - title="Classification results for crops", - description="The output of classification model for crops taken based on RoIs pointed as the other parameter", - examples=["$steps.my_classification_model.predictions"], + classification_predictions: BatchSelector(kind=[CLASSIFICATION_PREDICTION_KIND]) = ( + Field( + title="Classification results for crops", + description="The output of classification model for crops taken based on RoIs pointed as the other parameter", + examples=["$steps.my_classification_model.predictions"], + ) ) @classmethod diff --git a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py index 47f50ec37..e4d125334 100644 --- a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py @@ -35,7 +35,7 @@ KEYPOINT_DETECTION_PREDICTION_KIND, LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, FloatZeroToOne, WorkflowParameterSelector, ) @@ -81,7 +81,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/detections_consensus@v1", "DetectionsConsensus"] predictions_batches: List[ - BatchOfDataSelector( + BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py index 81b918cc2..77fd4e62d 100644 --- a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py @@ -23,7 +23,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, FloatZeroToOne, StepOutputImageSelector, WorkflowImageSelector, @@ -63,7 +63,7 @@ class BlockManifest(WorkflowBlockManifest): description="Image that was origin to take crops that yielded predictions.", examples=["$inputs.image"], ) 
- predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py index d59364e43..e1b3a974b 100644 --- a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py +++ b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py @@ -8,7 +8,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( LIST_OF_VALUES_KIND, - BatchOfDataSelector, + BatchSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -42,7 +42,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/dimension_collapse@v1", "DimensionCollapse"] - data: BatchOfDataSelector() = Field( + data: BatchSelector() = Field( description="Reference to step outputs at depth level n to be concatenated and moved into level n-1.", examples=["$steps.ocr_step.results"], ) diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py index cedd65eea..55cebeee0 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py @@ -22,7 +22,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, ImageInputField, StepOutputImageSelector, WorkflowImageSelector, @@ -218,7 +218,7 @@ class BlockManifest(WorkflowBlockManifest): Union[ List[int], List[float], - BatchOfDataSelector( + BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py index 20f73d630..d5de42c4b 100644 --- a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py @@ -37,7 +37,7 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, ImageInputField, StepOutputImageSelector, WorkflowImageSelector, @@ -82,7 +82,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/segment_anything@v1"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField boxes: Optional[ - BatchOfDataSelector( + BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py index 1be95d083..09c18f17f 100644 --- a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py @@ -19,7 +19,7 @@ IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, StepOutputImageSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -68,16 +68,16 @@ class BlockManifest(WorkflowBlockManifest): description="The image which was the base to generate VLM prediction", examples=["$inputs.image", "$steps.cropping.crops"], ) - segmentation_mask: BatchOfDataSelector( - 
kind=[INSTANCE_SEGMENTATION_PREDICTION_KIND] - ) = Field( - name="Segmentation Mask", - description="Segmentation masks", - examples=["$steps.model.predictions"], + segmentation_mask: BatchSelector(kind=[INSTANCE_SEGMENTATION_PREDICTION_KIND]) = ( + Field( + name="Segmentation Mask", + description="Segmentation masks", + examples=["$steps.model.predictions"], + ) ) prompt: Union[ WorkflowParameterSelector(kind=[STRING_KIND]), - BatchOfDataSelector(kind=[STRING_KIND]), + BatchSelector(kind=[STRING_KIND]), str, ] = Field( description="Prompt to inpainting model (what you wish to see)", @@ -86,7 +86,7 @@ class BlockManifest(WorkflowBlockManifest): negative_prompt: Optional[ Union[ WorkflowParameterSelector(kind=[STRING_KIND]), - BatchOfDataSelector(kind=[STRING_KIND]), + BatchSelector(kind=[STRING_KIND]), str, ] ] = Field( diff --git a/inference/core/workflows/core_steps/sinks/email_notification/v1.py b/inference/core/workflows/core_steps/sinks/email_notification/v1.py index 19355d89c..a29b27601 100644 --- a/inference/core/workflows/core_steps/sinks/email_notification/v1.py +++ b/inference/core/workflows/core_steps/sinks/email_notification/v1.py @@ -29,7 +29,7 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -214,9 +214,7 @@ class BlockManifest(WorkflowBlockManifest): ) message_parameters: Dict[ str, - Union[ - WorkflowParameterSelector(), BatchOfDataSelector(), str, int, float, bool - ], + Union[WorkflowParameterSelector(), BatchSelector(), str, int, float, bool], ] = Field( description="References data to be used to construct each and every column", examples=[ @@ -238,7 +236,7 @@ class BlockManifest(WorkflowBlockManifest): ], default_factory=dict, ) - attachments: Dict[str, BatchOfDataSelector(kind=[STRING_KIND, BYTES_KIND])] = Field( + attachments: Dict[str, BatchSelector(kind=[STRING_KIND, BYTES_KIND])] = Field( description="Attachments", default_factory=dict, examples=[{"report.cvs": "$steps.csv_formatter.csv_content"}], diff --git a/inference/core/workflows/core_steps/sinks/local_file/v1.py b/inference/core/workflows/core_steps/sinks/local_file/v1.py index a9d22ea94..29fcdb4fc 100644 --- a/inference/core/workflows/core_steps/sinks/local_file/v1.py +++ b/inference/core/workflows/core_steps/sinks/local_file/v1.py @@ -11,7 +11,7 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -77,7 +77,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/local_file_sink@v1"] - content: BatchOfDataSelector(kind=[STRING_KIND]) = Field( + content: BatchSelector(kind=[STRING_KIND]) = Field( description="Content of the file to save", examples=["$steps.csv_formatter.csv_content"], ) diff --git a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py index 91489515e..e580d1728 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py @@ -20,7 +20,7 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -55,7 +55,7 @@ class 
BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/roboflow_custom_metadata@v1", "RoboflowCustomMetadata"] - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -69,7 +69,7 @@ class BlockManifest(WorkflowBlockManifest): field_value: Union[ str, WorkflowParameterSelector(kind=[STRING_KIND]), - BatchOfDataSelector(kind=[STRING_KIND]), + BatchSelector(kind=[STRING_KIND]), ] = Field( description="This is the name of the metadata field you are creating", examples=["toronto", "pass", "fail"], diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py index 6f353186b..401bc9504 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py @@ -63,7 +63,7 @@ OBJECT_DETECTION_PREDICTION_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, ImageInputField, StepOutputImageSelector, WorkflowImageSelector, @@ -106,7 +106,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/roboflow_dataset_upload@v1", "RoboflowDatasetUpload"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField predictions: Optional[ - BatchOfDataSelector( + BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py index 646f8afb9..fdd4ce80f 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py @@ -25,7 +25,7 @@ OBJECT_DETECTION_PREDICTION_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, ImageInputField, StepOutputImageSelector, WorkflowImageSelector, @@ -82,7 +82,7 @@ class BlockManifest(WorkflowBlockManifest): json_schema_extra={"hidden": True}, ) predictions: Optional[ - BatchOfDataSelector( + BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/sinks/webhook/v1.py b/inference/core/workflows/core_steps/sinks/webhook/v1.py index 26dd197f4..74097128d 100644 --- a/inference/core/workflows/core_steps/sinks/webhook/v1.py +++ b/inference/core/workflows/core_steps/sinks/webhook/v1.py @@ -27,7 +27,7 @@ ROBOFLOW_PROJECT_KIND, STRING_KIND, TOP_CLASS_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -174,7 +174,7 @@ class BlockManifest(WorkflowBlockManifest): str, Union[ WorkflowParameterSelector(kind=QUERY_PARAMS_KIND), - BatchOfDataSelector(kind=QUERY_PARAMS_KIND), + BatchSelector(kind=QUERY_PARAMS_KIND), str, float, bool, @@ -190,7 +190,7 @@ class BlockManifest(WorkflowBlockManifest): str, Union[ WorkflowParameterSelector(kind=HEADER_KIND), - BatchOfDataSelector(kind=HEADER_KIND), + BatchSelector(kind=HEADER_KIND), str, float, bool, @@ -205,7 +205,7 @@ class BlockManifest(WorkflowBlockManifest): str, Union[ WorkflowParameterSelector(), - BatchOfDataSelector(), + BatchSelector(), str, float, bool, @@ -234,7 +234,7 @@ class BlockManifest(WorkflowBlockManifest): str, Union[ WorkflowParameterSelector(), - BatchOfDataSelector(), + BatchSelector(), str, 
float, bool, @@ -266,7 +266,7 @@ class BlockManifest(WorkflowBlockManifest): str, Union[ WorkflowParameterSelector(), - BatchOfDataSelector(), + BatchSelector(), str, float, bool, diff --git a/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py b/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py index 6e0312d24..8481ca532 100644 --- a/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py +++ b/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py @@ -14,7 +14,7 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( INSTANCE_SEGMENTATION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -47,7 +47,7 @@ class BoundingRectManifest(WorkflowBlockManifest): } ) type: Literal[f"roboflow_core/bounding_rect@v1"] - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py index 205e51a9f..05b42a674 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py @@ -12,7 +12,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, WorkflowVideoMetadataSelector, ) @@ -52,7 +52,7 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/byte_tracker@v1"] metadata: WorkflowVideoMetadataSelector - detections: BatchOfDataSelector( + detections: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py index d99a2293d..23b337bc4 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py @@ -13,7 +13,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -58,7 +58,7 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/byte_tracker@v2"] image: WorkflowImageSelector - detections: BatchOfDataSelector( + detections: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py index 4290c3405..264bca5d9 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py @@ -14,7 +14,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -73,7 +73,7 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/byte_tracker@v3"] image: WorkflowImageSelector - detections: BatchOfDataSelector( + detections: BatchSelector( kind=[ 
OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py index 0d98c9b68..7055bcf28 100644 --- a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py +++ b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py @@ -19,7 +19,7 @@ INTEGER_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -51,7 +51,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/detection_offset@v1", "DetectionOffset"] - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py index 2ed841f45..1c053f515 100644 --- a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py @@ -18,7 +18,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -71,7 +71,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/detections_filter@v1", "DetectionsFilter"] - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -86,9 +86,7 @@ class BlockManifest(WorkflowBlockManifest): ) operations_parameters: Dict[ str, - Union[ - WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() - ], + Union[WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector()], ] = Field( description="References to additional parameters that may be provided in runtime to parametrise operations", examples=[ diff --git a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py index 0629ca4df..1e094e2df 100644 --- a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py @@ -24,7 +24,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -85,7 +85,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal[ "roboflow_core/detections_transformation@v1", "DetectionsTransformation" ] - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -101,9 +101,7 @@ class BlockManifest(WorkflowBlockManifest): ) operations_parameters: Dict[ str, - Union[ - WorkflowImageSelector, WorkflowParameterSelector(), BatchOfDataSelector() - ], + Union[WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector()], ] = Field( description="References to additional parameters that may be provided in runtime to parameterize operations", examples=[ diff --git 
a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py index 1b381ab5f..5d0fe989c 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py @@ -22,7 +22,7 @@ OBJECT_DETECTION_PREDICTION_KIND, RGB_COLOR_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, StepOutputImageSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -66,7 +66,7 @@ class BlockManifest(WorkflowBlockManifest): examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("images", "image"), ) - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -98,7 +98,7 @@ class BlockManifest(WorkflowBlockManifest): ) background_color: Union[ WorkflowParameterSelector(kind=[STRING_KIND]), - BatchOfDataSelector(kind=[RGB_COLOR_KIND]), + BatchSelector(kind=[RGB_COLOR_KIND]), str, Tuple[int, int, int], ] = Field( diff --git a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py index 612e80b46..577ca27ad 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py @@ -13,7 +13,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -49,7 +49,7 @@ class DynamicZonesManifest(WorkflowBlockManifest): } ) type: Literal[f"{TYPE}", "DynamicZone"] - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py index 6ccb46dd9..42f27f0c8 100644 --- a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py +++ b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py @@ -23,7 +23,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, StepOutputImageSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -62,7 +62,7 @@ class PerspectiveCorrectionManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/perspective_correction@v1", "PerspectiveCorrection"] predictions: Optional[ - BatchOfDataSelector( + BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -79,7 +79,7 @@ class PerspectiveCorrectionManifest(WorkflowBlockManifest): examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("images", "image"), ) - perspective_polygons: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + perspective_polygons: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Perspective polygons (for each batch at least one must be consisting of 4 vertices)", examples=["$steps.perspective_wrap.zones"], ) diff --git a/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py 
b/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py index 384c46e68..941a252a3 100644 --- a/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py +++ b/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py @@ -14,7 +14,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowImageSelector, WorkflowParameterSelector, ) @@ -47,7 +47,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/stabilize_detections@v1"] image: WorkflowImageSelector - detections: BatchOfDataSelector( + detections: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py b/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py index f34636e1e..4eeac8e86 100644 --- a/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py +++ b/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py @@ -13,7 +13,7 @@ INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -96,7 +96,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/stitch_ocr_detections@v1"] - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/visualizations/common/base.py b/inference/core/workflows/core_steps/visualizations/common/base.py index 77f80af48..4a0a1c1a1 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base.py +++ b/inference/core/workflows/core_steps/visualizations/common/base.py @@ -14,7 +14,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, StepOutputImageSelector, WorkflowImageSelector, WorkflowParameterSelector, @@ -80,7 +80,7 @@ def run( class PredictionsVisualizationManifest(VisualizationManifest, ABC): - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/visualizations/halo/v1.py b/inference/core/workflows/core_steps/visualizations/halo/v1.py index e2ba6caa7..e01cec070 100644 --- a/inference/core/workflows/core_steps/visualizations/halo/v1.py +++ b/inference/core/workflows/core_steps/visualizations/halo/v1.py @@ -18,7 +18,7 @@ FLOAT_ZERO_TO_ONE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, - BatchOfDataSelector, + BatchSelector, FloatZeroToOne, WorkflowParameterSelector, ) @@ -46,7 +46,7 @@ class HaloManifest(ColorableVisualizationManifest): } ) - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py index dc44f94f1..cbc710803 100644 --- a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py +++ b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py @@ -16,7 +16,7 @@ INTEGER_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from 
inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -48,7 +48,7 @@ class KeypointManifest(VisualizationManifest): } ) - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ KEYPOINT_DETECTION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/visualizations/line_zone/v1.py b/inference/core/workflows/core_steps/visualizations/line_zone/v1.py index d68d53865..35ddece2c 100644 --- a/inference/core/workflows/core_steps/visualizations/line_zone/v1.py +++ b/inference/core/workflows/core_steps/visualizations/line_zone/v1.py @@ -19,7 +19,7 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, FloatZeroToOne, WorkflowParameterSelector, ) @@ -47,7 +47,7 @@ class LineCounterZoneVisualizationManifest(VisualizationManifest): "block_type": "visualization", } ) - zone: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points.", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) @@ -71,13 +71,13 @@ class LineCounterZoneVisualizationManifest(VisualizationManifest): default=1.0, examples=[1.0, "$inputs.text_scale"], ) - count_in: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND]), BatchOfDataSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + count_in: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND]), BatchSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Reference to the number of objects that crossed into the line zone.", default=0, examples=["$steps.line_counter.count_in"], json_schema_extra={"always_visible": True}, ) - count_out: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND]), BatchOfDataSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + count_out: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND]), BatchSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Reference to the number of objects that crossed out of the line zone.", default=0, examples=["$steps.line_counter.count_out"], diff --git a/inference/core/workflows/core_steps/visualizations/mask/v1.py b/inference/core/workflows/core_steps/visualizations/mask/v1.py index 9933d8183..ef3215e89 100644 --- a/inference/core/workflows/core_steps/visualizations/mask/v1.py +++ b/inference/core/workflows/core_steps/visualizations/mask/v1.py @@ -14,7 +14,7 @@ from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, - BatchOfDataSelector, + BatchSelector, FloatZeroToOne, WorkflowParameterSelector, ) @@ -42,7 +42,7 @@ class MaskManifest(ColorableVisualizationManifest): } ) - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py b/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py index 8bf69451b..7a769b822 100644 --- a/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py +++ b/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py @@ -19,7 +19,7 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, FloatZeroToOne, 
WorkflowParameterSelector, ) @@ -52,7 +52,7 @@ class ModelComparisonManifest(VisualizationManifest): } ) - predictions_a: BatchOfDataSelector( + predictions_a: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -69,7 +69,7 @@ class ModelComparisonManifest(VisualizationManifest): examples=["GREEN", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.color_a"], ) - predictions_b: BatchOfDataSelector( + predictions_b: BatchSelector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/visualizations/polygon/v1.py b/inference/core/workflows/core_steps/visualizations/polygon/v1.py index 8bad2b931..75a9fc25a 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon/v1.py +++ b/inference/core/workflows/core_steps/visualizations/polygon/v1.py @@ -17,7 +17,7 @@ from inference.core.workflows.execution_engine.entities.types import ( INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -44,7 +44,7 @@ class PolygonManifest(ColorableVisualizationManifest): } ) - predictions: BatchOfDataSelector( + predictions: BatchSelector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] diff --git a/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py b/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py index acd9762e0..be9fdbc5e 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py +++ b/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py @@ -17,7 +17,7 @@ FLOAT_ZERO_TO_ONE_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, FloatZeroToOne, WorkflowParameterSelector, ) @@ -45,7 +45,7 @@ class PolygonZoneVisualizationManifest(VisualizationManifest): "block_type": "visualization", } ) - zone: Union[list, BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Polygon zones (one for each batch) in a format [[(x1, y1), (x2, y2), (x3, y3), ...], ...];" " each zone must consist of more than 2 points", examples=["$inputs.zones"], diff --git a/inference/core/workflows/core_steps/visualizations/reference_path/v1.py b/inference/core/workflows/core_steps/visualizations/reference_path/v1.py index 5bb385e2c..c8d76a990 100644 --- a/inference/core/workflows/core_steps/visualizations/reference_path/v1.py +++ b/inference/core/workflows/core_steps/visualizations/reference_path/v1.py @@ -14,7 +14,7 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -45,7 +45,7 @@ class ReferencePathVisualizationManifest(VisualizationManifest): ) reference_path: Union[ list, - BatchOfDataSelector(kind=[LIST_OF_VALUES_KIND]), + BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), ] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", diff --git a/inference/core/workflows/execution_engine/entities/base.py b/inference/core/workflows/execution_engine/entities/base.py index dc77ed9b7..09cc17d26 100644 --- 
a/inference/core/workflows/execution_engine/entities/base.py +++ b/inference/core/workflows/execution_engine/entities/base.py @@ -69,7 +69,7 @@ class WorkflowImage(WorkflowInput): type: Literal["WorkflowImage", "InferenceImage"] name: str kind: List[Union[str, Kind]] = Field(default=[IMAGE_KIND]) - dimensionality: int = Field(default=1) + dimensionality: int = Field(default=1, ge=1, le=1) @classmethod def is_batch_oriented(cls) -> bool: @@ -80,15 +80,15 @@ class WorkflowVideoMetadata(WorkflowInput): type: Literal["WorkflowVideoMetadata"] name: str kind: List[Union[str, Kind]] = Field(default=[VIDEO_METADATA_KIND]) - dimensionality: int = Field(default=1) + dimensionality: int = Field(default=1, ge=1, le=1) @classmethod def is_batch_oriented(cls) -> bool: return True -class WorkflowDataBatch(WorkflowInput): - type: Literal["WorkflowDataBatch"] +class WorkflowBatchInput(WorkflowInput): + type: Literal["WorkflowBatchInput"] name: str kind: List[Union[str, Kind]] = Field(default_factory=lambda: [WILDCARD_KIND]) dimensionality: int = Field(default=1) @@ -105,11 +105,11 @@ class WorkflowParameter(WorkflowInput): default_value: Optional[Union[float, int, str, bool, list, set]] = Field( default=None ) - dimensionality: int = Field(default=0) + dimensionality: int = Field(default=0, ge=0, le=0) InputType = Annotated[ - Union[WorkflowImage, WorkflowVideoMetadata, WorkflowParameter, WorkflowDataBatch], + Union[WorkflowImage, WorkflowVideoMetadata, WorkflowParameter, WorkflowBatchInput], Field(discriminator="type"), ] diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index 439f32185..8faeb731e 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -1023,7 +1023,8 @@ def __hash__(self) -> int: STEP_AS_SELECTED_ELEMENT = "step" STEP_OUTPUT_AS_SELECTED_ELEMENT = "step_output" -BATCH_OF_DATA_AS_SELECTED_ELEMENT = "batch_of_data" +BATCH_AS_SELECTED_ELEMENT = "batch" +SCALAR_AS_SELECTED_ELEMENT = "scalar" StepSelector = Annotated[ str, @@ -1066,12 +1067,12 @@ def StepOutputSelector(kind: Optional[List[Kind]] = None): ] -def BatchOfDataSelector(kind: Optional[List[Kind]] = None): +def BatchSelector(kind: Optional[List[Kind]] = None): if kind is None: kind = [WILDCARD_KIND] json_schema_extra = { REFERENCE_KEY: True, - SELECTED_ELEMENT_KEY: BATCH_OF_DATA_AS_SELECTED_ELEMENT, + SELECTED_ELEMENT_KEY: BATCH_AS_SELECTED_ELEMENT, KIND_KEY: [k.dict() for k in kind], SELECTOR_POINTS_TO_BATCH_KEY: True, } @@ -1084,6 +1085,23 @@ def BatchOfDataSelector(kind: Optional[List[Kind]] = None): ] +def ScalarSelector(kind: Optional[List[Kind]] = None): + if kind is None: + kind = [WILDCARD_KIND] + json_schema_extra = { + REFERENCE_KEY: True, + SELECTED_ELEMENT_KEY: SCALAR_AS_SELECTED_ELEMENT, + KIND_KEY: [k.dict() for k in kind], + } + return Annotated[ + str, + StringConstraints( + pattern=r"(^\$steps\.[A-Za-z_\-0-9]+\.[A-Za-z_*0-9\-]+$)|(^\$inputs.[A-Za-z_0-9\-]+$)" + ), + Field(json_schema_extra=json_schema_extra), + ] + + def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): if kind is None: kind = [WILDCARD_KIND] diff --git a/inference/core/workflows/execution_engine/introspection/connections_discovery.py b/inference/core/workflows/execution_engine/introspection/connections_discovery.py index 33ef13f68..8ad1b5831 100644 --- a/inference/core/workflows/execution_engine/introspection/connections_discovery.py +++ 
b/inference/core/workflows/execution_engine/introspection/connections_discovery.py @@ -2,7 +2,7 @@ from typing import Dict, Generator, List, Set, Tuple, Type from inference.core.workflows.execution_engine.entities.types import ( - BATCH_OF_DATA_AS_SELECTED_ELEMENT, + BATCH_AS_SELECTED_ELEMENT, STEP_AS_SELECTED_ELEMENT, STEP_OUTPUT_AS_SELECTED_ELEMENT, WILDCARD_KIND, @@ -43,7 +43,7 @@ def discover_blocks_connections( ) compatible_elements = { STEP_OUTPUT_AS_SELECTED_ELEMENT, - BATCH_OF_DATA_AS_SELECTED_ELEMENT, + BATCH_AS_SELECTED_ELEMENT, } coarse_input_kind2schemas = convert_kinds_mapping_to_block_wise_format( detailed_input_kind2schemas=detailed_input_kind2schemas, diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index ae7f012c6..7691c9dd4 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -695,7 +695,12 @@ def denote_data_flow_for_step( else: actual_input_is_batch = {input_definition.is_batch_oriented()} batch_input_expected = input_property2batch_expected[property_name] - if batch_input_expected == {False} and True in actual_input_is_batch: + step_accepts_batch_input = step_node_data.step_manifest.accepts_batch_input() + if ( + step_accepts_batch_input + and batch_input_expected == {False} + and True in actual_input_is_batch + ): raise ExecutionGraphStructureError( public_message=f"Detected invalid reference plugged " f"into property `{property_name}` of step `{node}` - the step " @@ -705,7 +710,6 @@ def denote_data_flow_for_step( f"step inputs are filled with outputs of batch-oriented steps or batch-oriented inputs.", context="workflow_compilation | execution_graph_construction", ) - step_accepts_batch_input = step_node_data.step_manifest.accepts_batch_input() if ( step_accepts_batch_input and batch_input_expected == {True} diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py index aa06154f5..5447f7f59 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py @@ -12,7 +12,7 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( WILDCARD_KIND, - BatchOfDataSelector, + BatchSelector, Kind, StepOutputImageSelector, StepOutputSelector, @@ -251,7 +251,7 @@ def collect_python_types_for_selectors( elif selector_type is SelectorType.STEP_OUTPUT: result.append(StepOutputSelector(kind=selector_kind)) elif selector_type is SelectorType.BATCH_OF_DATA: - result.append(BatchOfDataSelector(kind=selector_kind)) + result.append(BatchSelector(kind=selector_kind)) else: raise DynamicBlockError( public_message=f"Could not recognise selector type `{selector_type}` declared for input `{input_name}` " diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index a402539ed..55d38b48f 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ 
b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -438,14 +438,21 @@ def get_non_compound_parameter_value( guard_of_indices_wrapping: GuardForIndicesWrapping, ) -> Union[Any, Optional[List[DynamicBatchIndex]]]: if not parameter.is_batch_oriented(): - input_parameter: DynamicStepInputDefinition = parameter # type: ignore if parameter.points_to_input(): + input_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_name = get_last_chunk_of_selector( selector=input_parameter.selector ) return runtime_parameters[parameter_name], None - static_input: StaticStepInputDefinition = parameter # type: ignore - return static_input.value, None + elif parameter.points_to_step_output(): + input_parameter: DynamicStepInputDefinition = parameter # type: ignore + value = execution_cache.get_non_batch_output( + selector=input_parameter.selector + ) + return value, None + else: + static_input: StaticStepInputDefinition = parameter # type: ignore + return static_input.value, None dynamic_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_dimensionality = dynamic_parameter.get_dimensionality() lineage_indices = dynamic_batches_manager.get_indices_for_data_lineage( diff --git a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py index 0d1883994..ad5aac892 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py @@ -8,7 +8,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, - BatchOfDataSelector, + BatchSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -62,7 +62,7 @@ class MixedInputWithoutBatchesBlockManifest(WorkflowBlockManifest): type: Literal["MixedInputWithoutBatchesBlock"] mixed_parameter: Union[ WorkflowParameterSelector(), - BatchOfDataSelector(), + BatchSelector(), Any, ] @@ -98,7 +98,7 @@ class MixedInputWithBatchesBlockManifest(WorkflowBlockManifest): type: Literal["MixedInputWithBatchesBlock"] mixed_parameter: Union[ WorkflowParameterSelector(), - BatchOfDataSelector(), + BatchSelector(), Any, ] @@ -138,7 +138,7 @@ class BatchInputBlockProcessingBatchesManifest(WorkflowBlockManifest): } ) type: Literal["BatchInputBlockProcessingBatches"] - batch_parameter: BatchOfDataSelector() + batch_parameter: BatchSelector() @classmethod def accepts_batch_input(cls) -> bool: @@ -174,7 +174,7 @@ class BatchInputBlockProcessingNotBatchesManifest(WorkflowBlockManifest): } ) type: Literal["BatchInputBlockNotProcessingBatches"] - batch_parameter: BatchOfDataSelector() + batch_parameter: BatchSelector() @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -239,7 +239,7 @@ class CompoundMixedInputBlockManifest(WorkflowBlockManifest): ) type: Literal["CompoundMixedInputBlockManifestBlock"] compound_parameter: Dict[ - str, Union[WorkflowParameterSelector(), BatchOfDataSelector(), Any] + str, Union[WorkflowParameterSelector(), BatchSelector(), Any] ] @classmethod @@ -281,7 +281,7 @@ class CompoundStrictBatchBlockManifest(WorkflowBlockManifest): } ) type: Literal["CompoundStrictBatchBlock"] - compound_parameter: Dict[str, Union[BatchOfDataSelector()]] + compound_parameter: Dict[str, 
Union[BatchSelector()]] @classmethod def accepts_batch_input(cls) -> bool: @@ -320,7 +320,7 @@ class CompoundNonStrictBatchBlockManifest(WorkflowBlockManifest): } ) type: Literal["CompoundNonStrictBatchBlock"] - compound_parameter: Dict[str, Union[BatchOfDataSelector()]] + compound_parameter: Dict[str, Union[BatchSelector()]] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/tests/workflows/integration_tests/execution/stub_plugins/secret_store_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/secret_store_plugin/__init__.py new file mode 100644 index 000000000..15b7535a1 --- /dev/null +++ b/tests/workflows/integration_tests/execution/stub_plugins/secret_store_plugin/__init__.py @@ -0,0 +1,144 @@ +from typing import List, Literal, Optional, Type +from uuid import uuid4 + +from pydantic import Field + +from inference.core.workflows.execution_engine.entities.base import ( + Batch, + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, + STRING_KIND, + BatchSelector, + ScalarSelector, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + + +class SecretBlockManifest(WorkflowBlockManifest): + type: Literal["secret_store"] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="secret", kind=[STRING_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class SecretStoreBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SecretBlockManifest + + def run(self) -> BlockResult: + return {"secret": "my_secret"} + + +class BlockManifest(WorkflowBlockManifest): + type: Literal["secret_store_user"] + image: BatchSelector(kind=[IMAGE_KIND]) = Field( + title="Input Image", + description="The input image for this step.", + ) + secret: ScalarSelector(kind=[STRING_KIND]) + + @classmethod + def accepts_batch_input(cls) -> bool: + return True + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="output", kind=[STRING_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class SecretStoreUserBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockManifest + + def run(self, image: Batch[WorkflowImageData], secret: str) -> BlockResult: + return [{"output": secret}] * len(image) + + +class BatchSecretBlockManifest(WorkflowBlockManifest): + type: Literal["batch_secret_store"] + image: BatchSelector(kind=[IMAGE_KIND]) = Field( + title="Input Image", + description="The input image for this step.", + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="secret", kind=[STRING_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class BatchSecretStoreBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BatchSecretBlockManifest + + def run(self, image: WorkflowImageData) -> BlockResult: + return {"secret": f"my_secret_{uuid4()}"} + + +class NonBatchSecretStoreUserBlockManifest(WorkflowBlockManifest): + type: Literal["non_batch_secret_store_user"] + image: BatchSelector(kind=[IMAGE_KIND]) = Field( + 
title="Input Image", + description="The input image for this step.", + ) + secret: ScalarSelector(kind=[STRING_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="output", kind=[STRING_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class NonBatchSecretStoreUserBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonBatchSecretStoreUserBlockManifest + + def run(self, image: WorkflowImageData, secret: str) -> BlockResult: + return {"output": secret} + + +def load_blocks() -> List[Type[WorkflowBlock]]: + return [ + SecretStoreBlock, + SecretStoreUserBlock, + BatchSecretStoreBlock, + NonBatchSecretStoreUserBlock, + ] diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py index d0bf1082c..2b2f293c1 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -77,7 +77,7 @@ "inputs": [ {"type": "WorkflowImage", "name": "image"}, { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "predictions", "kind": ["object_detection_prediction"], }, @@ -103,7 +103,7 @@ "version": "1.3.0", "inputs": [ { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "crops", "kind": ["image"], "dimensionality": 2, @@ -589,7 +589,7 @@ def test_debug_execution_when_empty_batch_oriented_input_provided( "inputs": [ {"type": "WorkflowImage", "name": "image"}, { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "confidence", }, ], @@ -929,7 +929,7 @@ def test_workflow_when_batch_oriented_step_feeds_mixed_input_step( "version": "1.3.0", "inputs": [ { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "data", }, ], @@ -987,7 +987,7 @@ def test_workflow_when_batch_oriented_input_feeds_batch_input_step( "version": "1.3.0", "inputs": [ { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "data", }, ], @@ -1045,7 +1045,7 @@ def test_workflow_when_batch_oriented_input_feeds_mixed_input_step( "version": "1.3.0", "inputs": [ { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "data", }, ], @@ -1080,14 +1080,23 @@ def test_workflow_when_batch_oriented_input_feeds_non_batch_input_step( "workflows_core.api_key": None, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_INTO_NON_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) # when - with pytest.raises(ExecutionGraphStructureError): - _ = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_INTO_NON_BATCH_ORIENTED_STEP, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + result = execution_engine.run( + runtime_parameters={ + "data": ["some", "other"], + } + ) + + # then + assert len(result) == 2, "Expected two outputs for two input elements" + assert result[0]["result"] == 0.4, "Expected hardcoded value" + assert result[1]["result"] == 0.4, "Expected hardcoded value" 
WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP = { @@ -1744,7 +1753,7 @@ def test_workflow_when_non_batch_oriented_input_feeds_compound_strictly_batch_or "version": "1.3.0", "inputs": [ { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "data", }, ], @@ -1781,21 +1790,30 @@ def test_workflow_when_batch_oriented_input_feeds_compound_non_batch_oriented_st "workflows_core.api_key": None, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) # when - with pytest.raises(ExecutionGraphStructureError): - _ = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + result = execution_engine.run( + runtime_parameters={ + "data": ["some", "other"], + } + ) + + # then + assert len(result) == 2, "Expected 2 outputs for 2 inputs" + assert result[0]["result"] == 0.4, "Expected hardcoded value" + assert result[1]["result"] == 0.4, "Expected hardcoded value" WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_MIXED_ORIENTED_STEP = { "version": "1.3.0", "inputs": [ { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "data", }, ], @@ -1855,7 +1873,7 @@ def test_workflow_when_batch_oriented_input_feeds_compound_mixed_oriented_step( "version": "1.3.0", "inputs": [ { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "data", }, ], @@ -1915,7 +1933,7 @@ def test_workflow_when_batch_oriented_input_feeds_compound_loosely_batch_oriente "version": "1.3.0", "inputs": [ { - "type": "WorkflowDataBatch", + "type": "WorkflowBatchInput", "name": "data", }, ], diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_scalar_selectors.py b/tests/workflows/integration_tests/execution/test_workflow_with_scalar_selectors.py new file mode 100644 index 000000000..e6c06a6a9 --- /dev/null +++ b/tests/workflows/integration_tests/execution/test_workflow_with_scalar_selectors.py @@ -0,0 +1,174 @@ +from unittest import mock +from unittest.mock import MagicMock + +import numpy as np + +from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.execution_engine.core import ExecutionEngine +from inference.core.workflows.execution_engine.introspection import blocks_loader + +NON_BATCH_SECRET_STORE_WORKFLOW = { + "version": "1.3.0", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "secret_store", + "name": "secret", + }, + { + "type": "secret_store_user", + "name": "user", + "image": "$inputs.image", + "secret": "$steps.secret.secret", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.user.output", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_selectors_for_batch_of_images( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, + dogs_image: np.ndarray, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + 
"tests.workflows.integration_tests.execution.stub_plugins.secret_store_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=NON_BATCH_SECRET_STORE_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [dogs_image, dogs_image], + } + ) + + # then + assert len(result) == 2 + assert ( + result[0]["result"] == "my_secret" + ), "Expected secret store value propagated into output" + assert ( + result[1]["result"] == "my_secret" + ), "Expected secret store value propagated into output" + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_selectors_for_single_image( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, + dogs_image: np.ndarray, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.secret_store_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=NON_BATCH_SECRET_STORE_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [dogs_image], + } + ) + + # then + assert len(result) == 1 + assert ( + result[0]["result"] == "my_secret" + ), "Expected secret store value propagated into output" + + +BATCH_SECRET_STORE_WORKFLOW = { + "version": "1.3.0", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "batch_secret_store", + "name": "secret", + "image": "$inputs.image", + }, + { + "type": "non_batch_secret_store_user", + "name": "user", + "image": "$inputs.image", + "secret": "$steps.secret.secret", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "result", + "selector": "$steps.user.output", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_batch_oriented_secret_store_for_batch_of_images( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, + dogs_image: np.ndarray, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.secret_store_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=BATCH_SECRET_STORE_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [dogs_image, dogs_image], + } + ) + + # then + assert len(result) == 2 + assert result[0]["result"].startswith( + "my_secret" + ), "Expected secret store value propagated into output" + assert result[1]["result"].startswith( + "my_secret" + ), "Expected secret store value propagated into output" + assert ( + result[0]["result"] != result[1]["result"] + ), "Expected different results for both outputs, as feature store should 
fire twice for two input images" diff --git a/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py b/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py index 56ca839a5..b513be577 100644 --- a/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py +++ b/tests/workflows/unit_tests/execution_engine/executor/test_runtime_input_assembler.py @@ -12,7 +12,7 @@ from inference.core.workflows.errors import RuntimeInputError from inference.core.workflows.execution_engine.entities.base import ( VideoMetadata, - WorkflowDataBatch, + WorkflowBatchInput, WorkflowImage, WorkflowImageData, WorkflowParameter, @@ -580,12 +580,15 @@ def test_assemble_runtime_parameters_when_parameters_at_different_dimensionality ], } defined_inputs = [ - WorkflowDataBatch(type="WorkflowDataBatch", name="image1", kind=["image"]), - WorkflowDataBatch( - type="WorkflowDataBatch", name="image2", kind=[IMAGE_KIND], dimensionality=2 + WorkflowBatchInput(type="WorkflowBatchInput", name="image1", kind=["image"]), + WorkflowBatchInput( + type="WorkflowBatchInput", + name="image2", + kind=[IMAGE_KIND], + dimensionality=2, ), - WorkflowDataBatch( - type="WorkflowDataBatch", name="image3", kind=["image"], dimensionality=3 + WorkflowBatchInput( + type="WorkflowBatchInput", name="image3", kind=["image"], dimensionality=3 ), ] @@ -637,23 +640,23 @@ def test_assemble_runtime_parameters_when_basic_types_are_passed_as_batch_orient "dict_param": [{"some": "dict"}, {"other": "dict"}], } defined_inputs = [ - WorkflowDataBatch( - type="WorkflowDataBatch", name="string_param", kind=[STRING_KIND.name] + WorkflowBatchInput( + type="WorkflowBatchInput", name="string_param", kind=[STRING_KIND.name] ), - WorkflowDataBatch( - type="WorkflowDataBatch", name="float_param", kind=[FLOAT_KIND.name] + WorkflowBatchInput( + type="WorkflowBatchInput", name="float_param", kind=[FLOAT_KIND.name] ), - WorkflowDataBatch( - type="WorkflowDataBatch", name="int_param", kind=[INTEGER_KIND] + WorkflowBatchInput( + type="WorkflowBatchInput", name="int_param", kind=[INTEGER_KIND] ), - WorkflowDataBatch( - type="WorkflowDataBatch", name="list_param", kind=[LIST_OF_VALUES_KIND] + WorkflowBatchInput( + type="WorkflowBatchInput", name="list_param", kind=[LIST_OF_VALUES_KIND] ), - WorkflowDataBatch( - type="WorkflowDataBatch", name="boolean_param", kind=[BOOLEAN_KIND] + WorkflowBatchInput( + type="WorkflowBatchInput", name="boolean_param", kind=[BOOLEAN_KIND] ), - WorkflowDataBatch( - type="WorkflowDataBatch", name="dict_param", kind=[DICTIONARY_KIND] + WorkflowBatchInput( + type="WorkflowBatchInput", name="dict_param", kind=[DICTIONARY_KIND] ), ] @@ -682,11 +685,11 @@ def test_assemble_runtime_parameters_when_input_batch_shallower_than_declared() "float_param": [1.0, 2.0], } defined_inputs = [ - WorkflowDataBatch( - type="WorkflowDataBatch", name="string_param", kind=[STRING_KIND.name] + WorkflowBatchInput( + type="WorkflowBatchInput", name="string_param", kind=[STRING_KIND.name] ), - WorkflowDataBatch( - type="WorkflowDataBatch", + WorkflowBatchInput( + type="WorkflowBatchInput", name="float_param", kind=[FLOAT_KIND.name], dimensionality=2, @@ -709,11 +712,11 @@ def test_assemble_runtime_parameters_when_input_batch_deeper_than_declared() -> "float_param": [[1.0], [2.0]], } defined_inputs = [ - WorkflowDataBatch( - type="WorkflowDataBatch", name="string_param", kind=[STRING_KIND.name] + WorkflowBatchInput( + type="WorkflowBatchInput", name="string_param", kind=[STRING_KIND.name] ), 
- WorkflowDataBatch( - type="WorkflowDataBatch", name="float_param", kind=[FLOAT_KIND.name] + WorkflowBatchInput( + type="WorkflowBatchInput", name="float_param", kind=[FLOAT_KIND.name] ), ] From b02502da937dcb83512b9e0e7e342d2a92da09a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Tue, 5 Nov 2024 13:50:31 +0100 Subject: [PATCH 18/67] Start using scalar selector everywhere --- .../analytics/data_aggregator/v1.py | 4 +- .../core_steps/analytics/line_counter/v1.py | 6 +- .../core_steps/analytics/line_counter/v2.py | 6 +- .../core_steps/analytics/path_deviation/v1.py | 6 +- .../core_steps/analytics/path_deviation/v2.py | 6 +- .../core_steps/analytics/time_in_zone/v1.py | 10 ++-- .../core_steps/analytics/time_in_zone/v2.py | 10 ++-- .../core_steps/classical_cv/contours/v1.py | 4 +- .../classical_cv/distance_measurement/v1.py | 12 ++-- .../classical_cv/dominant_color/v1.py | 8 +-- .../core_steps/classical_cv/image_blur/v1.py | 6 +- .../classical_cv/image_preprocessing/v1.py | 10 ++-- .../classical_cv/pixel_color_count/v1.py | 6 +- .../classical_cv/sift_comparison/v1.py | 26 ++++----- .../classical_cv/sift_comparison/v2.py | 34 +++++------ .../classical_cv/size_measurement/v1.py | 4 +- .../classical_cv/template_matching/v1.py | 18 +++--- .../core_steps/classical_cv/threshold/v1.py | 8 +-- .../core_steps/flow_control/continue_if/v1.py | 4 +- .../flow_control/rate_limiter/v1.py | 6 +- .../workflows/core_steps/formatters/csv/v1.py | 4 +- .../core_steps/formatters/expression/v1.py | 4 +- .../formatters/vlm_as_classifier/v1.py | 4 +- .../formatters/vlm_as_detector/v1.py | 4 +- .../fusion/detections_consensus/v1.py | 26 ++++----- .../core_steps/fusion/detections_stitch/v1.py | 6 +- .../models/foundation/anthropic_claude/v1.py | 38 ++++++------- .../models/foundation/clip_comparison/v1.py | 12 ++-- .../models/foundation/clip_comparison/v2.py | 14 ++--- .../models/foundation/cog_vlm/v1.py | 4 +- .../models/foundation/florence2/v1.py | 32 +++++------ .../models/foundation/google_gemini/v1.py | 36 ++++++------ .../models/foundation/google_vision_ocr/v1.py | 4 +- .../core_steps/models/foundation/lmm/v1.py | 10 ++-- .../models/foundation/lmm_classifier/v1.py | 16 ++---- .../core_steps/models/foundation/openai/v1.py | 12 ++-- .../core_steps/models/foundation/openai/v2.py | 38 ++++++------- .../models/foundation/segment_anything2/v1.py | 18 +++--- .../foundation/stability_ai/inpainting/v1.py | 8 +-- .../models/foundation/yolo_world/v1.py | 10 ++-- .../roboflow/instance_segmentation/v1.py | 38 +++++-------- .../models/roboflow/keypoint_detection/v1.py | 36 +++++------- .../roboflow/multi_class_classification/v1.py | 12 ++-- .../roboflow/multi_label_classification/v1.py | 12 ++-- .../models/roboflow/object_detection/v1.py | 36 +++++------- .../core_steps/sinks/email_notification/v1.py | 56 +++++++++---------- .../core_steps/sinks/local_file/v1.py | 32 +++++------ .../sinks/roboflow/custom_metadata/v1.py | 18 +++--- .../sinks/roboflow/dataset_upload/v1.py | 36 +++++------- .../sinks/roboflow/dataset_upload/v2.py | 52 +++++++---------- .../workflows/core_steps/sinks/webhook/v1.py | 48 ++++++++-------- .../absolute_static_crop/v1.py | 22 +++----- .../transformations/byte_tracker/v1.py | 10 ++-- .../transformations/byte_tracker/v2.py | 10 ++-- .../transformations/byte_tracker/v3.py | 10 ++-- .../transformations/detection_offset/v1.py | 16 ++---- .../transformations/detections_filter/v1.py | 4 +- .../detections_transformation/v1.py | 4 +- .../transformations/dynamic_crop/v1.py | 6 +- 
.../transformations/dynamic_zones/v1.py | 4 +- .../transformations/image_slicer/v1.py | 26 ++++----- .../perspective_correction/v1.py | 12 ++-- .../relative_static_crop/v1.py | 36 ++++++------ .../stabilize_detections/v1.py | 6 +- .../transformations/stitch_images/v1.py | 6 +- .../stitch_ocr_detections/v1.py | 4 +- .../visualizations/background_color/v1.py | 6 +- .../core_steps/visualizations/blur/v1.py | 4 +- .../visualizations/bounding_box/v1.py | 6 +- .../core_steps/visualizations/circle/v1.py | 4 +- .../core_steps/visualizations/color/v1.py | 4 +- .../core_steps/visualizations/common/base.py | 4 +- .../visualizations/common/base_colorable.py | 20 +++---- .../core_steps/visualizations/corner/v1.py | 6 +- .../core_steps/visualizations/crop/v1.py | 8 +-- .../core_steps/visualizations/dot/v1.py | 8 +-- .../core_steps/visualizations/ellipse/v1.py | 8 +-- .../core_steps/visualizations/halo/v1.py | 6 +- .../core_steps/visualizations/keypoint/v1.py | 16 +++--- .../core_steps/visualizations/label/v1.py | 16 +++--- .../core_steps/visualizations/line_zone/v1.py | 18 +++--- .../core_steps/visualizations/mask/v1.py | 4 +- .../visualizations/model_comparison/v1.py | 10 ++-- .../core_steps/visualizations/pixelate/v1.py | 4 +- .../core_steps/visualizations/polygon/v1.py | 4 +- .../visualizations/polygon_zone/v1.py | 8 +-- .../visualizations/reference_path/v1.py | 8 +-- .../core_steps/visualizations/trace/v1.py | 8 +-- .../core_steps/visualizations/triangle/v1.py | 10 ++-- .../v1/introspection/inputs_discovery.py | 25 ++++++--- 90 files changed, 585 insertions(+), 676 deletions(-) diff --git a/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py b/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py index dc3dd3f3a..73227e699 100644 --- a/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py +++ b/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py @@ -19,8 +19,8 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, BatchSelector, + ScalarSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -194,7 +194,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/data_aggregator@v1"] data: Dict[ str, - Union[WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector()], + Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()], ] = Field( description="References data to be used to construct each and every column", examples=[ diff --git a/inference/core/workflows/core_steps/analytics/line_counter/v1.py b/inference/core/workflows/core_steps/analytics/line_counter/v1.py index f2cd9eeb8..aa0ff49ac 100644 --- a/inference/core/workflows/core_steps/analytics/line_counter/v1.py +++ b/inference/core/workflows/core_steps/analytics/line_counter/v1.py @@ -15,7 +15,7 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, WorkflowVideoMetadataSelector, ) from inference.core.workflows.prototypes.block import ( @@ -60,11 +60,11 @@ class LineCounterManifest(WorkflowBlockManifest): examples=["$steps.object_detection_model.predictions"], ) - line_segment: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + line_segment: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points. 
For line [[0, 100], [100, 100]] line will count objects entering from the bottom as IN", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) - triggering_anchor: Union[str, WorkflowParameterSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore + triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore description=f"Point from the detection for triggering line crossing.", default="CENTER", examples=["CENTER"], diff --git a/inference/core/workflows/core_steps/analytics/line_counter/v2.py b/inference/core/workflows/core_steps/analytics/line_counter/v2.py index 09984112c..1d5b9cb9a 100644 --- a/inference/core/workflows/core_steps/analytics/line_counter/v2.py +++ b/inference/core/workflows/core_steps/analytics/line_counter/v2.py @@ -15,8 +15,8 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, + ScalarSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -65,11 +65,11 @@ class LineCounterManifest(WorkflowBlockManifest): examples=["$steps.object_detection_model.predictions"], ) - line_segment: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + line_segment: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points. For line [[0, 100], [100, 100]] line will count objects entering from the bottom as IN", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) - triggering_anchor: Union[str, WorkflowParameterSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore + triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore description=f"Point from the detection for triggering line crossing.", default="CENTER", examples=["CENTER"], diff --git a/inference/core/workflows/core_steps/analytics/path_deviation/v1.py b/inference/core/workflows/core_steps/analytics/path_deviation/v1.py index 91f340683..18634c9a2 100644 --- a/inference/core/workflows/core_steps/analytics/path_deviation/v1.py +++ b/inference/core/workflows/core_steps/analytics/path_deviation/v1.py @@ -18,7 +18,7 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, WorkflowVideoMetadataSelector, ) from inference.core.workflows.prototypes.block import ( @@ -62,12 +62,12 @@ class PathDeviationManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - triggering_anchor: Union[str, WorkflowParameterSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore + triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore description=f"Triggering anchor. 
Allowed values: {', '.join(sv.Position.list())}", default="CENTER", examples=["CENTER"], ) - reference_path: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + reference_path: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.expected_path"], ) diff --git a/inference/core/workflows/core_steps/analytics/path_deviation/v2.py b/inference/core/workflows/core_steps/analytics/path_deviation/v2.py index 48632f2a8..e641d9ea0 100644 --- a/inference/core/workflows/core_steps/analytics/path_deviation/v2.py +++ b/inference/core/workflows/core_steps/analytics/path_deviation/v2.py @@ -18,8 +18,8 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, + ScalarSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -63,12 +63,12 @@ class PathDeviationManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - triggering_anchor: Union[str, WorkflowParameterSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore + triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore description=f"Triggering anchor. Allowed values: {', '.join(sv.Position.list())}", default="CENTER", examples=["CENTER"], ) - reference_path: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + reference_path: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.expected_path"], ) diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py index e6e09d8a4..68266779a 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py @@ -20,9 +20,9 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, WorkflowVideoMetadataSelector, ) from inference.core.workflows.prototypes.block import ( @@ -67,21 +67,21 @@ class TimeInZoneManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Zones (one for each batch) in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.zones"], ) - triggering_anchor: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description=f"Triggering anchor. 
Allowed values: {', '.join(sv.Position.list())}", default="CENTER", examples=["CENTER"], ) - remove_out_of_zone_detections: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + remove_out_of_zone_detections: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description=f"If true, detections found outside of zone will be filtered out", default=True, examples=[True, False], ) - reset_out_of_zone_detections: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + reset_out_of_zone_detections: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description=f"If true, detections found outside of zone will have time reset", default=True, examples=[True, False], diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py index a5bcbe551..d0c95994c 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py @@ -19,8 +19,8 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, + ScalarSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -68,21 +68,21 @@ class TimeInZoneManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Zones (one for each batch) in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.zones"], ) - triggering_anchor: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description=f"Triggering anchor. 
Allowed values: {', '.join(sv.Position.list())}", default="CENTER", examples=["CENTER"], ) - remove_out_of_zone_detections: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + remove_out_of_zone_detections: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description=f"If true, detections found outside of zone will be filtered out", default=True, examples=[True, False], ) - reset_out_of_zone_detections: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + reset_out_of_zone_detections: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description=f"If true, detections found outside of zone will have time reset", default=True, examples=[True, False], diff --git a/inference/core/workflows/core_steps/classical_cv/contours/v1.py b/inference/core/workflows/core_steps/classical_cv/contours/v1.py index 317cfbdb4..b7ff4ed52 100644 --- a/inference/core/workflows/core_steps/classical_cv/contours/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/contours/v1.py @@ -16,9 +16,9 @@ IMAGE_KIND, INTEGER_KIND, NUMPY_ARRAY_KIND, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -52,7 +52,7 @@ class ImageContoursDetectionManifest(WorkflowBlockManifest): validation_alias=AliasChoices("image", "images"), ) - line_thickness: Union[WorkflowParameterSelector(kind=[INTEGER_KIND]), int] = Field( + line_thickness: Union[ScalarSelector(kind=[INTEGER_KIND]), int] = Field( description="Line thickness for drawing contours.", default=3, examples=[3, "$inputs.line_thickness"], diff --git a/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py b/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py index 002af0c76..2151db592 100644 --- a/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py @@ -11,7 +11,7 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -80,9 +80,7 @@ class BlockManifest(WorkflowBlockManifest): description="Select how to calibrate the measurement of distance between objects.", ) - reference_object_class_name: Union[ - str, WorkflowParameterSelector(kind=[STRING_KIND]) - ] = Field( + reference_object_class_name: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( title="Reference Object Class Name", description="The class name of the reference object.", default="reference-object", @@ -97,7 +95,7 @@ class BlockManifest(WorkflowBlockManifest): }, ) - reference_width: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( + reference_width: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( title="Width", default=2.5, description="Width of the reference object in centimeters", @@ -113,7 +111,7 @@ class BlockManifest(WorkflowBlockManifest): }, ) - reference_height: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + reference_height: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( # type: ignore title="Height", default=2.5, description="Height of the reference object in centimeters", @@ -129,7 +127,7 @@ class BlockManifest(WorkflowBlockManifest): }, ) - pixel_ratio: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = 
Field( + pixel_ratio: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( title="Reference Pixel-to-Centimeter Ratio", description="The pixel-to-centimeter ratio of the input image, i.e. 1 centimeter = 100 pixels.", default=100, diff --git a/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py b/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py index eeaedd909..bd1a40f9e 100644 --- a/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py @@ -10,9 +10,9 @@ from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, RGB_COLOR_KIND, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -53,7 +53,7 @@ class DominantColorManifest(WorkflowBlockManifest): examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("image", "images"), ) - color_clusters: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + color_clusters: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Color Clusters", description="Number of dominant colors to identify. Higher values increase precision but may slow processing.", default=4, @@ -61,7 +61,7 @@ class DominantColorManifest(WorkflowBlockManifest): gt=0, le=10, ) - max_iterations: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + max_iterations: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Max Iterations", description="Max number of iterations to perform. Higher values increase precision but may slow processing.", default=100, @@ -69,7 +69,7 @@ class DominantColorManifest(WorkflowBlockManifest): gt=0, le=500, ) - target_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + target_size: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Target Size", description="Sets target for the smallest dimension of the downsampled image in pixels. 
Lower values increase speed but may reduce precision.", default=100, diff --git a/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py b/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py index d055c7feb..6f766a3a7 100644 --- a/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py @@ -15,9 +15,9 @@ IMAGE_KIND, INTEGER_KIND, STRING_KIND, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -53,7 +53,7 @@ class ImageBlurManifest(WorkflowBlockManifest): ) blur_type: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), Literal["average", "gaussian", "median", "bilateral"], ] = Field( default="gaussian", @@ -61,7 +61,7 @@ class ImageBlurManifest(WorkflowBlockManifest): examples=["average", "$inputs.blur_type"], ) - kernel_size: Union[WorkflowParameterSelector(kind=[INTEGER_KIND]), int] = Field( + kernel_size: Union[ScalarSelector(kind=[INTEGER_KIND]), int] = Field( default=5, description="Size of the average pooling kernel used for blurring.", examples=[5, "$inputs.kernel_size"], diff --git a/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py b/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py index dcecd4f5d..f2e109f4c 100644 --- a/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py @@ -12,9 +12,9 @@ IMAGE_KIND, INTEGER_KIND, STRING_KIND, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -57,7 +57,7 @@ class ImagePreprocessingManifest(WorkflowBlockManifest): task_type: Literal["resize", "rotate", "flip"] = Field( description="Preprocessing task to be applied to the image.", ) - width: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + width: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Width", default=640, description="Width of the image to be resized to.", @@ -72,7 +72,7 @@ class ImagePreprocessingManifest(WorkflowBlockManifest): }, }, ) - height: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + height: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Height", default=640, description="Height of the image to be resized to.", @@ -87,7 +87,7 @@ class ImagePreprocessingManifest(WorkflowBlockManifest): }, }, ) - rotation_degrees: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + rotation_degrees: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Degrees of Rotation", description="Positive value to rotate clockwise, negative value to rotate counterclockwise", default=90, @@ -103,7 +103,7 @@ class ImagePreprocessingManifest(WorkflowBlockManifest): } }, ) - flip_type: Union[WorkflowParameterSelector(kind=[STRING_KIND]), Literal["vertical", "horizontal", "both"]] = Field( # type: ignore + flip_type: Union[ScalarSelector(kind=[STRING_KIND]), Literal["vertical", "horizontal", "both"]] = Field( # type: ignore title="Flip Type", description="Type of flip to be applied to the image.", default="vertical", diff --git 
a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py index 56a8c2c34..464521576 100644 --- a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py @@ -13,9 +13,9 @@ RGB_COLOR_KIND, STRING_KIND, BatchSelector, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -47,7 +47,7 @@ class ColorPixelCountManifest(WorkflowBlockManifest): validation_alias=AliasChoices("image", "images"), ) target_color: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), BatchSelector(kind=[RGB_COLOR_KIND]), str, Tuple[int, int, int], @@ -57,7 +57,7 @@ class ColorPixelCountManifest(WorkflowBlockManifest): "(like (18, 17, 67)).", examples=["#431112", "$inputs.target_color", (18, 17, 67)], ) - tolerance: Union[WorkflowParameterSelector(kind=[INTEGER_KIND]), int] = Field( + tolerance: Union[ScalarSelector(kind=[INTEGER_KIND]), int] = Field( default=10, description="Tolerance for color matching.", examples=[10, "$inputs.tolerance"], diff --git a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py index dbabbfe5a..84f25043f 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py @@ -10,7 +10,7 @@ INTEGER_KIND, NUMPY_ARRAY_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -47,22 +47,20 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): description="Reference to SIFT descriptors from the second image to compare", examples=["$steps.sift.descriptors"], ) - good_matches_threshold: Union[ - PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND]) - ] = Field( - default=50, - description="Threshold for the number of good matches to consider the images as matching", - examples=[50, "$inputs.good_matches_threshold"], - ) - ratio_threshold: Union[float, WorkflowParameterSelector(kind=[INTEGER_KIND])] = ( + good_matches_threshold: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = ( Field( - default=0.7, - description="Ratio threshold for the ratio test, which is used to filter out poor matches by comparing " - "the distance of the closest match to the distance of the second closest match. A lower " - "ratio indicates stricter filtering.", - examples=[0.7, "$inputs.ratio_threshold"], + default=50, + description="Threshold for the number of good matches to consider the images as matching", + examples=[50, "$inputs.good_matches_threshold"], ) ) + ratio_threshold: Union[float, ScalarSelector(kind=[INTEGER_KIND])] = Field( + default=0.7, + description="Ratio threshold for the ratio test, which is used to filter out poor matches by comparing " + "the distance of the closest match to the distance of the second closest match. 
A lower " + "ratio indicates stricter filtering.", + examples=[0.7, "$inputs.ratio_threshold"], + ) @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: diff --git a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py index c2717bd01..909197f1f 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py +++ b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py @@ -18,9 +18,9 @@ NUMPY_ARRAY_KIND, STRING_KIND, BatchSelector, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -65,31 +65,31 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): description="Reference to Image or SIFT descriptors from the second image to compare", examples=["$inputs.image2", "$steps.sift.descriptors"], ) - good_matches_threshold: Union[ - PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND]) - ] = Field( - default=50, - description="Threshold for the number of good matches to consider the images as matching", - examples=[50, "$inputs.good_matches_threshold"], + good_matches_threshold: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = ( + Field( + default=50, + description="Threshold for the number of good matches to consider the images as matching", + examples=[50, "$inputs.good_matches_threshold"], + ) ) - ratio_threshold: Union[ - float, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]) - ] = Field( - default=0.7, - description="Ratio threshold for the ratio test, which is used to filter out poor matches by comparing " - "the distance of the closest match to the distance of the second closest match. A lower " - "ratio indicates stricter filtering.", - examples=[0.7, "$inputs.ratio_threshold"], + ratio_threshold: Union[float, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( + Field( + default=0.7, + description="Ratio threshold for the ratio test, which is used to filter out poor matches by comparing " + "the distance of the closest match to the distance of the second closest match. 
A lower " + "ratio indicates stricter filtering.", + examples=[0.7, "$inputs.ratio_threshold"], + ) ) matcher: Union[ Literal["FlannBasedMatcher", "BFMatcher"], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( # type: ignore default="FlannBasedMatcher", description="Matcher to use for comparing the SIFT descriptors", examples=["FlannBasedMatcher", "$inputs.matcher"], ) - visualize: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( + visualize: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( default=False, description="Whether to visualize the keypoints and matches between the two images", examples=[True, "$inputs.visualize"], diff --git a/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py b/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py index e8807fb5e..5d327845e 100644 --- a/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py @@ -15,7 +15,7 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -77,7 +77,7 @@ class SizeMeasurementManifest(WorkflowBlockManifest): str, Tuple[float, float], List[float], - WorkflowParameterSelector( + ScalarSelector( kind=[STRING_KIND, LIST_OF_VALUES_KIND], ), ] = Field( # type: ignore diff --git a/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py b/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py index 42a1e14d3..4d59d802b 100644 --- a/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py @@ -27,9 +27,9 @@ INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, FloatZeroToOne, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -78,22 +78,20 @@ class TemplateMatchingManifest(WorkflowBlockManifest): examples=["$inputs.template", "$steps.cropping.template"], validation_alias=AliasChoices("template", "templates"), ) - matching_threshold: Union[WorkflowParameterSelector(kind=[FLOAT_KIND]), float] = ( - Field( - title="Matching Threshold", - description="The threshold value for template matching.", - default=0.8, - examples=[0.8, "$inputs.threshold"], - ) + matching_threshold: Union[ScalarSelector(kind=[FLOAT_KIND]), float] = Field( + title="Matching Threshold", + description="The threshold value for template matching.", + default=0.8, + examples=[0.8, "$inputs.threshold"], ) - apply_nms: Union[WorkflowParameterSelector(kind=[BOOLEAN_KIND]), bool] = Field( + apply_nms: Union[ScalarSelector(kind=[BOOLEAN_KIND]), bool] = Field( title="Apply NMS", description="Flag to decide if NMS should be applied at the output detections.", default=True, examples=["$inputs.apply_nms", False], ) nms_threshold: Union[ - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), FloatZeroToOne + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), FloatZeroToOne ] = Field( title="NMS threshold", description="The threshold value NMS procedure (if to be applied).", diff --git a/inference/core/workflows/core_steps/classical_cv/threshold/v1.py b/inference/core/workflows/core_steps/classical_cv/threshold/v1.py index c3354c70c..c817c95fd 100644 --- 
a/inference/core/workflows/core_steps/classical_cv/threshold/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/threshold/v1.py @@ -15,9 +15,9 @@ IMAGE_KIND, INTEGER_KIND, STRING_KIND, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -52,7 +52,7 @@ class ImageThresholdManifest(WorkflowBlockManifest): ) threshold_type: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), Literal[ "binary", "binary_inv", @@ -69,12 +69,12 @@ class ImageThresholdManifest(WorkflowBlockManifest): examples=["binary", "$inputs.threshold_type"], ) - thresh_value: Union[WorkflowParameterSelector(kind=[INTEGER_KIND]), int] = Field( + thresh_value: Union[ScalarSelector(kind=[INTEGER_KIND]), int] = Field( description="Threshold value.", examples=[127, "$inputs.thresh_value"], ) - max_value: Union[WorkflowParameterSelector(kind=[INTEGER_KIND]), int] = Field( + max_value: Union[ScalarSelector(kind=[INTEGER_KIND]), int] = Field( description="Maximum value for thresholding", default=255, examples=[255, "$inputs.max_value"], diff --git a/inference/core/workflows/core_steps/flow_control/continue_if/v1.py b/inference/core/workflows/core_steps/flow_control/continue_if/v1.py index 8cc596166..2bd0fb73d 100644 --- a/inference/core/workflows/core_steps/flow_control/continue_if/v1.py +++ b/inference/core/workflows/core_steps/flow_control/continue_if/v1.py @@ -11,9 +11,9 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( BatchSelector, + ScalarSelector, StepSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.execution_engine.v1.entities import FlowControl from inference.core.workflows.prototypes.block import ( @@ -63,7 +63,7 @@ class BlockManifest(WorkflowBlockManifest): ) evaluation_parameters: Dict[ str, - Union[WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector()], + Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()], ] = Field( description="References to additional parameters that may be provided in runtime to parametrise operations", examples=[{"left": "$inputs.some"}], diff --git a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py index a8c4a2436..2031ba29d 100644 --- a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py +++ b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py @@ -6,9 +6,9 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( BatchSelector, + ScalarSelector, StepSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.execution_engine.v1.entities import FlowControl from inference.core.workflows.prototypes.block import ( @@ -61,9 +61,7 @@ class RateLimiterManifest(WorkflowBlockManifest): default=1.0, ge=0.0, ) - depends_on: Union[ - WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector() - ] = Field( + depends_on: Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()] = Field( description="Reference to any output of the the step which immediately preceeds this branch.", examples=["$steps.model"], ) diff --git a/inference/core/workflows/core_steps/formatters/csv/v1.py 
b/inference/core/workflows/core_steps/formatters/csv/v1.py index 330c3477d..b2ec874f0 100644 --- a/inference/core/workflows/core_steps/formatters/csv/v1.py +++ b/inference/core/workflows/core_steps/formatters/csv/v1.py @@ -18,8 +18,8 @@ from inference.core.workflows.execution_engine.entities.types import ( STRING_KIND, BatchSelector, + ScalarSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -139,7 +139,7 @@ class BlockManifest(WorkflowBlockManifest): str, Union[ WorkflowImageSelector, - WorkflowParameterSelector(), + ScalarSelector(), BatchSelector(), str, int, diff --git a/inference/core/workflows/core_steps/formatters/expression/v1.py b/inference/core/workflows/core_steps/formatters/expression/v1.py index b9764feaf..d020917a4 100644 --- a/inference/core/workflows/core_steps/formatters/expression/v1.py +++ b/inference/core/workflows/core_steps/formatters/expression/v1.py @@ -17,8 +17,8 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( BatchSelector, + ScalarSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -109,7 +109,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/expression@v1", "Expression"] data: Dict[ str, - Union[WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector()], + Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()], ] = Field( description="References data to be used to construct results", examples=[ diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py index 4f236a4c9..f24eb72ba 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py @@ -17,9 +17,9 @@ LIST_OF_VALUES_KIND, STRING_KIND, BatchSelector, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -76,7 +76,7 @@ class BlockManifest(WorkflowBlockManifest): examples=[["$steps.lmm.output"]], ) classes: Union[ - WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), + ScalarSelector(kind=[LIST_OF_VALUES_KIND]), BatchSelector(kind=[LIST_OF_VALUES_KIND]), List[str], ] = Field( diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py index b0fde2d21..1cfb844e1 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py @@ -32,9 +32,9 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -105,7 +105,7 @@ class BlockManifest(WorkflowBlockManifest): ) classes: Optional[ Union[ - WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), + ScalarSelector(kind=[LIST_OF_VALUES_KIND]), BatchSelector(kind=[LIST_OF_VALUES_KIND]), List[str], ] diff --git a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py index e4d125334..1d482b03b 100644 
--- a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py @@ -37,7 +37,7 @@ OBJECT_DETECTION_PREDICTION_KIND, BatchSelector, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -94,33 +94,31 @@ class BlockManifest(WorkflowBlockManifest): examples=[["$steps.a.predictions", "$steps.b.predictions"]], validation_alias=AliasChoices("predictions_batches", "predictions"), ) - required_votes: Union[ - PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND]) - ] = Field( + required_votes: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( description="Required number of votes for single detection from different models to accept detection as output detection", examples=[2, "$inputs.required_votes"], ) - class_aware: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( + class_aware: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Flag to decide if merging detections is class-aware or only bounding boxes aware", examples=[True, "$inputs.class_aware"], ) iou_threshold: Union[ - FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]) + FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]) ] = Field( default=0.3, description="IoU threshold to consider detections from different models as matching (increasing votes for region)", examples=[0.3, "$inputs.iou_threshold"], ) - confidence: Union[ - FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]) - ] = Field( - default=0.0, - description="Confidence threshold for merged detections", - examples=[0.1, "$inputs.confidence"], + confidence: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( + Field( + default=0.0, + description="Confidence threshold for merged detections", + examples=[0.1, "$inputs.confidence"], + ) ) classes_to_consider: Optional[ - Union[List[str], WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] + Union[List[str], ScalarSelector(kind=[LIST_OF_VALUES_KIND])] ] = Field( default=None, description="Optional list of classes to consider in consensus procedure.", @@ -130,7 +128,7 @@ class BlockManifest(WorkflowBlockManifest): Union[ PositiveInt, Dict[str, PositiveInt], - WorkflowParameterSelector(kind=[INTEGER_KIND, DICTIONARY_KIND]), + ScalarSelector(kind=[INTEGER_KIND, DICTIONARY_KIND]), ] ] = Field( default=None, diff --git a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py index 77fd4e62d..9fa51f41a 100644 --- a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py @@ -25,9 +25,9 @@ STRING_KIND, BatchSelector, FloatZeroToOne, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -74,7 +74,7 @@ class BlockManifest(WorkflowBlockManifest): ) overlap_filtering_strategy: Union[ Literal["none", "nms", "nmm"], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( default="nms", description="Which strategy to employ when filtering overlapping boxes. 
" @@ -83,7 +83,7 @@ class BlockManifest(WorkflowBlockManifest): ) iou_threshold: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.3, description="Parameter of overlap filtering strategy. If box intersection over union is above this " diff --git a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py index 01a3c4171..4fdca5e37 100644 --- a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py @@ -26,9 +26,9 @@ LIST_OF_VALUES_KIND, STRING_KIND, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -114,7 +114,7 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - prompt: Optional[Union[WorkflowParameterSelector(kind=[STRING_KIND]), str]] = Field( + prompt: Optional[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( default=None, description="Text prompt to the Claude model", examples=["my prompt", "$inputs.prompt"], @@ -137,28 +137,28 @@ class BlockManifest(WorkflowBlockManifest): }, }, ) - classes: Optional[ - Union[WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] - ] = Field( - default=None, - description="List of classes to be used", - examples=[["class-a", "class-b"], "$inputs.classes"], - json_schema_extra={ - "relevant_for": { - "task_type": { - "values": TASKS_REQUIRING_CLASSES, - "required": True, + classes: Optional[Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = ( + Field( + default=None, + description="List of classes to be used", + examples=[["class-a", "class-b"], "$inputs.classes"], + json_schema_extra={ + "relevant_for": { + "task_type": { + "values": TASKS_REQUIRING_CLASSES, + "required": True, + }, }, }, - }, + ) ) - api_key: Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] = Field( + api_key: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Your Antropic API key", examples=["xxx-xxx", "$inputs.antropics_api_key"], private=True, ) model_version: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), Literal[ "claude-3-5-sonnet", "claude-3-opus", "claude-3-sonnet", "claude-3-haiku" ], @@ -171,16 +171,14 @@ class BlockManifest(WorkflowBlockManifest): default=450, description="Maximum number of tokens the model can generate in it's response.", ) - temperature: Optional[ - Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] - ] = Field( + temperature: Optional[Union[float, ScalarSelector(kind=[FLOAT_KIND])]] = Field( default=None, description="Temperature to sample from the model - value in range 0.0-2.0, the higher - the more " 'random / "creative" the generations are.', ge=0.0, le=2.0, ) - max_image_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( + max_image_size: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( description="Maximum size of the image - if input has larger side, it will be downscaled, keeping aspect ratio", default=1024, ) diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py index f8cb0a355..ad7460ba5 100644 --- 
a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py @@ -32,9 +32,9 @@ PARENT_ID_KIND, PREDICTION_TYPE_KIND, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -71,12 +71,10 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/clip_comparison@v1", "ClipComparison"] name: str = Field(description="Unique name of step in workflows") images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - texts: Union[WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] = ( - Field( - description="List of texts to calculate similarity against each input image", - examples=[["a", "b", "c"], "$inputs.texts"], - validation_alias=AliasChoices("texts", "text"), - ) + texts: Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( + description="List of texts to calculate similarity against each input image", + examples=[["a", "b", "c"], "$inputs.texts"], + validation_alias=AliasChoices("texts", "text"), ) @classmethod diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py index 165b020cc..bb20d6214 100644 --- a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py @@ -33,9 +33,9 @@ PARENT_ID_KIND, STRING_KIND, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -70,12 +70,10 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/clip_comparison@v2"] name: str = Field(description="Unique name of step in workflows") images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - classes: Union[WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] = ( - Field( - description="List of classes to calculate similarity against each input image", - examples=[["a", "b", "c"], "$inputs.texts"], - min_items=1, - ) + classes: Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( + description="List of classes to calculate similarity against each input image", + examples=[["a", "b", "c"], "$inputs.texts"], + min_items=1, ) version: Union[ Literal[ @@ -89,7 +87,7 @@ class BlockManifest(WorkflowBlockManifest): "ViT-L-14-336px", "ViT-L-14", ], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( default="ViT-B-16", description="Variant of CLIP model", diff --git a/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py b/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py index 72e9a59d6..e44ab12ef 100644 --- a/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py @@ -30,9 +30,9 @@ STRING_KIND, WILDCARD_KIND, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -69,7 +69,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/cog_vlm@v1", "CogVLM"] images: Union[WorkflowImageSelector, 
StepOutputImageSelector] = ImageInputField - prompt: Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] = Field( + prompt: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Text prompt to the CogVLM model", examples=["my prompt", "$inputs.prompt"], ) diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py index 55cebeee0..cdf2ce079 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py @@ -24,9 +24,9 @@ STRING_KIND, BatchSelector, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -165,7 +165,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/florence_2@v1"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField model_version: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), Literal["florence-2-base", "florence-2-large"], ] = Field( default="florence-2-base", @@ -189,7 +189,7 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - prompt: Optional[Union[WorkflowParameterSelector(kind=[STRING_KIND]), str]] = Field( + prompt: Optional[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( default=None, description="Text prompt to the Florence-2 model", examples=["my prompt", "$inputs.prompt"], @@ -199,20 +199,20 @@ class BlockManifest(WorkflowBlockManifest): }, }, ) - classes: Optional[ - Union[WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] - ] = Field( - default=None, - description="List of classes to be used", - examples=[["class-a", "class-b"], "$inputs.classes"], - json_schema_extra={ - "relevant_for": { - "task_type": { - "values": TASKS_REQUIRING_CLASSES, - "required": True, + classes: Optional[Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = ( + Field( + default=None, + description="List of classes to be used", + examples=[["class-a", "class-b"], "$inputs.classes"], + json_schema_extra={ + "relevant_for": { + "task_type": { + "values": TASKS_REQUIRING_CLASSES, + "required": True, + }, }, }, - }, + ) ) grounding_detection: Optional[ Union[ @@ -225,7 +225,7 @@ class BlockManifest(WorkflowBlockManifest): KEYPOINT_DETECTION_PREDICTION_KIND, ] ), - WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), + ScalarSelector(kind=[LIST_OF_VALUES_KIND]), ] ] = Field( default=None, diff --git a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py index ce6908f42..935306619 100644 --- a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py @@ -24,9 +24,9 @@ LIST_OF_VALUES_KIND, STRING_KIND, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -123,7 +123,7 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - prompt: Optional[Union[WorkflowParameterSelector(kind=[STRING_KIND]), str]] = Field( + prompt: Optional[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( default=None, description="Text prompt to the Gemini model", examples=["my 
prompt", "$inputs.prompt"], @@ -146,28 +146,28 @@ class BlockManifest(WorkflowBlockManifest): }, }, ) - classes: Optional[ - Union[WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] - ] = Field( - default=None, - description="List of classes to be used", - examples=[["class-a", "class-b"], "$inputs.classes"], - json_schema_extra={ - "relevant_for": { - "task_type": { - "values": TASKS_REQUIRING_CLASSES, - "required": True, + classes: Optional[Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = ( + Field( + default=None, + description="List of classes to be used", + examples=[["class-a", "class-b"], "$inputs.classes"], + json_schema_extra={ + "relevant_for": { + "task_type": { + "values": TASKS_REQUIRING_CLASSES, + "required": True, + }, }, }, - }, + ) ) - api_key: Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] = Field( + api_key: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Your Google AI API key", examples=["xxx-xxx", "$inputs.google_api_key"], private=True, ) model_version: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), Literal["gemini-1.5-flash", "gemini-1.5-pro"], ] = Field( default="gemini-1.5-flash", @@ -178,9 +178,7 @@ class BlockManifest(WorkflowBlockManifest): default=450, description="Maximum number of tokens the model can generate in it's response.", ) - temperature: Optional[ - Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] - ] = Field( + temperature: Optional[Union[float, ScalarSelector(kind=[FLOAT_KIND])]] = Field( default=None, description="Temperature to sample from the model - value in range 0.0-2.0, the higher - the more " 'random / "creative" the generations are.', diff --git a/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py b/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py index b24b5633a..d374f3e5b 100644 --- a/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py @@ -22,9 +22,9 @@ from inference.core.workflows.execution_engine.entities.types import ( OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -80,7 +80,7 @@ class BlockManifest(WorkflowBlockManifest): }, }, ) - api_key: Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] = Field( + api_key: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Your Google Vision API key", examples=["xxx-xxx", "$inputs.google_api_key"], private=True, diff --git a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py index cd8063363..7274a3109 100644 --- a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py @@ -36,9 +36,9 @@ STRING_KIND, WILDCARD_KIND, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -95,12 +95,12 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/lmm@v1", "LMM"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - prompt: Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] = Field( + prompt: 
Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Holds unconstrained text prompt to LMM mode", examples=["my prompt", "$inputs.prompt"], ) lmm_type: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), Literal["gpt_4v", "cog_vlm"] + ScalarSelector(kind=[STRING_KIND]), Literal["gpt_4v", "cog_vlm"] ] = Field( description="Type of LMM to be used", examples=["gpt_4v", "$inputs.lmm_type"] ) @@ -115,9 +115,7 @@ class BlockManifest(WorkflowBlockManifest): } ], ) - remote_api_key: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), Optional[str] - ] = Field( + remote_api_key: Union[ScalarSelector(kind=[STRING_KIND]), Optional[str]] = Field( default=None, description="Holds API key required to call LMM model - in current state of development, we require OpenAI key when `lmm_type=gpt_4v` and do not require additional API key for CogVLM calls.", examples=["xxx-xxx", "$inputs.api_key"], diff --git a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py index 3f468a332..4e7971c88 100644 --- a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py @@ -30,9 +30,9 @@ STRING_KIND, TOP_CLASS_KIND, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -70,15 +70,13 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/lmm_for_classification@v1", "LMMForClassification"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField lmm_type: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), Literal["gpt_4v", "cog_vlm"] + ScalarSelector(kind=[STRING_KIND]), Literal["gpt_4v", "cog_vlm"] ] = Field( description="Type of LMM to be used", examples=["gpt_4v", "$inputs.lmm_type"] ) - classes: Union[List[str], WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = ( - Field( - description="List of classes that LMM shall classify against", - examples=[["a", "b"], "$inputs.classes"], - ) + classes: Union[List[str], ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( + description="List of classes that LMM shall classify against", + examples=[["a", "b"], "$inputs.classes"], ) lmm_config: LMMConfig = Field( default_factory=lambda: LMMConfig(), @@ -91,9 +89,7 @@ class BlockManifest(WorkflowBlockManifest): } ], ) - remote_api_key: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), Optional[str] - ] = Field( + remote_api_key: Union[ScalarSelector(kind=[STRING_KIND]), Optional[str]] = Field( default=None, description="Holds API key required to call LMM model - in current state of development, we require OpenAI key when `lmm_type=gpt_4v` and do not require additional API key for CogVLM calls.", examples=["xxx-xxx", "$inputs.api_key"], diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v1.py b/inference/core/workflows/core_steps/models/foundation/openai/v1.py index 78defbe28..4fcab7168 100644 --- a/inference/core/workflows/core_steps/models/foundation/openai/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/openai/v1.py @@ -27,9 +27,9 @@ STRING_KIND, WILDCARD_KIND, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -73,19 +73,17 @@ class 
BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/open_ai@v1", "OpenAI"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - prompt: Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] = Field( + prompt: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Text prompt to the OpenAI model", examples=["my prompt", "$inputs.prompt"], ) - openai_api_key: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), Optional[str] - ] = Field( + openai_api_key: Union[ScalarSelector(kind=[STRING_KIND]), Optional[str]] = Field( description="Your OpenAI API key", examples=["xxx-xxx", "$inputs.openai_api_key"], private=True, ) openai_model: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), Literal["gpt-4o", "gpt-4o-mini"] + ScalarSelector(kind=[STRING_KIND]), Literal["gpt-4o", "gpt-4o-mini"] ] = Field( default="gpt-4o", description="Model to be used", @@ -100,7 +98,7 @@ class BlockManifest(WorkflowBlockManifest): ], ) image_detail: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), Literal["auto", "high", "low"] + ScalarSelector(kind=[STRING_KIND]), Literal["auto", "high", "low"] ] = Field( default="auto", description="Indicates the image's quality, with 'high' suggesting it is of high resolution and should be processed or displayed with high fidelity.", diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v2.py b/inference/core/workflows/core_steps/models/foundation/openai/v2.py index b9d9ae379..87482fd76 100644 --- a/inference/core/workflows/core_steps/models/foundation/openai/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/openai/v2.py @@ -23,9 +23,9 @@ LIST_OF_VALUES_KIND, STRING_KIND, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -112,7 +112,7 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - prompt: Optional[Union[WorkflowParameterSelector(kind=[STRING_KIND]), str]] = Field( + prompt: Optional[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( default=None, description="Text prompt to the OpenAI model", examples=["my prompt", "$inputs.prompt"], @@ -135,35 +135,35 @@ class BlockManifest(WorkflowBlockManifest): }, }, ) - classes: Optional[ - Union[WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] - ] = Field( - default=None, - description="List of classes to be used", - examples=[["class-a", "class-b"], "$inputs.classes"], - json_schema_extra={ - "relevant_for": { - "task_type": { - "values": TASKS_REQUIRING_CLASSES, - "required": True, + classes: Optional[Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = ( + Field( + default=None, + description="List of classes to be used", + examples=[["class-a", "class-b"], "$inputs.classes"], + json_schema_extra={ + "relevant_for": { + "task_type": { + "values": TASKS_REQUIRING_CLASSES, + "required": True, + }, }, }, - }, + ) ) - api_key: Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] = Field( + api_key: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Your OpenAI API key", examples=["xxx-xxx", "$inputs.openai_api_key"], private=True, ) model_version: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), Literal["gpt-4o", "gpt-4o-mini"] + ScalarSelector(kind=[STRING_KIND]), Literal["gpt-4o", "gpt-4o-mini"] ] = Field( default="gpt-4o", description="Model to be used", examples=["gpt-4o", "$inputs.openai_model"], ) 
image_detail: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), Literal["auto", "high", "low"] + ScalarSelector(kind=[STRING_KIND]), Literal["auto", "high", "low"] ] = Field( default="auto", description="Indicates the image's quality, with 'high' suggesting it is of high resolution and should be processed or displayed with high fidelity.", @@ -173,9 +173,7 @@ class BlockManifest(WorkflowBlockManifest): default=450, description="Maximum number of tokens the model can generate in it's response.", ) - temperature: Optional[ - Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] - ] = Field( + temperature: Optional[Union[float, ScalarSelector(kind=[FLOAT_KIND])]] = Field( default=None, description="Temperature to sample from the model - value in range 0.0-2.0, the higher - the more " 'random / "creative" the generations are.', diff --git a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py index d5de42c4b..326bf3788 100644 --- a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py @@ -39,9 +39,9 @@ STRING_KIND, BatchSelector, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -96,7 +96,7 @@ class BlockManifest(WorkflowBlockManifest): json_schema_extra={"always_visible": True}, ) version: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), Literal["hiera_large", "hiera_small", "hiera_tiny", "hiera_b_plus"], ] = Field( default="hiera_tiny", @@ -104,18 +104,18 @@ class BlockManifest(WorkflowBlockManifest): examples=["hiera_large", "$inputs.openai_model"], ) threshold: Union[ - WorkflowParameterSelector(kind=[FLOAT_KIND]), + ScalarSelector(kind=[FLOAT_KIND]), float, ] = Field( default=0.0, description="Threshold for predicted masks scores", examples=[0.3] ) - multimask_output: Union[ - Optional[bool], WorkflowParameterSelector(kind=[BOOLEAN_KIND]) - ] = Field( - default=True, - description="Flag to determine whether to use sam2 internal multimask or single mask mode. For ambiguous prompts setting to True is recomended.", - examples=[True, "$inputs.multimask_output"], + multimask_output: Union[Optional[bool], ScalarSelector(kind=[BOOLEAN_KIND])] = ( + Field( + default=True, + description="Flag to determine whether to use sam2 internal multimask or single mask mode. 
For ambiguous prompts setting to True is recomended.", + examples=[True, "$inputs.multimask_output"], + ) ) @classmethod diff --git a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py index 09c18f17f..8f09dd683 100644 --- a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py @@ -20,9 +20,9 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, STRING_KIND, BatchSelector, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -76,7 +76,7 @@ class BlockManifest(WorkflowBlockManifest): ) ) prompt: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), BatchSelector(kind=[STRING_KIND]), str, ] = Field( @@ -85,7 +85,7 @@ class BlockManifest(WorkflowBlockManifest): ) negative_prompt: Optional[ Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), BatchSelector(kind=[STRING_KIND]), str, ] @@ -94,7 +94,7 @@ class BlockManifest(WorkflowBlockManifest): description="Negative prompt to inpainting model (what you do not wish to see)", examples=["my prompt", "$inputs.prompt"], ) - api_key: Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] = Field( + api_key: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Your Stability AI API key", examples=["xxx-xxx", "$inputs.stability_ai_api_key"], private=True, diff --git a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py index ce9be725c..e6e19d702 100644 --- a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py @@ -29,9 +29,9 @@ STRING_KIND, FloatZeroToOne, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -68,9 +68,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/yolo_world_model@v1", "YoloWorldModel", "YoloWorld"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - class_names: Union[ - WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), List[str] - ] = Field( + class_names: Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( description="One or more classes that you want YOLO-World to detect. 
The model accepts any string as an input, though does best with short descriptions of common objects.", examples=[["person", "car", "license plate"], "$inputs.class_names"], ) @@ -85,7 +83,7 @@ class BlockManifest(WorkflowBlockManifest): "l", "x", ], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( default="v2-s", description="Variant of YoloWorld model", @@ -93,7 +91,7 @@ class BlockManifest(WorkflowBlockManifest): ) confidence: Union[ Optional[FloatZeroToOne], - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.005, description="Confidence threshold for detections", diff --git a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py index c3480c4cf..25153fe35 100644 --- a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py @@ -38,9 +38,9 @@ FloatZeroToOne, ImageInputField, RoboflowModelField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -79,18 +79,16 @@ class BlockManifest(WorkflowBlockManifest): "InstanceSegmentationModel", ] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - model_id: Union[WorkflowParameterSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( + model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( RoboflowModelField ) - class_agnostic_nms: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = ( - Field( - default=False, - description="Value to decide if NMS is to be used in class-agnostic mode.", - examples=[True, "$inputs.class_agnostic_nms"], - ) + class_agnostic_nms: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + default=False, + description="Value to decide if NMS is to be used in class-agnostic mode.", + examples=[True, "$inputs.class_agnostic_nms"], ) class_filter: Union[ - Optional[List[str]], WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]) + Optional[List[str]], ScalarSelector(kind=[LIST_OF_VALUES_KIND]) ] = Field( default=None, description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", @@ -98,7 +96,7 @@ class BlockManifest(WorkflowBlockManifest): ) confidence: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Confidence threshold for predictions", @@ -106,29 +104,25 @@ class BlockManifest(WorkflowBlockManifest): ) iou_threshold: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.3, description="Parameter of NMS, to decide on minimum box intersection over union to merge boxes", examples=[0.4, "$inputs.iou_threshold"], ) - max_detections: Union[ - PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND]) - ] = Field( + max_detections: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( default=300, description="Maximum number of detections to return", examples=[300, "$inputs.max_detections"], ) - max_candidates: Union[ - PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND]) - ] = Field( + max_candidates: Union[PositiveInt, 
ScalarSelector(kind=[INTEGER_KIND])] = Field( default=3000, description="Maximum number of candidates as NMS input to be taken into account.", examples=[3000, "$inputs.max_candidates"], ) mask_decode_mode: Union[ Literal["accurate", "tradeoff", "fast"], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( default="accurate", description="Parameter of mask decoding in prediction post-processing.", @@ -136,21 +130,19 @@ class BlockManifest(WorkflowBlockManifest): ) tradeoff_factor: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.0, description="Post-processing parameter to dictate tradeoff between fast and accurate", examples=[0.3, "$inputs.tradeoff_factor"], ) - disable_active_learning: Union[ - bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND]) - ] = Field( + disable_active_learning: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Parameter to decide if Active Learning data sampling is disabled for the model", examples=[True, "$inputs.disable_active_learning"], ) active_learning_target_dataset: Union[ - WorkflowParameterSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] ] = Field( default=None, description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " diff --git a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py index b9d80bfed..56464a880 100644 --- a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py @@ -39,9 +39,9 @@ FloatZeroToOne, ImageInputField, RoboflowModelField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -80,18 +80,16 @@ class BlockManifest(WorkflowBlockManifest): "KeypointsDetectionModel", ] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - model_id: Union[WorkflowParameterSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( + model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( RoboflowModelField ) - class_agnostic_nms: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = ( - Field( - default=False, - description="Value to decide if NMS is to be used in class-agnostic mode.", - examples=[True, "$inputs.class_agnostic_nms"], - ) + class_agnostic_nms: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + default=False, + description="Value to decide if NMS is to be used in class-agnostic mode.", + examples=[True, "$inputs.class_agnostic_nms"], ) class_filter: Union[ - Optional[List[str]], WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]) + Optional[List[str]], ScalarSelector(kind=[LIST_OF_VALUES_KIND]) ] = Field( default=None, description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", @@ -99,7 +97,7 @@ class BlockManifest(WorkflowBlockManifest): ) confidence: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Confidence threshold for predictions", @@ -107,43 +105,37 @@ class BlockManifest(WorkflowBlockManifest): ) 
iou_threshold: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.3, description="Parameter of NMS, to decide on minimum box intersection over union to merge boxes", examples=[0.4, "$inputs.iou_threshold"], ) - max_detections: Union[ - PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND]) - ] = Field( + max_detections: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( default=300, description="Maximum number of detections to return", examples=[300, "$inputs.max_detections"], ) - max_candidates: Union[ - PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND]) - ] = Field( + max_candidates: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( default=3000, description="Maximum number of candidates as NMS input to be taken into account.", examples=[3000, "$inputs.max_candidates"], ) keypoint_confidence: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.0, description="Confidence threshold to predict keypoint as visible.", examples=[0.3, "$inputs.keypoint_confidence"], ) - disable_active_learning: Union[ - bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND]) - ] = Field( + disable_active_learning: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Parameter to decide if Active Learning data sampling is disabled for the model", examples=[True, "$inputs.disable_active_learning"], ) active_learning_target_dataset: Union[ - WorkflowParameterSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] ] = Field( default=None, description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py index eca510831..e651f6a63 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py @@ -33,9 +33,9 @@ FloatZeroToOne, ImageInputField, RoboflowModelField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -74,26 +74,24 @@ class BlockManifest(WorkflowBlockManifest): "ClassificationModel", ] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - model_id: Union[WorkflowParameterSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( + model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( RoboflowModelField ) confidence: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Confidence threshold for predictions", examples=[0.3, "$inputs.confidence_threshold"], ) - disable_active_learning: Union[ - bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND]) - ] = Field( + disable_active_learning: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Parameter to decide if Active Learning data sampling is disabled for the model", examples=[True, "$inputs.disable_active_learning"], ) active_learning_target_dataset: Union[ - 
WorkflowParameterSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] ] = Field( default=None, description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py index 78b41b32b..d65b4910b 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py @@ -33,9 +33,9 @@ FloatZeroToOne, ImageInputField, RoboflowModelField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -74,26 +74,24 @@ class BlockManifest(WorkflowBlockManifest): "MultiLabelClassificationModel", ] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - model_id: Union[WorkflowParameterSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( + model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( RoboflowModelField ) confidence: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Confidence threshold for predictions", examples=[0.3, "$inputs.confidence_threshold"], ) - disable_active_learning: Union[ - bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND]) - ] = Field( + disable_active_learning: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Parameter to decide if Active Learning data sampling is disabled for the model", examples=[True, "$inputs.disable_active_learning"], ) active_learning_target_dataset: Union[ - WorkflowParameterSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] ] = Field( default=None, description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " diff --git a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py index ab8b84f26..54c46361b 100644 --- a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py @@ -36,9 +36,9 @@ FloatZeroToOne, ImageInputField, RoboflowModelField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -77,18 +77,18 @@ class BlockManifest(WorkflowBlockManifest): "ObjectDetectionModel", ] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - model_id: Union[WorkflowParameterSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( + model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( RoboflowModelField ) - class_agnostic_nms: Union[ - Optional[bool], WorkflowParameterSelector(kind=[BOOLEAN_KIND]) - ] = Field( - default=False, - description="Value to decide if NMS is to be used in class-agnostic mode.", - examples=[True, "$inputs.class_agnostic_nms"], + class_agnostic_nms: Union[Optional[bool], ScalarSelector(kind=[BOOLEAN_KIND])] = ( + Field( + default=False, + description="Value to decide if NMS is to be used in class-agnostic mode.", + 
examples=[True, "$inputs.class_agnostic_nms"], + ) ) class_filter: Union[ - Optional[List[str]], WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]) + Optional[List[str]], ScalarSelector(kind=[LIST_OF_VALUES_KIND]) ] = Field( default=None, description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", @@ -96,7 +96,7 @@ class BlockManifest(WorkflowBlockManifest): ) confidence: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Confidence threshold for predictions", @@ -104,35 +104,29 @@ class BlockManifest(WorkflowBlockManifest): ) iou_threshold: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.3, description="Parameter of NMS, to decide on minimum box intersection over union to merge boxes", examples=[0.4, "$inputs.iou_threshold"], ) - max_detections: Union[ - PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND]) - ] = Field( + max_detections: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( default=300, description="Maximum number of detections to return", examples=[300, "$inputs.max_detections"], ) - max_candidates: Union[ - PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND]) - ] = Field( + max_candidates: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( default=3000, description="Maximum number of candidates as NMS input to be taken into account.", examples=[3000, "$inputs.max_candidates"], ) - disable_active_learning: Union[ - bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND]) - ] = Field( + disable_active_learning: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Parameter to decide if Active Learning data sampling is disabled for the model", examples=[True, "$inputs.disable_active_learning"], ) active_learning_target_dataset: Union[ - WorkflowParameterSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] ] = Field( default=None, description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " diff --git a/inference/core/workflows/core_steps/sinks/email_notification/v1.py b/inference/core/workflows/core_steps/sinks/email_notification/v1.py index a29b27601..76413aa53 100644 --- a/inference/core/workflows/core_steps/sinks/email_notification/v1.py +++ b/inference/core/workflows/core_steps/sinks/email_notification/v1.py @@ -30,7 +30,7 @@ LIST_OF_VALUES_KIND, STRING_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -178,14 +178,14 @@ class BlockManifest(WorkflowBlockManifest): "During last 5 minutes detected \{\{ $parameters.num_instances \}\} instances" ], ) - sender_email: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( + sender_email: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( description="E-mail to be used to send the message", examples=["sender@gmail.com"], ) receiver_email: Union[ str, List[str], - WorkflowParameterSelector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), + ScalarSelector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), ] = Field( description="Destination e-mail address", examples=["receiver@gmail.com"], @@ -194,7 +194,7 @@ class BlockManifest(WorkflowBlockManifest): Union[ str, List[str], - 
WorkflowParameterSelector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), + ScalarSelector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), ] ] = Field( default=None, @@ -205,7 +205,7 @@ class BlockManifest(WorkflowBlockManifest): Union[ str, List[str], - WorkflowParameterSelector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), + ScalarSelector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), ] ] = Field( default=None, @@ -214,7 +214,7 @@ class BlockManifest(WorkflowBlockManifest): ) message_parameters: Dict[ str, - Union[WorkflowParameterSelector(), BatchSelector(), str, int, float, bool], + Union[ScalarSelector(), BatchSelector(), str, int, float, bool], ] = Field( description="References data to be used to construct each and every column", examples=[ @@ -241,16 +241,14 @@ class BlockManifest(WorkflowBlockManifest): default_factory=dict, examples=[{"report.cvs": "$steps.csv_formatter.csv_content"}], ) - smtp_server: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( + smtp_server: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( description="Custom SMTP server to be used", examples=["$inputs.smtp_server", "smtp.google.com"], ) - sender_email_password: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = ( - Field( - description="Sender e-mail password be used when authenticating to SMTP server", - private=True, - examples=["$inputs.email_password"], - ) + sender_email_password: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( + description="Sender e-mail password be used when authenticating to SMTP server", + private=True, + examples=["$inputs.email_password"], ) smtp_port: int = Field( default=465, @@ -260,30 +258,26 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - fire_and_forget: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = ( - Field( - default=True, - description="Boolean flag dictating if sink is supposed to be executed in the background, " - "not waiting on status of registration before end of workflow run. Use `True` if best-effort " - "registration is needed, use `False` while debugging and if error handling is needed", - examples=["$inputs.fire_and_forget", False], - ) + fire_and_forget: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + default=True, + description="Boolean flag dictating if sink is supposed to be executed in the background, " + "not waiting on status of registration before end of workflow run. 
Use `True` if best-effort " + "registration is needed, use `False` while debugging and if error handling is needed", + examples=["$inputs.fire_and_forget", False], ) - disable_sink: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( + disable_sink: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( default=False, description="boolean flag that can be also reference to input - to arbitrarily disable " "data collection for specific request", examples=[False, "$inputs.disable_email_notifications"], ) - cooldown_seconds: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = ( - Field( - default=5, - description="Number of seconds to wait until follow-up notification can be sent", - examples=["$inputs.cooldown_seconds", 3], - json_schema_extra={ - "always_visible": True, - }, - ) + cooldown_seconds: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( + default=5, + description="Number of seconds to wait until follow-up notification can be sent", + examples=["$inputs.cooldown_seconds", 3], + json_schema_extra={ + "always_visible": True, + }, ) @field_validator("receiver_email") diff --git a/inference/core/workflows/core_steps/sinks/local_file/v1.py b/inference/core/workflows/core_steps/sinks/local_file/v1.py index 29fcdb4fc..c91dd909f 100644 --- a/inference/core/workflows/core_steps/sinks/local_file/v1.py +++ b/inference/core/workflows/core_steps/sinks/local_file/v1.py @@ -12,7 +12,7 @@ BOOLEAN_KIND, STRING_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -102,11 +102,11 @@ class BlockManifest(WorkflowBlockManifest): } }, ) - target_directory: Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] = Field( + target_directory: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Target directory", examples=["some/location"], ) - file_name_prefix: Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] = Field( + file_name_prefix: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( default="workflow_output", description="File name prefix", examples=["my_file"], @@ -114,20 +114,18 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - max_entries_per_file: Union[int, WorkflowParameterSelector(kind=[STRING_KIND])] = ( - Field( - default=1024, - description="Defines how many datapoints can be appended to a single file", - examples=[1024], - json_schema_extra={ - "relevant_for": { - "output_mode": { - "values": ["append_log"], - "required": True, - }, - } - }, - ) + max_entries_per_file: Union[int, ScalarSelector(kind=[STRING_KIND])] = Field( + default=1024, + description="Defines how many datapoints can be appended to a single file", + examples=[1024], + json_schema_extra={ + "relevant_for": { + "output_mode": { + "values": ["append_log"], + "required": True, + }, + } + }, ) @field_validator("max_entries_per_file") diff --git a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py index e580d1728..92166ef95 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py @@ -21,7 +21,7 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -68,7 +68,7 @@ class BlockManifest(WorkflowBlockManifest): ) 
field_value: Union[ str, - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), BatchSelector(kind=[STRING_KIND]), ] = Field( description="This is the name of the metadata field you are creating", @@ -78,14 +78,12 @@ class BlockManifest(WorkflowBlockManifest): description="Name of the field to be updated in Roboflow Customer Metadata", examples=["The name of the value of the field"], ) - fire_and_forget: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = ( - Field( - default=True, - description="Boolean flag dictating if sink is supposed to be executed in the background, " - "not waiting on status of registration before end of workflow run. Use `True` if best-effort " - "registration is needed, use `False` while debugging and if error handling is needed", - examples=[True], - ) + fire_and_forget: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + default=True, + description="Boolean flag dictating if sink is supposed to be executed in the background, " + "not waiting on status of registration before end of workflow run. Use `True` if best-effort " + "registration is needed, use `False` while debugging and if error handling is needed", + examples=[True], ) @classmethod diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py index 401bc9504..abde51a21 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py @@ -65,9 +65,9 @@ STRING_KIND, BatchSelector, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -119,9 +119,7 @@ class BlockManifest(WorkflowBlockManifest): description="Reference q detection-like predictions", examples=["$steps.object_detection_model.predictions"], ) - target_project: Union[ - WorkflowParameterSelector(kind=[ROBOFLOW_PROJECT_KIND]), str - ] = Field( + target_project: Union[ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), str] = Field( description="name of Roboflow dataset / project to be used as target for collected data", examples=["my_dataset", "$inputs.target_al_dataset"], ) @@ -166,34 +164,28 @@ class BlockManifest(WorkflowBlockManifest): description="Compression level for images registered", examples=[75], ) - registration_tags: List[ - Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] - ] = Field( + registration_tags: List[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( default_factory=list, description="Tags to be attached to registered datapoints", examples=[["location-florida", "factory-name", "$inputs.dynamic_tag"]], ) - disable_sink: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( + disable_sink: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( default=False, description="boolean flag that can be also reference to input - to arbitrarily disable " "data collection for specific request", examples=[True, "$inputs.disable_active_learning"], ) - fire_and_forget: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = ( - Field( - default=True, - description="Boolean flag dictating if sink is supposed to be executed in the background, " - "not waiting on status of registration before end of workflow run. 
Use `True` if best-effort " - "registration is needed, use `False` while debugging and if error handling is needed", - examples=[True], - ) + fire_and_forget: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + default=True, + description="Boolean flag dictating if sink is supposed to be executed in the background, " + "not waiting on status of registration before end of workflow run. Use `True` if best-effort " + "registration is needed, use `False` while debugging and if error handling is needed", + examples=[True], ) - labeling_batch_prefix: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = ( - Field( - default="workflows_data_collector", - description="Prefix of the name for labeling batches that will be registered in Roboflow app", - examples=["my_labeling_batch_name"], - ) + labeling_batch_prefix: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( + default="workflows_data_collector", + description="Prefix of the name for labeling batches that will be registered in Roboflow app", + examples=["my_labeling_batch_name"], ) labeling_batches_recreation_frequency: BatchCreationFrequency = Field( default="never", diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py index fdd4ce80f..f3dfa1b47 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py @@ -27,9 +27,9 @@ STRING_KIND, BatchSelector, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -69,9 +69,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/roboflow_dataset_upload@v2"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - target_project: Union[ - WorkflowParameterSelector(kind=[ROBOFLOW_PROJECT_KIND]), str - ] = Field( + target_project: Union[ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), str] = Field( description="name of Roboflow dataset / project to be used as target for collected data", examples=["my_dataset", "$inputs.target_al_dataset"], ) @@ -96,20 +94,18 @@ class BlockManifest(WorkflowBlockManifest): examples=["$steps.object_detection_model.predictions"], json_schema_extra={"always_visible": True}, ) - data_percentage: Union[ - FloatZeroToHundred, WorkflowParameterSelector(kind=[FLOAT_KIND]) - ] = Field( - default=100, - description="Percent of data that will be saved (in range [0.0, 100.0])", - examples=[True, False, "$inputs.persist_predictions"], - ) - persist_predictions: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = ( + data_percentage: Union[FloatZeroToHundred, ScalarSelector(kind=[FLOAT_KIND])] = ( Field( - default=True, - description="Boolean flag to decide if predictions should be registered along with images", + default=100, + description="Percent of data that will be saved (in range [0.0, 100.0])", examples=[True, False, "$inputs.persist_predictions"], ) ) + persist_predictions: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + default=True, + description="Boolean flag to decide if predictions should be registered along with images", + examples=[True, False, "$inputs.persist_predictions"], + ) minutely_usage_limit: int = Field( default=10, description="Maximum number of data registration requests per minute accounted in scope of " @@ -141,33 +137,27 @@ class 
BlockManifest(WorkflowBlockManifest): description="Compression level for images registered", examples=[95, 75], ) - registration_tags: List[ - Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] - ] = Field( + registration_tags: List[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( default_factory=list, description="Tags to be attached to registered datapoints", examples=[["location-florida", "factory-name", "$inputs.dynamic_tag"]], ) - disable_sink: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( + disable_sink: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( default=False, description="boolean flag that can be also reference to input - to arbitrarily disable " "data collection for specific request", examples=[True, "$inputs.disable_active_learning"], ) - fire_and_forget: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = ( - Field( - default=True, - description="Boolean flag dictating if sink is supposed to be executed in the background, " - "not waiting on status of registration before end of workflow run. Use `True` if best-effort " - "registration is needed, use `False` while debugging and if error handling is needed", - ) + fire_and_forget: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + default=True, + description="Boolean flag dictating if sink is supposed to be executed in the background, " + "not waiting on status of registration before end of workflow run. Use `True` if best-effort " + "registration is needed, use `False` while debugging and if error handling is needed", ) - labeling_batch_prefix: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = ( - Field( - default="workflows_data_collector", - description="Prefix of the name for labeling batches that will be registered in Roboflow app", - examples=["my_labeling_batch_name"], - ) + labeling_batch_prefix: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( + default="workflows_data_collector", + description="Prefix of the name for labeling batches that will be registered in Roboflow app", + examples=["my_labeling_batch_name"], ) labeling_batches_recreation_frequency: BatchCreationFrequency = Field( default="never", diff --git a/inference/core/workflows/core_steps/sinks/webhook/v1.py b/inference/core/workflows/core_steps/sinks/webhook/v1.py index 74097128d..445fbe98b 100644 --- a/inference/core/workflows/core_steps/sinks/webhook/v1.py +++ b/inference/core/workflows/core_steps/sinks/webhook/v1.py @@ -28,7 +28,7 @@ STRING_KIND, TOP_CLASS_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -164,7 +164,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/webhook_sink@v1"] - url: Union[WorkflowParameterSelector(kind=[STRING_KIND]), str] = Field( + url: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="URL of the resource to make request", ) method: Literal["GET", "POST", "PUT"] = Field( @@ -173,7 +173,7 @@ class BlockManifest(WorkflowBlockManifest): query_parameters: Dict[ str, Union[ - WorkflowParameterSelector(kind=QUERY_PARAMS_KIND), + ScalarSelector(kind=QUERY_PARAMS_KIND), BatchSelector(kind=QUERY_PARAMS_KIND), str, float, @@ -189,7 +189,7 @@ class BlockManifest(WorkflowBlockManifest): headers: Dict[ str, Union[ - WorkflowParameterSelector(kind=HEADER_KIND), + ScalarSelector(kind=HEADER_KIND), BatchSelector(kind=HEADER_KIND), str, float, @@ -204,7 +204,7 @@ class BlockManifest(WorkflowBlockManifest): 
json_payload: Dict[ str, Union[ - WorkflowParameterSelector(), + ScalarSelector(), BatchSelector(), str, float, @@ -233,7 +233,7 @@ class BlockManifest(WorkflowBlockManifest): multi_part_encoded_files: Dict[ str, Union[ - WorkflowParameterSelector(), + ScalarSelector(), BatchSelector(), str, float, @@ -265,7 +265,7 @@ class BlockManifest(WorkflowBlockManifest): form_data: Dict[ str, Union[ - WorkflowParameterSelector(), + ScalarSelector(), BatchSelector(), str, float, @@ -291,35 +291,31 @@ class BlockManifest(WorkflowBlockManifest): ], default_factory=dict, ) - request_timeout: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( + request_timeout: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( default=2, description="Number of seconds to wait for remote API response", examples=["$inputs.request_timeout", 10], ) - fire_and_forget: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = ( - Field( - default=True, - description="Boolean flag dictating if sink is supposed to be executed in the background, " - "not waiting on status of registration before end of workflow run. Use `True` if best-effort " - "registration is needed, use `False` while debugging and if error handling is needed", - examples=["$inputs.fire_and_forget", True], - ) + fire_and_forget: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + default=True, + description="Boolean flag dictating if sink is supposed to be executed in the background, " + "not waiting on status of registration before end of workflow run. Use `True` if best-effort " + "registration is needed, use `False` while debugging and if error handling is needed", + examples=["$inputs.fire_and_forget", True], ) - disable_sink: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( + disable_sink: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( default=False, description="boolean flag that can be also reference to input - to arbitrarily disable " "data collection for specific request", examples=[False, "$inputs.disable_email_notifications"], ) - cooldown_seconds: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = ( - Field( - default=5, - description="Number of seconds to wait until follow-up notification can be sent", - json_schema_extra={ - "always_visible": True, - }, - examples=["$inputs.cooldown_seconds", 10], - ) + cooldown_seconds: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( + default=5, + description="Number of seconds to wait until follow-up notification can be sent", + json_schema_extra={ + "always_visible": True, + }, + examples=["$inputs.cooldown_seconds", 10], ) @classmethod diff --git a/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py b/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py index 10ed0031d..db2bd8db5 100644 --- a/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py @@ -15,9 +15,9 @@ IMAGE_KIND, INTEGER_KIND, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -48,23 +48,19 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/absolute_static_crop@v1", "AbsoluteStaticCrop"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - x_center: Union[PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND])] 
= ( - Field( - description="Center X of static crop (absolute coordinate)", - examples=[40, "$inputs.center_x"], - ) + x_center: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + description="Center X of static crop (absolute coordinate)", + examples=[40, "$inputs.center_x"], ) - y_center: Union[PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND])] = ( - Field( - description="Center Y of static crop (absolute coordinate)", - examples=[40, "$inputs.center_y"], - ) + y_center: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + description="Center Y of static crop (absolute coordinate)", + examples=[40, "$inputs.center_y"], ) - width: Union[PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( + width: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( description="Width of static crop (absolute value)", examples=[40, "$inputs.width"], ) - height: Union[PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( + height: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( description="Height of static crop (absolute value)", examples=[40, "$inputs.height"], ) diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py index 05b42a674..1b0009168 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py @@ -13,7 +13,7 @@ INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, WorkflowVideoMetadataSelector, ) from inference.core.workflows.prototypes.block import ( @@ -61,28 +61,28 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): description="Objects to be tracked", examples=["$steps.object_detection_model.predictions"], ) - track_activation_threshold: Union[Optional[float], WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + track_activation_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.25, description="Detection confidence threshold for track activation." " Increasing track_activation_threshold improves accuracy and stability but might miss true detections." " Decreasing it increases completeness but risks introducing noise and instability.", examples=[0.25, "$inputs.confidence"], ) - lost_track_buffer: Union[Optional[int], WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + lost_track_buffer: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore default=30, description="Number of frames to buffer when a track is lost." " Increasing lost_track_buffer enhances occlusion handling, significantly reducing" " the likelihood of track fragmentation or disappearance caused by brief detection gaps.", examples=[30, "$inputs.lost_track_buffer"], ) - minimum_matching_threshold: Union[Optional[float], WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + minimum_matching_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.8, description="Threshold for matching tracks with detections." " Increasing minimum_matching_threshold improves accuracy but risks fragmentation." 
" Decreasing it improves completeness but risks false positives and drift.", examples=[0.8, "$inputs.min_matching_threshold"], ) - minimum_consecutive_frames: Union[Optional[int], WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + minimum_consecutive_frames: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore default=1, description="Number of consecutive frames that an object must be tracked before it is considered a 'valid' track." " Increasing minimum_consecutive_frames prevents the creation of accidental tracks from false detection" diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py index 23b337bc4..20a59be4a 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py @@ -14,8 +14,8 @@ INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, BatchSelector, + ScalarSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -67,28 +67,28 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): description="Objects to be tracked", examples=["$steps.object_detection_model.predictions"], ) - track_activation_threshold: Union[Optional[float], WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + track_activation_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.25, description="Detection confidence threshold for track activation." " Increasing track_activation_threshold improves accuracy and stability but might miss true detections." " Decreasing it increases completeness but risks introducing noise and instability.", examples=[0.25, "$inputs.confidence"], ) - lost_track_buffer: Union[Optional[int], WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + lost_track_buffer: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore default=30, description="Number of frames to buffer when a track is lost." " Increasing lost_track_buffer enhances occlusion handling, significantly reducing" " the likelihood of track fragmentation or disappearance caused by brief detection gaps.", examples=[30, "$inputs.lost_track_buffer"], ) - minimum_matching_threshold: Union[Optional[float], WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + minimum_matching_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.8, description="Threshold for matching tracks with detections." " Increasing minimum_matching_threshold improves accuracy but risks fragmentation." " Decreasing it improves completeness but risks false positives and drift.", examples=[0.8, "$inputs.min_matching_threshold"], ) - minimum_consecutive_frames: Union[Optional[int], WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + minimum_consecutive_frames: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore default=1, description="Number of consecutive frames that an object must be tracked before it is considered a 'valid' track." 
" Increasing minimum_consecutive_frames prevents the creation of accidental tracks from false detection" diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py index 264bca5d9..dfcbc11af 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py @@ -15,8 +15,8 @@ INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, BatchSelector, + ScalarSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -82,28 +82,28 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): description="Objects to be tracked", examples=["$steps.object_detection_model.predictions"], ) - track_activation_threshold: Union[Optional[float], WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + track_activation_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.25, description="Detection confidence threshold for track activation." " Increasing track_activation_threshold improves accuracy and stability but might miss true detections." " Decreasing it increases completeness but risks introducing noise and instability.", examples=[0.25, "$inputs.confidence"], ) - lost_track_buffer: Union[Optional[int], WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + lost_track_buffer: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore default=30, description="Number of frames to buffer when a track is lost." " Increasing lost_track_buffer enhances occlusion handling, significantly reducing" " the likelihood of track fragmentation or disappearance caused by brief detection gaps.", examples=[30, "$inputs.lost_track_buffer"], ) - minimum_matching_threshold: Union[Optional[float], WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + minimum_matching_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.8, description="Threshold for matching tracks with detections." " Increasing minimum_matching_threshold improves accuracy but risks fragmentation." " Decreasing it improves completeness but risks false positives and drift.", examples=[0.8, "$inputs.min_matching_threshold"], ) - minimum_consecutive_frames: Union[Optional[int], WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + minimum_consecutive_frames: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore default=1, description="Number of consecutive frames that an object must be tracked before it is considered a 'valid' track." 
" Increasing minimum_consecutive_frames prevents the creation of accidental tracks from false detection" diff --git a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py index 7055bcf28..d3aec0b40 100644 --- a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py +++ b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py @@ -20,7 +20,7 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -61,16 +61,12 @@ class BlockManifest(WorkflowBlockManifest): description="Reference to detection-like predictions", examples=["$steps.object_detection_model.predictions"], ) - offset_width: Union[PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND])] = ( - Field( - description="Offset for boxes width", - examples=[10, "$inputs.offset_x"], - validation_alias=AliasChoices("offset_width", "offset_x"), - ) + offset_width: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + description="Offset for boxes width", + examples=[10, "$inputs.offset_x"], + validation_alias=AliasChoices("offset_width", "offset_x"), ) - offset_height: Union[ - PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND]) - ] = Field( + offset_height: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( description="Offset for boxes height", examples=[10, "$inputs.offset_y"], validation_alias=AliasChoices("offset_height", "offset_y"), diff --git a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py index 1c053f515..308eafc8b 100644 --- a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py @@ -19,8 +19,8 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, BatchSelector, + ScalarSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -86,7 +86,7 @@ class BlockManifest(WorkflowBlockManifest): ) operations_parameters: Dict[ str, - Union[WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector()], + Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()], ] = Field( description="References to additional parameters that may be provided in runtime to parametrise operations", examples=[ diff --git a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py index 1e094e2df..7d40254e4 100644 --- a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py @@ -25,8 +25,8 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, BatchSelector, + ScalarSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -101,7 +101,7 @@ class BlockManifest(WorkflowBlockManifest): ) operations_parameters: Dict[ str, - Union[WorkflowImageSelector, WorkflowParameterSelector(), BatchSelector()], + Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()], ] = Field( description="References to 
additional parameters that may be provided in runtime to parameterize operations", examples=[ diff --git a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py index 5d0fe989c..9f33ba45a 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py @@ -23,9 +23,9 @@ RGB_COLOR_KIND, STRING_KIND, BatchSelector, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -79,7 +79,7 @@ class BlockManifest(WorkflowBlockManifest): validation_alias=AliasChoices("predictions", "detections"), ) mask_opacity: Union[ - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), float, ] = Field( default=0.0, @@ -97,7 +97,7 @@ class BlockManifest(WorkflowBlockManifest): }, ) background_color: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), BatchSelector(kind=[RGB_COLOR_KIND]), str, Tuple[int, int, int], diff --git a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py index 577ca27ad..87ae6dee6 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py @@ -14,7 +14,7 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -57,7 +57,7 @@ class DynamicZonesManifest(WorkflowBlockManifest): description="", examples=["$segmentation.predictions"], ) - required_number_of_vertices: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + required_number_of_vertices: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Keep simplifying polygon until number of vertices matches this number", examples=[4, "$inputs.vertices"], ) diff --git a/inference/core/workflows/core_steps/transformations/image_slicer/v1.py b/inference/core/workflows/core_steps/transformations/image_slicer/v1.py index e2ee42fcc..e44536616 100644 --- a/inference/core/workflows/core_steps/transformations/image_slicer/v1.py +++ b/inference/core/workflows/core_steps/transformations/image_slicer/v1.py @@ -17,9 +17,9 @@ FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, INTEGER_KIND, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -66,23 +66,19 @@ class BlockManifest(WorkflowBlockManifest): examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("image", "images"), ) - slice_width: Union[PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND])] = ( - Field( - default=640, - description="Width of each slice, in pixels", - examples=[320, "$inputs.slice_width"], - ) + slice_width: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + default=640, + description="Width of each slice, in pixels", + examples=[320, "$inputs.slice_width"], ) - slice_height: Union[PositiveInt, WorkflowParameterSelector(kind=[INTEGER_KIND])] = ( - Field( - default=640, - description="Height of each slice, in pixels", - examples=[320, "$inputs.slice_height"], - ) + slice_height: 
Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + default=640, + description="Height of each slice, in pixels", + examples=[320, "$inputs.slice_height"], ) overlap_ratio_width: Union[ Annotated[float, Field(ge=0.0, lt=1.0)], - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.2, description="Overlap ratio between consecutive slices in the width dimension", @@ -90,7 +86,7 @@ class BlockManifest(WorkflowBlockManifest): ) overlap_ratio_height: Union[ Annotated[float, Field(ge=0.0, lt=1.0)], - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.2, description="Overlap ratio between consecutive slices in the height dimension", diff --git a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py index 42f27f0c8..2f5ee972c 100644 --- a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py +++ b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py @@ -24,9 +24,9 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -79,22 +79,22 @@ class PerspectiveCorrectionManifest(WorkflowBlockManifest): examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("images", "image"), ) - perspective_polygons: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + perspective_polygons: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Perspective polygons (for each batch at least one must be consisting of 4 vertices)", examples=["$steps.perspective_wrap.zones"], ) - transformed_rect_width: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + transformed_rect_width: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Transformed rect width", default=1000, examples=[1000] ) - transformed_rect_height: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + transformed_rect_height: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Transformed rect height", default=1000, examples=[1000] ) - extend_perspective_polygon_by_detections_anchor: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + extend_perspective_polygon_by_detections_anchor: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description=f"If set, perspective polygons will be extended to contain all bounding boxes. 
Allowed values: {', '.join(sv.Position.list())}", default="", examples=["CENTER"], ) - warp_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + warp_image: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description=f"If set to True, image will be warped into transformed rect", default=False, examples=[False], diff --git a/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py b/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py index d0020f927..888d7b844 100644 --- a/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py @@ -16,9 +16,9 @@ IMAGE_KIND, FloatZeroToOne, ImageInputField, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -49,29 +49,27 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/relative_statoic_crop@v1", "RelativeStaticCrop"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - x_center: Union[ - FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]) - ] = Field( - description="Center X of static crop (relative coordinate 0.0-1.0)", - examples=[0.3, "$inputs.center_x"], + x_center: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( + Field( + description="Center X of static crop (relative coordinate 0.0-1.0)", + examples=[0.3, "$inputs.center_x"], + ) ) - y_center: Union[ - FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]) - ] = Field( - description="Center Y of static crop (relative coordinate 0.0-1.0)", - examples=[0.3, "$inputs.center_y"], + y_center: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( + Field( + description="Center Y of static crop (relative coordinate 0.0-1.0)", + examples=[0.3, "$inputs.center_y"], + ) ) - width: Union[ - FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]) - ] = Field( + width: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( description="Width of static crop (relative value 0.0-1.0)", examples=[0.3, "$inputs.width"], ) - height: Union[ - FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]) - ] = Field( - description="Height of static crop (relative value 0.0-1.0)", - examples=[0.3, "$inputs.height"], + height: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( + Field( + description="Height of static crop (relative value 0.0-1.0)", + examples=[0.3, "$inputs.height"], + ) ) @classmethod diff --git a/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py b/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py index 941a252a3..d25d0bdfd 100644 --- a/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py +++ b/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py @@ -15,8 +15,8 @@ INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, BatchSelector, + ScalarSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -56,14 +56,14 @@ class BlockManifest(WorkflowBlockManifest): description="Tracked detections", examples=["$steps.object_detection_model.predictions"], ) - smoothing_window_size: 
Union[Optional[int], WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + smoothing_window_size: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore default=3, description="Predicted movement of detection will be smoothed based on historical measurements of velocity," " this parameter controls number of historical measurements taken under account when calculating smoothed velocity." " Detections will be removed from generating smoothed predictions if they had been missing for longer than this number of frames.", examples=[5, "$inputs.smoothing_window_size"], ) - bbox_smoothing_coefficient: Union[Optional[float], WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + bbox_smoothing_coefficient: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.2, description="Bounding box smoothing coefficient applied when given tracker_id is present on current frame." " This parameter must be initialized with value between 0 and 1", diff --git a/inference/core/workflows/core_steps/transformations/stitch_images/v1.py b/inference/core/workflows/core_steps/transformations/stitch_images/v1.py index 63cfa1a2b..25626822c 100644 --- a/inference/core/workflows/core_steps/transformations/stitch_images/v1.py +++ b/inference/core/workflows/core_steps/transformations/stitch_images/v1.py @@ -14,9 +14,9 @@ FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, INTEGER_KIND, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -60,14 +60,14 @@ class BlockManifest(WorkflowBlockManifest): examples=["$inputs.image2"], validation_alias=AliasChoices("image2"), ) - max_allowed_reprojection_error: Union[Optional[float], WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + max_allowed_reprojection_error: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=3, description="Advanced parameter overwriting cv.findHomography ransacReprojThreshold parameter." " Maximum allowed reprojection error to treat a point pair as an inlier." " Increasing value of this parameter for low details photo may yield better results.", examples=[3, "$inputs.min_overlap_ratio_w"], ) - count_of_best_matches_per_query_descriptor: Union[Optional[int], WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + count_of_best_matches_per_query_descriptor: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore default=2, description="Advanced parameter overwriting cv.BFMatcher.knnMatch `k` parameter." 
" Count of best matches found per each query descriptor or less if a query descriptor has less than k possible matches in total.", diff --git a/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py b/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py index 4eeac8e86..a4894bf5c 100644 --- a/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py +++ b/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py @@ -14,7 +14,7 @@ OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -135,7 +135,7 @@ class BlockManifest(WorkflowBlockManifest): } }, ) - tolerance: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( + tolerance: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( title="Tolerance", description="The tolerance for grouping detections into the same line of text.", default=10, diff --git a/inference/core/workflows/core_steps/visualizations/background_color/v1.py b/inference/core/workflows/core_steps/visualizations/background_color/v1.py index 3bf9a55d0..829ea60b2 100644 --- a/inference/core/workflows/core_steps/visualizations/background_color/v1.py +++ b/inference/core/workflows/core_steps/visualizations/background_color/v1.py @@ -17,7 +17,7 @@ FLOAT_ZERO_TO_ONE_KIND, STRING_KIND, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -45,13 +45,13 @@ class BackgroundColorManifest(PredictionsVisualizationManifest): } ) - color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the background.", default="BLACK", examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.background_color"], ) - opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the Mask overlay.", default=0.5, examples=[0.5, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/blur/v1.py b/inference/core/workflows/core_steps/visualizations/blur/v1.py index 0f5f3b842..e6cbd9a34 100644 --- a/inference/core/workflows/core_steps/visualizations/blur/v1.py +++ b/inference/core/workflows/core_steps/visualizations/blur/v1.py @@ -11,7 +11,7 @@ from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -36,7 +36,7 @@ class BlurManifest(PredictionsVisualizationManifest): } ) - kernel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + kernel_size: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Size of the average pooling kernel used for blurring.", default=15, examples=[15, "$inputs.kernel_size"], diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py b/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py index 81f892852..99cf3de84 100644 --- 
a/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py @@ -15,7 +15,7 @@ FLOAT_ZERO_TO_ONE_KIND, INTEGER_KIND, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -40,13 +40,13 @@ class BoundingBoxManifest(ColorableVisualizationManifest): } ) - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the bounding box in pixels.", default=2, examples=[2, "$inputs.thickness"], ) - roundness: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + roundness: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Roundness of the corners of the bounding box.", default=0.0, examples=[0.0, "$inputs.roundness"], diff --git a/inference/core/workflows/core_steps/visualizations/circle/v1.py b/inference/core/workflows/core_steps/visualizations/circle/v1.py index c6c1ef067..ad2826687 100644 --- a/inference/core/workflows/core_steps/visualizations/circle/v1.py +++ b/inference/core/workflows/core_steps/visualizations/circle/v1.py @@ -13,7 +13,7 @@ from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -38,7 +38,7 @@ class CircleManifest(ColorableVisualizationManifest): } ) - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=2, examples=[2, "$inputs.thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/color/v1.py b/inference/core/workflows/core_steps/visualizations/color/v1.py index 8b41a8bbc..cbc719bb5 100644 --- a/inference/core/workflows/core_steps/visualizations/color/v1.py +++ b/inference/core/workflows/core_steps/visualizations/color/v1.py @@ -14,7 +14,7 @@ from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -39,7 +39,7 @@ class ColorManifest(ColorableVisualizationManifest): } ) - opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the color overlay.", default=0.5, examples=[0.5, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/common/base.py b/inference/core/workflows/core_steps/visualizations/common/base.py index 4a0a1c1a1..d560bb43f 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base.py +++ b/inference/core/workflows/core_steps/visualizations/common/base.py @@ -15,9 +15,9 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, BatchSelector, + ScalarSelector, StepOutputImageSelector, WorkflowImageSelector, - WorkflowParameterSelector, ) 
from inference.core.workflows.prototypes.block import ( BlockResult, @@ -41,7 +41,7 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("image", "images"), ) - copy_image: Union[bool, WorkflowParameterSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + copy_image: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description="Duplicate the image contents (vs overwriting the image in place). Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", default=True, examples=[True, False], diff --git a/inference/core/workflows/core_steps/visualizations/common/base_colorable.py b/inference/core/workflows/core_steps/visualizations/common/base_colorable.py index 0d67626d1..ac8911b66 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base_colorable.py +++ b/inference/core/workflows/core_steps/visualizations/common/base_colorable.py @@ -14,7 +14,7 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult @@ -74,7 +74,7 @@ class ColorableVisualizationManifest(PredictionsVisualizationManifest, ABC): # "Matplotlib Oranges_R", # "Matplotlib Reds_R", ], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( # type: ignore default="DEFAULT", description="Color palette to use for annotations.", @@ -83,24 +83,24 @@ class ColorableVisualizationManifest(PredictionsVisualizationManifest, ABC): palette_size: Union[ int, - WorkflowParameterSelector(kind=[INTEGER_KIND]), + ScalarSelector(kind=[INTEGER_KIND]), ] = Field( # type: ignore default=10, description="Number of colors in the color palette. 
Applies when using a matplotlib `color_palette`.", examples=[10, "$inputs.palette_size"], ) - custom_colors: Union[ - List[str], WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]) - ] = Field( # type: ignore - default=[], - description='List of colors to use for annotations when `color_palette` is set to "CUSTOM".', - examples=[["#FF0000", "#00FF00", "#0000FF"], "$inputs.custom_colors"], + custom_colors: Union[List[str], ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = ( + Field( # type: ignore + default=[], + description='List of colors to use for annotations when `color_palette` is set to "CUSTOM".', + examples=[["#FF0000", "#00FF00", "#0000FF"], "$inputs.custom_colors"], + ) ) color_axis: Union[ Literal["INDEX", "CLASS", "TRACK"], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( # type: ignore default="CLASS", description="Strategy to use for mapping colors to annotations.", diff --git a/inference/core/workflows/core_steps/visualizations/corner/v1.py b/inference/core/workflows/core_steps/visualizations/corner/v1.py index 09cea4966..4f940f3e8 100644 --- a/inference/core/workflows/core_steps/visualizations/corner/v1.py +++ b/inference/core/workflows/core_steps/visualizations/corner/v1.py @@ -13,7 +13,7 @@ from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -38,13 +38,13 @@ class CornerManifest(ColorableVisualizationManifest): } ) - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=4, examples=[4, "$inputs.thickness"], ) - corner_length: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + corner_length: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Length of the corner lines in pixels.", default=15, examples=[15, "$inputs.corner_length"], diff --git a/inference/core/workflows/core_steps/visualizations/crop/v1.py b/inference/core/workflows/core_steps/visualizations/crop/v1.py index 4390ca4a4..7b16f66eb 100644 --- a/inference/core/workflows/core_steps/visualizations/crop/v1.py +++ b/inference/core/workflows/core_steps/visualizations/crop/v1.py @@ -15,7 +15,7 @@ FLOAT_KIND, INTEGER_KIND, STRING_KIND, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -53,20 +53,20 @@ class CropManifest(ColorableVisualizationManifest): "BOTTOM_RIGHT", "CENTER_OF_MASS", ], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( # type: ignore default="TOP_CENTER", description="The anchor position for placing the crop.", examples=["CENTER", "$inputs.position"], ) - scale_factor: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + scale_factor: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( # type: ignore description="The factor by which to scale the cropped image part. 
A factor of 2, for example, would double the size of the cropped area, allowing for a closer view of the detection.", default=2.0, examples=[2.0, "$inputs.scale_factor"], ) - border_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + border_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline in pixels.", default=2, examples=[2, "$inputs.border_thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/dot/v1.py b/inference/core/workflows/core_steps/visualizations/dot/v1.py index c8f76fe99..c21f31774 100644 --- a/inference/core/workflows/core_steps/visualizations/dot/v1.py +++ b/inference/core/workflows/core_steps/visualizations/dot/v1.py @@ -14,7 +14,7 @@ from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, STRING_KIND, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -54,20 +54,20 @@ class DotManifest(ColorableVisualizationManifest): "BOTTOM_RIGHT", "CENTER_OF_MASS", ], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( # type: ignore default="CENTER", description="The anchor position for placing the dot.", examples=["CENTER", "$inputs.position"], ) - radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + radius: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Radius of the dot in pixels.", default=4, examples=[4, "$inputs.radius"], ) - outline_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + outline_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline of the dot in pixels.", default=0, examples=[2, "$inputs.outline_thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/ellipse/v1.py b/inference/core/workflows/core_steps/visualizations/ellipse/v1.py index 3c7624d1d..5fa14f2eb 100644 --- a/inference/core/workflows/core_steps/visualizations/ellipse/v1.py +++ b/inference/core/workflows/core_steps/visualizations/ellipse/v1.py @@ -13,7 +13,7 @@ from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -38,19 +38,19 @@ class EllipseManifest(ColorableVisualizationManifest): } ) - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=2, examples=[2, "$inputs.thickness"], ) - start_angle: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + start_angle: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Starting angle of the ellipse in degrees.", default=-45, examples=[-45, "$inputs.start_angle"], ) - end_angle: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + end_angle: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Ending angle of the ellipse in degrees.", default=235, examples=[235, "$inputs.end_angle"], diff --git 
a/inference/core/workflows/core_steps/visualizations/halo/v1.py b/inference/core/workflows/core_steps/visualizations/halo/v1.py index e01cec070..07738c138 100644 --- a/inference/core/workflows/core_steps/visualizations/halo/v1.py +++ b/inference/core/workflows/core_steps/visualizations/halo/v1.py @@ -20,7 +20,7 @@ INTEGER_KIND, BatchSelector, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -55,13 +55,13 @@ class HaloManifest(ColorableVisualizationManifest): examples=["$steps.instance_segmentation_model.predictions"], ) - opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the halo overlay.", default=0.8, examples=[0.8, "$inputs.opacity"], ) - kernel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + kernel_size: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Size of the average pooling kernel used for creating the halo.", default=40, examples=[40, "$inputs.kernel_size"], diff --git a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py index cbc710803..264cf95e7 100644 --- a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py +++ b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py @@ -17,7 +17,7 @@ KEYPOINT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -63,13 +63,13 @@ class KeypointManifest(VisualizationManifest): json_schema_extra={"always_visible": True}, ) - color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the keypoint.", default="#A351FB", examples=["#A351FB", "green", "$inputs.color"], ) - text_color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + text_color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Text color of the keypoint.", default="black", examples=["black", "$inputs.text_color"], @@ -81,7 +81,7 @@ class KeypointManifest(VisualizationManifest): }, }, ) - text_scale: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + text_scale: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( # type: ignore description="Scale of the text.", default=0.5, examples=[0.5, "$inputs.text_scale"], @@ -94,7 +94,7 @@ class KeypointManifest(VisualizationManifest): }, ) - text_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the text characters.", default=1, examples=[1, "$inputs.text_thickness"], @@ -107,7 +107,7 @@ class KeypointManifest(VisualizationManifest): }, ) - text_padding: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_padding: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Padding around the text in pixels.", default=10, examples=[10, "$inputs.text_padding"], @@ 
-120,7 +120,7 @@ class KeypointManifest(VisualizationManifest): }, ) - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline in pixels.", default=2, examples=[2, "$inputs.thickness"], @@ -133,7 +133,7 @@ class KeypointManifest(VisualizationManifest): }, ) - radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + radius: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Radius of the keypoint in pixels.", default=10, examples=[10, "$inputs.radius"], diff --git a/inference/core/workflows/core_steps/visualizations/label/v1.py b/inference/core/workflows/core_steps/visualizations/label/v1.py index b9c46360a..b54ce8c79 100644 --- a/inference/core/workflows/core_steps/visualizations/label/v1.py +++ b/inference/core/workflows/core_steps/visualizations/label/v1.py @@ -16,7 +16,7 @@ FLOAT_KIND, INTEGER_KIND, STRING_KIND, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -54,7 +54,7 @@ class LabelManifest(ColorableVisualizationManifest): "Tracker Id", "Time In Zone", ], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( # type: ignore default="Class", description="The type of text to display.", @@ -74,38 +74,38 @@ class LabelManifest(ColorableVisualizationManifest): "BOTTOM_RIGHT", "CENTER_OF_MASS", ], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( # type: ignore default="TOP_LEFT", description="The anchor position for placing the label.", examples=["CENTER", "$inputs.text_position"], ) - text_color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + text_color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the text.", default="WHITE", examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.text_color"], ) - text_scale: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + text_scale: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( # type: ignore description="Scale of the text.", default=1.0, examples=[1.0, "$inputs.text_scale"], ) - text_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the text characters.", default=1, examples=[1, "$inputs.text_thickness"], ) - text_padding: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_padding: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Padding around the text in pixels.", default=10, examples=[10, "$inputs.text_padding"], ) - border_radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + border_radius: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Radius of the label in pixels.", default=0, examples=[0, "$inputs.border_radius"], diff --git a/inference/core/workflows/core_steps/visualizations/line_zone/v1.py b/inference/core/workflows/core_steps/visualizations/line_zone/v1.py index 35ddece2c..9d854aa8b 100644 --- a/inference/core/workflows/core_steps/visualizations/line_zone/v1.py +++ 
b/inference/core/workflows/core_steps/visualizations/line_zone/v1.py @@ -21,7 +21,7 @@ STRING_KIND, BatchSelector, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -47,43 +47,43 @@ class LineCounterZoneVisualizationManifest(VisualizationManifest): "block_type": "visualization", } ) - zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points.", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) - color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the zone.", default="#5bb573", examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.background_color"], ) - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=2, examples=[2, "$inputs.thickness"], ) - text_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the text in pixels.", default=1, examples=[1, "$inputs.text_thickness"], ) - text_scale: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + text_scale: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( # type: ignore description="Scale of the text.", default=1.0, examples=[1.0, "$inputs.text_scale"], ) - count_in: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND]), BatchSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + count_in: Union[int, ScalarSelector(kind=[INTEGER_KIND]), BatchSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Reference to the number of objects that crossed into the line zone.", default=0, examples=["$steps.line_counter.count_in"], json_schema_extra={"always_visible": True}, ) - count_out: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND]), BatchSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + count_out: Union[int, ScalarSelector(kind=[INTEGER_KIND]), BatchSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Reference to the number of objects that crossed out of the line zone.", default=0, examples=["$steps.line_counter.count_out"], json_schema_extra={"always_visible": True}, ) - opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the Mask overlay.", default=0.3, examples=[0.3, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/mask/v1.py b/inference/core/workflows/core_steps/visualizations/mask/v1.py index ef3215e89..bb8b7568f 100644 --- a/inference/core/workflows/core_steps/visualizations/mask/v1.py +++ b/inference/core/workflows/core_steps/visualizations/mask/v1.py @@ -16,7 +16,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, BatchSelector, FloatZeroToOne, - 
WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -51,7 +51,7 @@ class MaskManifest(ColorableVisualizationManifest): examples=["$steps.instance_segmentation_model.predictions"], ) - opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the Mask overlay.", default=0.5, examples=[0.5, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py b/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py index 7a769b822..64399f05c 100644 --- a/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py +++ b/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py @@ -21,7 +21,7 @@ STRING_KIND, BatchSelector, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -63,7 +63,7 @@ class ModelComparisonManifest(VisualizationManifest): examples=["$steps.object_detection_model.predictions"], ) - color_a: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + color_a: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the areas Model A predicted that Model B did not..", default="GREEN", examples=["GREEN", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.color_a"], @@ -80,19 +80,19 @@ class ModelComparisonManifest(VisualizationManifest): examples=["$steps.object_detection_model.predictions"], ) - color_b: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + color_b: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the areas Model B predicted that Model A did not.", default="RED", examples=["RED", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.color_b"], ) - background_color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + background_color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the areas neither model predicted.", default="BLACK", examples=["BLACK", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.background_color"], ) - opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the overlay.", default=0.7, examples=[0.7, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/pixelate/v1.py b/inference/core/workflows/core_steps/visualizations/pixelate/v1.py index c00f518d4..a15ab297a 100644 --- a/inference/core/workflows/core_steps/visualizations/pixelate/v1.py +++ b/inference/core/workflows/core_steps/visualizations/pixelate/v1.py @@ -11,7 +11,7 @@ from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -36,7 +36,7 @@ class PixelateManifest(PredictionsVisualizationManifest): } ) - pixel_size: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # 
type: ignore + pixel_size: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Size of the pixelation.", default=20, examples=[20, "$inputs.pixel_size"], diff --git a/inference/core/workflows/core_steps/visualizations/polygon/v1.py b/inference/core/workflows/core_steps/visualizations/polygon/v1.py index 75a9fc25a..5a0ad2e11 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon/v1.py +++ b/inference/core/workflows/core_steps/visualizations/polygon/v1.py @@ -18,7 +18,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -53,7 +53,7 @@ class PolygonManifest(ColorableVisualizationManifest): examples=["$steps.instance_segmentation_model.predictions"], ) - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline in pixels.", default=2, examples=[2, "$inputs.thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py b/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py index be9fdbc5e..e78477109 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py +++ b/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py @@ -19,7 +19,7 @@ STRING_KIND, BatchSelector, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -45,17 +45,17 @@ class PolygonZoneVisualizationManifest(VisualizationManifest): "block_type": "visualization", } ) - zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Polygon zones (one for each batch) in a format [[(x1, y1), (x2, y2), (x3, y3), ...], ...];" " each zone must consist of more than 2 points", examples=["$inputs.zones"], ) - color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the zone.", default="#5bb573", examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.background_color"], ) - opacity: Union[FloatZeroToOne, WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the Mask overlay.", default=0.3, examples=[0.3, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/reference_path/v1.py b/inference/core/workflows/core_steps/visualizations/reference_path/v1.py index c8d76a990..b803acf5e 100644 --- a/inference/core/workflows/core_steps/visualizations/reference_path/v1.py +++ b/inference/core/workflows/core_steps/visualizations/reference_path/v1.py @@ -15,7 +15,7 @@ LIST_OF_VALUES_KIND, STRING_KIND, BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -46,17 +46,17 @@ class ReferencePathVisualizationManifest(VisualizationManifest): reference_path: Union[ list, 
BatchSelector(kind=[LIST_OF_VALUES_KIND]), - WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND]), + ScalarSelector(kind=[LIST_OF_VALUES_KIND]), ] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.expected_path"], ) - color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore + color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the zone.", default="#5bb573", examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.background_color"], ) - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=2, examples=[2, "$inputs.thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/trace/v1.py b/inference/core/workflows/core_steps/visualizations/trace/v1.py index 9d0c7d97f..0606e8f7e 100644 --- a/inference/core/workflows/core_steps/visualizations/trace/v1.py +++ b/inference/core/workflows/core_steps/visualizations/trace/v1.py @@ -15,7 +15,7 @@ from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, STRING_KIND, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -51,18 +51,18 @@ class TraceManifest(ColorableVisualizationManifest): "BOTTOM_RIGHT", "CENTER_OF_MASS", ], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( # type: ignore default="CENTER", description="The anchor position for placing the label.", examples=["CENTER", "$inputs.text_position"], ) - trace_length: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( + trace_length: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( default=30, description="Maximum number of historical tracked objects positions to display.", examples=[30, "$inputs.trace_length"], ) - thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the track visualization line.", default=1, examples=[1, "$inputs.track_thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/triangle/v1.py b/inference/core/workflows/core_steps/visualizations/triangle/v1.py index 1f7230ad0..e222d0ab1 100644 --- a/inference/core/workflows/core_steps/visualizations/triangle/v1.py +++ b/inference/core/workflows/core_steps/visualizations/triangle/v1.py @@ -14,7 +14,7 @@ from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, STRING_KIND, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -52,26 +52,26 @@ class TriangleManifest(ColorableVisualizationManifest): "BOTTOM_RIGHT", "CENTER_OF_MASS", ], - WorkflowParameterSelector(kind=[STRING_KIND]), + ScalarSelector(kind=[STRING_KIND]), ] = Field( # type: ignore default="TOP_CENTER", description="The anchor position for placing the triangle.", examples=["CENTER", "$inputs.position"], ) - base: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + base: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Base width of the triangle in pixels.", default=10, examples=[10, 
"$inputs.base"], ) - height: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + height: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Height of the triangle in pixels.", default=10, examples=[10, "$inputs.height"], ) - outline_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + outline_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline of the triangle in pixels.", default=0, examples=[2, "$inputs.outline_thickness"], diff --git a/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py b/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py index 1037c7f18..2f38e488f 100644 --- a/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py +++ b/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py @@ -36,11 +36,21 @@ "workflow_video_metadata": {"WorkflowVideoMetadata"}, "workflow_image": {"WorkflowImage", "InferenceImage"}, "workflow_parameter": {"WorkflowParameter", "InferenceParameter"}, + "scalar": {"WorkflowParameter", "InferenceParameter"}, + "batch": { + "WorkflowVideoMetadata", + "WorkflowImage", + "InferenceImage", + "WorkflowBatchInput", + }, } INPUT_TYPE_TO_SELECTED_ELEMENT = { - input_type: selected_element - for selected_element, input_types in SELECTED_ELEMENT_TO_INPUT_TYPE.items() - for input_type in input_types + "WorkflowVideoMetadata": {"workflow_video_metadata", "batch"}, + "WorkflowImage": {"workflow_image", "batch"}, + "InferenceImage": {"workflow_image", "batch"}, + "WorkflowParameter": {"workflow_parameter", "scalar"}, + "InferenceParameter": {"workflow_parameter", "scalar"}, + "WorkflowBatchInput": {"batch", "workflow_image", "workflow_video_metadata"}, } @@ -222,8 +232,6 @@ def grab_input_compatible_references_kinds( ) -> Dict[str, Set[str]]: matching_references = defaultdict(set) for reference in selector_definition.allowed_references: - if reference.selected_element not in SELECTED_ELEMENT_TO_INPUT_TYPE: - continue matching_references[reference.selected_element].update( k.name for k in reference.kind ) @@ -275,8 +283,11 @@ def prepare_search_results_for_detected_selectors( f"which is not supported in this installation of Workflow Execution Engine.", context="describing_workflow_inputs", ) - selected_element = INPUT_TYPE_TO_SELECTED_ELEMENT[selector_details.type] - kinds_for_element = matching_references_kinds[selected_element] + + selected_elements = INPUT_TYPE_TO_SELECTED_ELEMENT[selector_details.type] + kinds_for_element = set() + for selected_element in selected_elements: + kinds_for_element.update(matching_references_kinds[selected_element]) if not kinds_for_element: raise WorkflowDefinitionError( public_message=f"Workflow definition invalid - selector `{detected_input_selector}` declared for " From 533dd4f371d76e1a45de50ec6810bb8d10492a36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Tue, 5 Nov 2024 14:04:06 +0100 Subject: [PATCH 19/67] Start using BatchSelector for input images everywhere --- .../core_steps/analytics/time_in_zone/v1.py | 5 ++--- .../core_steps/classical_cv/camera_focus/v1.py | 5 ++--- .../core_steps/classical_cv/contours/v1.py | 5 ++--- .../core_steps/classical_cv/convert_grayscale/v1.py | 5 ++--- .../core_steps/classical_cv/dominant_color/v1.py | 6 +++--- .../core_steps/classical_cv/image_blur/v1.py | 5 ++--- 
.../classical_cv/image_preprocessing/v1.py | 5 ++--- .../core_steps/classical_cv/pixel_color_count/v1.py | 5 ++--- .../workflows/core_steps/classical_cv/sift/v1.py | 7 +++---- .../core_steps/classical_cv/template_matching/v1.py | 8 ++++---- .../core_steps/classical_cv/threshold/v1.py | 5 ++--- .../core_steps/formatters/vlm_as_classifier/v1.py | 5 ++--- .../core_steps/formatters/vlm_as_detector/v1.py | 5 ++--- .../core_steps/fusion/detections_stitch/v1.py | 5 ++--- .../models/foundation/anthropic_claude/v1.py | 6 +++--- .../models/foundation/clip_comparison/v1.py | 6 +++--- .../models/foundation/clip_comparison/v2.py | 6 +++--- .../core_steps/models/foundation/cog_vlm/v1.py | 13 ++++--------- .../core_steps/models/foundation/florence2/v1.py | 5 ++--- .../models/foundation/google_gemini/v1.py | 6 +++--- .../models/foundation/google_vision_ocr/v1.py | 6 +++--- .../core_steps/models/foundation/lmm/v1.py | 6 +++--- .../models/foundation/lmm_classifier/v1.py | 6 +++--- .../core_steps/models/foundation/ocr/v1.py | 8 ++++---- .../core_steps/models/foundation/openai/v1.py | 7 +++---- .../core_steps/models/foundation/openai/v2.py | 6 +++--- .../models/foundation/segment_anything2/v1.py | 5 ++--- .../models/foundation/stability_ai/inpainting/v1.py | 2 +- .../core_steps/models/foundation/yolo_world/v1.py | 6 +++--- .../models/roboflow/instance_segmentation/v1.py | 6 +++--- .../models/roboflow/keypoint_detection/v1.py | 6 +++--- .../roboflow/multi_class_classification/v1.py | 6 +++--- .../roboflow/multi_label_classification/v1.py | 6 +++--- .../models/roboflow/object_detection/v1.py | 6 +++--- .../models/third_party/barcode_detection/v1.py | 8 ++++---- .../models/third_party/qr_code_detection/v1.py | 8 ++++---- .../core_steps/sinks/roboflow/dataset_upload/v1.py | 5 ++--- .../core_steps/sinks/roboflow/dataset_upload/v2.py | 5 ++--- .../transformations/absolute_static_crop/v1.py | 8 ++------ .../core_steps/transformations/dynamic_crop/v1.py | 2 +- .../core_steps/transformations/image_slicer/v1.py | 7 ++----- .../transformations/perspective_correction/v1.py | 2 +- .../transformations/relative_static_crop/v1.py | 8 ++------ .../core_steps/transformations/stitch_images/v1.py | 7 +++---- .../core_steps/visualizations/common/base.py | 2 +- .../core_steps/models/foundation/test_cogvlm.py | 1 - .../core_steps/models/foundation/test_lmm.py | 1 - 47 files changed, 114 insertions(+), 150 deletions(-) diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py index 68266779a..6db742fa3 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py @@ -15,14 +15,13 @@ ) from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, + IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, WorkflowVideoMetadataSelector, ) from inference.core.workflows.prototypes.block import ( @@ -52,7 +51,7 @@ class TimeInZoneManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/time_in_zone@v1"] - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git 
a/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py b/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py index 2c8b71171..84b7c19b4 100644 --- a/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py @@ -14,8 +14,7 @@ from inference.core.workflows.execution_engine.entities.types import ( FLOAT_KIND, IMAGE_KIND, - StepOutputImageSelector, - WorkflowImageSelector, + BatchSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -45,7 +44,7 @@ class CameraFocusManifest(WorkflowBlockManifest): } ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/classical_cv/contours/v1.py b/inference/core/workflows/core_steps/classical_cv/contours/v1.py index b7ff4ed52..8a7d9891f 100644 --- a/inference/core/workflows/core_steps/classical_cv/contours/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/contours/v1.py @@ -16,9 +16,8 @@ IMAGE_KIND, INTEGER_KIND, NUMPY_ARRAY_KIND, + BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -45,7 +44,7 @@ class ImageContoursDetectionManifest(WorkflowBlockManifest): } ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py b/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py index 293a15967..acc7535b1 100644 --- a/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py @@ -12,8 +12,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, - StepOutputImageSelector, - WorkflowImageSelector, + BatchSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -40,7 +39,7 @@ class ConvertGrayscaleManifest(WorkflowBlockManifest): } ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py b/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py index bd1a40f9e..01218ab32 100644 --- a/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py @@ -8,11 +8,11 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, INTEGER_KIND, RGB_COLOR_KIND, + BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -47,7 +47,7 @@ class DominantColorManifest(WorkflowBlockManifest): "block_type": "classical_computer_vision", } ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = 
Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py b/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py index 6f766a3a7..1fde55272 100644 --- a/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py @@ -15,9 +15,8 @@ IMAGE_KIND, INTEGER_KIND, STRING_KIND, + BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -45,7 +44,7 @@ class ImageBlurManifest(WorkflowBlockManifest): } ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py b/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py index f2e109f4c..74428605c 100644 --- a/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py @@ -12,9 +12,8 @@ IMAGE_KIND, INTEGER_KIND, STRING_KIND, + BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -48,7 +47,7 @@ class ImagePreprocessingManifest(WorkflowBlockManifest): }, } ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py index 464521576..dd9bb6405 100644 --- a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py @@ -9,13 +9,12 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, INTEGER_KIND, RGB_COLOR_KIND, STRING_KIND, BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -40,7 +39,7 @@ class ColorPixelCountManifest(WorkflowBlockManifest): "block_type": "classical_computer_vision", } ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/classical_cv/sift/v1.py b/inference/core/workflows/core_steps/classical_cv/sift/v1.py index f0b7c89ca..2d77286e8 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/sift/v1.py @@ -1,4 +1,4 @@ -from typing import List, Literal, Optional, Type, Union +from typing import List, Literal, Optional, Type import cv2 import numpy as np @@ -15,8 +15,7 @@ IMAGE_KEYPOINTS_KIND, IMAGE_KIND, NUMPY_ARRAY_KIND, - StepOutputImageSelector, - WorkflowImageSelector, + BatchSelector, ) from inference.core.workflows.prototypes.block 
import ( BlockResult, @@ -50,7 +49,7 @@ class SIFTDetectionManifest(WorkflowBlockManifest): } ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py b/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py index 4d59d802b..3aa6e101c 100644 --- a/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py @@ -24,12 +24,12 @@ BOOLEAN_KIND, FLOAT_KIND, FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, + BatchSelector, FloatZeroToOne, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -66,13 +66,13 @@ class TemplateMatchingManifest(WorkflowBlockManifest): "block_type": "classical_computer_vision", } ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("image", "images"), ) - template: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + template: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Template Image", description="The template image for this step.", examples=["$inputs.template", "$steps.cropping.template"], diff --git a/inference/core/workflows/core_steps/classical_cv/threshold/v1.py b/inference/core/workflows/core_steps/classical_cv/threshold/v1.py index c817c95fd..0156b63e2 100644 --- a/inference/core/workflows/core_steps/classical_cv/threshold/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/threshold/v1.py @@ -15,9 +15,8 @@ IMAGE_KIND, INTEGER_KIND, STRING_KIND, + BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -44,7 +43,7 @@ class ImageThresholdManifest(WorkflowBlockManifest): } ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py index f24eb72ba..1305f600f 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py @@ -13,13 +13,12 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, CLASSIFICATION_PREDICTION_KIND, + IMAGE_KIND, LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, STRING_KIND, BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -66,7 +65,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/vlm_as_classifier@v1"] - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( description="The image which was the base to generate VLM prediction", 
examples=["$inputs.image", "$steps.cropping.crops"], ) diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py index 1cfb844e1..68ca8a511 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py @@ -27,14 +27,13 @@ ) from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, + IMAGE_KIND, LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -94,7 +93,7 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/vlm_as_detector@v1"] - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( description="The image which was the base to generate VLM prediction", examples=["$inputs.image", "$steps.cropping.crops"], ) diff --git a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py index 9fa51f41a..57a8a39a7 100644 --- a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py @@ -20,14 +20,13 @@ ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, BatchSelector, FloatZeroToOne, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -59,7 +58,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/detections_stitch@v1"] - reference_image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + reference_image: BatchSelector(kind=[IMAGE_KIND]) = Field( description="Image that was origin to take crops that yielded predictions.", examples=["$inputs.image"], ) diff --git a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py index 4fdca5e37..2d5c70c7a 100644 --- a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py @@ -21,14 +21,14 @@ ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_KIND, + IMAGE_KIND, INTEGER_KIND, LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, STRING_KIND, + BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -99,7 +99,7 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/anthropic_claude@v1"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField task_type: TaskType = Field( default="unconstrained", description="Task type to be performed by model. 
Value determines required parameters and output response.", diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py index ad7460ba5..537b00506 100644 --- a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py @@ -28,13 +28,13 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, LIST_OF_VALUES_KIND, PARENT_ID_KIND, PREDICTION_TYPE_KIND, + BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -70,7 +70,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/clip_comparison@v1", "ClipComparison"] name: str = Field(description="Unique name of step in workflows") - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField texts: Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( description="List of texts to calculate similarity against each input image", examples=[["a", "b", "c"], "$inputs.texts"], diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py index bb20d6214..e5b35d04f 100644 --- a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py @@ -29,13 +29,13 @@ from inference.core.workflows.execution_engine.entities.types import ( CLASSIFICATION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, LIST_OF_VALUES_KIND, PARENT_ID_KIND, STRING_KIND, + BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -69,7 +69,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/clip_comparison@v2"] name: str = Field(description="Unique name of step in workflows") - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField classes: Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( description="List of classes to calculate similarity against each input image", examples=[["a", "b", "c"], "$inputs.texts"], diff --git a/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py b/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py index e44ab12ef..e6fc1d2c7 100644 --- a/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py @@ -5,11 +5,7 @@ from pydantic import ConfigDict, Field from inference.core.entities.requests.cogvlm import CogVLMInferenceRequest -from inference.core.env import ( - LOCAL_INFERENCE_API_URL, - WORKFLOWS_REMOTE_API_TARGET, - WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS, -) +from inference.core.env import LOCAL_INFERENCE_API_URL, WORKFLOWS_REMOTE_API_TARGET from inference.core.managers.base import ModelManager from inference.core.utils.image_utils import load_image from inference.core.workflows.core_steps.common.entities import StepExecutionMode @@ -25,14 +21,14 @@ ) from 
inference.core.workflows.execution_engine.entities.types import ( DICTIONARY_KIND, + IMAGE_KIND, IMAGE_METADATA_KIND, PARENT_ID_KIND, STRING_KIND, WILDCARD_KIND, + BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -40,7 +36,6 @@ WorkflowBlockManifest, ) from inference_sdk import InferenceHTTPClient -from inference_sdk.http.utils.iterables import make_batches NOT_DETECTED_VALUE = "not_detected" @@ -68,7 +63,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/cog_vlm@v1", "CogVLM"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField prompt: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Text prompt to the CogVLM model", examples=["my prompt", "$inputs.prompt"], diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py index cdf2ce079..257b43621 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py @@ -16,6 +16,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( DICTIONARY_KIND, + IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, LANGUAGE_MODEL_OUTPUT_KIND, @@ -25,8 +26,6 @@ BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -163,7 +162,7 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/florence_2@v1"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField model_version: Union[ ScalarSelector(kind=[STRING_KIND]), Literal["florence-2-base", "florence-2-large"], diff --git a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py index 935306619..9dc049160 100644 --- a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py @@ -20,13 +20,13 @@ ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_KIND, + IMAGE_KIND, LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, STRING_KIND, + BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -108,7 +108,7 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/google_gemini@v1"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField task_type: TaskType = Field( default="unconstrained", description="Task type to be performed by model. 
Value determines required parameters and output response.", diff --git a/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py b/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py index d374f3e5b..5e573fcae 100644 --- a/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py @@ -20,11 +20,11 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, + BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -61,7 +61,7 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/google_vision_ocr@v1"] - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( description="Image to run OCR", examples=["$inputs.image", "$steps.cropping.crops"], ) diff --git a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py index 7274a3109..0dd074dd8 100644 --- a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py @@ -31,14 +31,14 @@ ) from inference.core.workflows.execution_engine.entities.types import ( DICTIONARY_KIND, + IMAGE_KIND, IMAGE_METADATA_KIND, PARENT_ID_KIND, STRING_KIND, WILDCARD_KIND, + BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -94,7 +94,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/lmm@v1", "LMM"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField prompt: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Holds unconstrained text prompt to LMM mode", examples=["my prompt", "$inputs.prompt"], diff --git a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py index 4e7971c88..8554afa28 100644 --- a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py @@ -23,16 +23,16 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, IMAGE_METADATA_KIND, LIST_OF_VALUES_KIND, PARENT_ID_KIND, PREDICTION_TYPE_KIND, STRING_KIND, TOP_CLASS_KIND, + BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -68,7 +68,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/lmm_for_classification@v1", "LMMForClassification"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField lmm_type: Union[ ScalarSelector(kind=[STRING_KIND]), Literal["gpt_4v", "cog_vlm"] ] = Field( diff --git a/inference/core/workflows/core_steps/models/foundation/ocr/v1.py b/inference/core/workflows/core_steps/models/foundation/ocr/v1.py index 0b98c263d..fe54494ac 100644 --- 
a/inference/core/workflows/core_steps/models/foundation/ocr/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/ocr/v1.py @@ -1,4 +1,4 @@ -from typing import List, Literal, Optional, Type, Union +from typing import List, Literal, Optional, Type from pydantic import ConfigDict, Field @@ -27,12 +27,12 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, PARENT_ID_KIND, PREDICTION_TYPE_KIND, STRING_KIND, + BatchSelector, ImageInputField, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -71,7 +71,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/ocr_model@v1", "OCRModel"] name: str = Field(description="Unique name of step in workflows") - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField @classmethod def accepts_batch_input(cls) -> bool: diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v1.py b/inference/core/workflows/core_steps/models/foundation/openai/v1.py index 4fcab7168..787c62188 100644 --- a/inference/core/workflows/core_steps/models/foundation/openai/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/openai/v1.py @@ -22,21 +22,20 @@ ) from inference.core.workflows.execution_engine.entities.types import ( DICTIONARY_KIND, + IMAGE_KIND, IMAGE_METADATA_KIND, PARENT_ID_KIND, STRING_KIND, WILDCARD_KIND, + BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, WorkflowBlockManifest, ) -from inference_sdk.http.utils.iterables import make_batches NOT_DETECTED_VALUE = "not_detected" JSON_MARKDOWN_BLOCK_PATTERN = re.compile(r"```json\n([\s\S]*?)\n```") @@ -72,7 +71,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/open_ai@v1", "OpenAI"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField prompt: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( description="Text prompt to the OpenAI model", examples=["my prompt", "$inputs.prompt"], diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v2.py b/inference/core/workflows/core_steps/models/foundation/openai/v2.py index 87482fd76..260a68e0d 100644 --- a/inference/core/workflows/core_steps/models/foundation/openai/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/openai/v2.py @@ -19,13 +19,13 @@ ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_KIND, + IMAGE_KIND, LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, STRING_KIND, + BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -98,7 +98,7 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/open_ai@v2"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField task_type: TaskType = Field( default="unconstrained", description="Task type to be performed by model. 
Value determines required parameters and output response.", diff --git a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py index 326bf3788..111d17eb7 100644 --- a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py @@ -33,6 +33,7 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, FLOAT_KIND, + IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, @@ -40,8 +41,6 @@ BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -80,7 +79,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/segment_anything@v1"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField boxes: Optional[ BatchSelector( kind=[ diff --git a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py index 8f09dd683..77707cb1d 100644 --- a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py @@ -64,7 +64,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/stability_ai_inpainting@v1"] - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( description="The image which was the base to generate VLM prediction", examples=["$inputs.image", "$steps.cropping.crops"], ) diff --git a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py index e6e19d702..a6e678364 100644 --- a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py @@ -24,14 +24,14 @@ ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, + BatchSelector, FloatZeroToOne, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -67,7 +67,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/yolo_world_model@v1", "YoloWorldModel", "YoloWorld"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField class_names: Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( description="One or more classes that you want YOLO-World to detect. 
The model accepts any string as an input, though does best with short descriptions of common objects.", examples=[["person", "car", "license plate"], "$inputs.class_names"], diff --git a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py index 25153fe35..6fcbf61f6 100644 --- a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py @@ -29,18 +29,18 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, + BatchSelector, FloatZeroToOne, ImageInputField, RoboflowModelField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -78,7 +78,7 @@ class BlockManifest(WorkflowBlockManifest): "RoboflowInstanceSegmentationModel", "InstanceSegmentationModel", ] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( RoboflowModelField ) diff --git a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py index 56464a880..9bf7c8ae7 100644 --- a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py @@ -30,18 +30,18 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, INTEGER_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, LIST_OF_VALUES_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, + BatchSelector, FloatZeroToOne, ImageInputField, RoboflowModelField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -79,7 +79,7 @@ class BlockManifest(WorkflowBlockManifest): "RoboflowKeypointDetectionModel", "KeypointsDetectionModel", ] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( RoboflowModelField ) diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py index e651f6a63..a956d3d7e 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py @@ -27,15 +27,15 @@ BOOLEAN_KIND, CLASSIFICATION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, + BatchSelector, FloatZeroToOne, ImageInputField, RoboflowModelField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -73,7 +73,7 @@ class BlockManifest(WorkflowBlockManifest): "RoboflowClassificationModel", "ClassificationModel", ] - images: Union[WorkflowImageSelector, 
StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( RoboflowModelField ) diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py index d65b4910b..504931aa9 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py @@ -27,15 +27,15 @@ BOOLEAN_KIND, CLASSIFICATION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, + BatchSelector, FloatZeroToOne, ImageInputField, RoboflowModelField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -73,7 +73,7 @@ class BlockManifest(WorkflowBlockManifest): "RoboflowMultiLabelClassificationModel", "MultiLabelClassificationModel", ] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( RoboflowModelField ) diff --git a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py index 54c46361b..adecc4859 100644 --- a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py @@ -27,18 +27,18 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, + BatchSelector, FloatZeroToOne, ImageInputField, RoboflowModelField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -76,7 +76,7 @@ class BlockManifest(WorkflowBlockManifest): "RoboflowObjectDetectionModel", "ObjectDetectionModel", ] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( RoboflowModelField ) diff --git a/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py b/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py index 052406c6b..1358a9d4c 100644 --- a/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py +++ b/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py @@ -1,4 +1,4 @@ -from typing import List, Literal, Optional, Type, Union +from typing import List, Literal, Optional, Type from uuid import uuid4 import numpy as np @@ -23,9 +23,9 @@ ) from inference.core.workflows.execution_engine.entities.types import ( BAR_CODE_DETECTION_KIND, + IMAGE_KIND, + BatchSelector, ImageInputField, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -56,7 +56,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal[ "roboflow_core/barcode_detector@v1", "BarcodeDetector", "BarcodeDetection" ] - images: 
Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField @classmethod def accepts_batch_input(cls) -> bool: diff --git a/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py b/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py index ab4b3dfd5..d8075e7d4 100644 --- a/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py +++ b/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py @@ -1,4 +1,4 @@ -from typing import List, Literal, Optional, Type, Union +from typing import List, Literal, Optional, Type from uuid import uuid4 import cv2 @@ -22,10 +22,10 @@ WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, QR_CODE_DETECTION_KIND, + BatchSelector, ImageInputField, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -56,7 +56,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal[ "roboflow_core/qr_code_detector@v1", "QRCodeDetector", "QRCodeDetection" ] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField @classmethod def accepts_batch_input(cls) -> bool: diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py index abde51a21..0e4612cba 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py @@ -58,6 +58,7 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, CLASSIFICATION_PREDICTION_KIND, + IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, @@ -66,8 +67,6 @@ BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -104,7 +103,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/roboflow_dataset_upload@v1", "RoboflowDatasetUpload"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField predictions: Optional[ BatchSelector( kind=[ diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py index f3dfa1b47..035a5bb24 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py @@ -20,6 +20,7 @@ BOOLEAN_KIND, CLASSIFICATION_PREDICTION_KIND, FLOAT_KIND, + IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, @@ -28,8 +29,6 @@ BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -68,7 +67,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/roboflow_dataset_upload@v2"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField target_project: 
Union[ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), str] = Field( description="name of Roboflow dataset / project to be used as target for collected data", examples=["my_dataset", "$inputs.target_al_dataset"], diff --git a/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py b/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py index db2bd8db5..ed9c43dae 100644 --- a/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py @@ -1,4 +1,3 @@ -from dataclasses import replace from typing import List, Literal, Optional, Type, Union from uuid import uuid4 @@ -6,18 +5,15 @@ from inference.core.workflows.execution_engine.entities.base import ( Batch, - ImageParentMetadata, - OriginCoordinatesSystem, OutputDefinition, WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, INTEGER_KIND, + BatchSelector, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -47,7 +43,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/absolute_static_crop@v1", "AbsoluteStaticCrop"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField x_center: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( description="Center X of static crop (absolute coordinate)", examples=[40, "$inputs.center_x"], diff --git a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py index 9f33ba45a..3d5c19948 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py @@ -60,7 +60,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/dynamic_crop@v1", "DynamicCrop", "Crop"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + images: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Image to Crop", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/transformations/image_slicer/v1.py b/inference/core/workflows/core_steps/transformations/image_slicer/v1.py index e44536616..31cc7f4ef 100644 --- a/inference/core/workflows/core_steps/transformations/image_slicer/v1.py +++ b/inference/core/workflows/core_steps/transformations/image_slicer/v1.py @@ -8,8 +8,6 @@ from typing_extensions import Annotated from inference.core.workflows.execution_engine.entities.base import ( - ImageParentMetadata, - OriginCoordinatesSystem, OutputDefinition, WorkflowImageData, ) @@ -17,9 +15,8 @@ FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, INTEGER_KIND, + BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -60,7 +57,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/image_slicer@v1"] - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Image to slice", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git 
a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py index 2f5ee972c..a0218c20c 100644 --- a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py +++ b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py @@ -73,7 +73,7 @@ class PerspectiveCorrectionManifest(WorkflowBlockManifest): default=None, examples=["$steps.object_detection_model.predictions"], ) - images: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + images: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Image to Crop", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py b/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py index 888d7b844..58554f037 100644 --- a/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py @@ -1,4 +1,3 @@ -from dataclasses import replace from typing import List, Literal, Optional, Type, Union from uuid import uuid4 @@ -6,19 +5,16 @@ from inference.core.workflows.execution_engine.entities.base import ( Batch, - ImageParentMetadata, - OriginCoordinatesSystem, OutputDefinition, WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, + BatchSelector, FloatZeroToOne, ImageInputField, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -48,7 +44,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/relative_statoic_crop@v1", "RelativeStaticCrop"] - images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField + images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField x_center: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( Field( description="Center X of static crop (relative coordinate 0.0-1.0)", diff --git a/inference/core/workflows/core_steps/transformations/stitch_images/v1.py b/inference/core/workflows/core_steps/transformations/stitch_images/v1.py index 25626822c..be55d0535 100644 --- a/inference/core/workflows/core_steps/transformations/stitch_images/v1.py +++ b/inference/core/workflows/core_steps/transformations/stitch_images/v1.py @@ -14,9 +14,8 @@ FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, INTEGER_KIND, + BatchSelector, ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -48,13 +47,13 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/stitch_images@v1"] - image1: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image1: BatchSelector(kind=[IMAGE_KIND]) = Field( title="First image to stitch", description="First input image for this step.", examples=["$inputs.image1"], validation_alias=AliasChoices("image1"), ) - image2: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image2: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Second image to stitch", description="Second input image for this step.", examples=["$inputs.image2"], diff --git a/inference/core/workflows/core_steps/visualizations/common/base.py b/inference/core/workflows/core_steps/visualizations/common/base.py index 
d560bb43f..94a11f001 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base.py +++ b/inference/core/workflows/core_steps/visualizations/common/base.py @@ -35,7 +35,7 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): "block_type": "visualization", } ) - image: Union[WorkflowImageSelector, StepOutputImageSelector] = Field( + image: BatchSelector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/tests/workflows/unit_tests/core_steps/models/foundation/test_cogvlm.py b/tests/workflows/unit_tests/core_steps/models/foundation/test_cogvlm.py index 1e0dda23c..11472e8ae 100644 --- a/tests/workflows/unit_tests/core_steps/models/foundation/test_cogvlm.py +++ b/tests/workflows/unit_tests/core_steps/models/foundation/test_cogvlm.py @@ -297,7 +297,6 @@ def test_try_parse_cogvlm_output_to_json_when_multiple_json_markdown_blocks_with assert result == [{"field_a": 1, "field_b": 37}, {"field_a": 2, "field_b": 47}] -@mock.patch.object(v1, "WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS", 2) @mock.patch.object(v1, "WORKFLOWS_REMOTE_API_TARGET", "self-hosted") @mock.patch.object(v1.InferenceHTTPClient, "init") def test_get_cogvlm_generations_from_remote_api( diff --git a/tests/workflows/unit_tests/core_steps/models/foundation/test_lmm.py b/tests/workflows/unit_tests/core_steps/models/foundation/test_lmm.py index 561dc5299..2ccbf214c 100644 --- a/tests/workflows/unit_tests/core_steps/models/foundation/test_lmm.py +++ b/tests/workflows/unit_tests/core_steps/models/foundation/test_lmm.py @@ -396,7 +396,6 @@ def test_try_parse_lmm_output_to_json_when_multiple_json_markdown_blocks_with_mu assert result == [{"field_a": 1, "field_b": 37}, {"field_a": 2, "field_b": 47}] -@mock.patch.object(v1, "WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS", 2) @mock.patch.object(v1, "WORKFLOWS_REMOTE_API_TARGET", "self-hosted") @mock.patch.object(v1.InferenceHTTPClient, "init") def test_get_cogvlm_generations_from_remote_api( From 5b5ec6c925cf4e311be94cd0c1d5a310d25c6e54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Tue, 5 Nov 2024 15:22:35 +0100 Subject: [PATCH 20/67] Update docs and add more tests --- docs/workflows/create_workflow_block.md | 32 ++++----- docs/workflows/execution_engine_changelog.md | 28 +++++--- docs/workflows/workflow_execution.md | 60 +++++++++------- docs/workflows/workflows_compiler.md | 11 +-- .../__init__.py | 44 ++++++++++-- .../test_workflow_with_scalar_selectors.py | 68 +++++++++++++++++-- 6 files changed, 179 insertions(+), 64 deletions(-) rename tests/workflows/integration_tests/execution/stub_plugins/{secret_store_plugin => scalar_selectors_plugin}/__init__.py (74%) diff --git a/docs/workflows/create_workflow_block.md b/docs/workflows/create_workflow_block.md index 70639df6e..c20223910 100644 --- a/docs/workflows/create_workflow_block.md +++ b/docs/workflows/create_workflow_block.md @@ -396,7 +396,7 @@ batch-oriented and will affect all batch elements passed to the step. BatchSelector, IMAGE_KIND, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, ) @@ -415,7 +415,7 @@ batch-oriented and will affect all batch elements passed to the step. 
) similarity_threshold: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", @@ -426,7 +426,7 @@ batch-oriented and will affect all batch elements passed to the step. for float values in range 0.0-1.0 - this is based on native `pydantic` mechanism and everyone could create this type annotation locally in module hosting block - * line `10` imports function `WorkflowParameterSelector(...)` capable to dynamically create + * line `10` imports function `ScalarSelector(...)` capable to dynamically create `pydantic` type annotation for selector to workflow input parameter (matching format `$inputs.param_name`), declaring union of kinds compatible with the field @@ -435,7 +435,7 @@ batch-oriented and will affect all batch elements passed to the step. * in line `27` we start defining parameter called `similarity_threshold`. Manifest will accept either float values (in range `[0.0-1.0]`) or selector to workflow input of `kind` [`float_zero_to_one`](/workflows/kinds/float_zero_to_one). Please point out on how - function creating type annotation (`WorkflowParameterSelector(...)`) is used - + function creating type annotation (`ScalarSelector(...)`) is used - in particular, expected `kind` of data is passed as list of `kinds` - representing union of expected data `kinds`. @@ -486,7 +486,7 @@ run the block. BatchSelector, IMAGE_KIND, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, ) @@ -503,7 +503,7 @@ run the block. ) similarity_threshold: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", @@ -568,7 +568,7 @@ in their inputs BatchSelector, IMAGE_KIND, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, WILDCARD_KIND, @@ -586,7 +586,7 @@ in their inputs ) similarity_threshold: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", @@ -640,7 +640,7 @@ block. BatchSelector, IMAGE_KIND, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, ) @@ -656,7 +656,7 @@ block. ) similarity_threshold: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", @@ -732,7 +732,7 @@ it can produce meaningful results. BatchSelector, IMAGE_KIND, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, ) @@ -748,7 +748,7 @@ it can produce meaningful results. ) similarity_threshold: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", @@ -881,7 +881,7 @@ on how to use it for your block. BatchSelector, IMAGE_KIND, FloatZeroToOne, - WorkflowParameterSelector, + ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, ) @@ -897,7 +897,7 @@ on how to use it for your block. 
) similarity_threshold: Union[ FloatZeroToOne, - WorkflowParameterSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", @@ -1249,7 +1249,7 @@ keys serve as names for those selectors. ) from inference.core.workflows.execution_engine.entities.types import ( BatchSelector, - WorkflowParameterSelector, + ScalarSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1262,7 +1262,7 @@ keys serve as names for those selectors. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/named_selectors_example@v1"] name: str - data: Dict[str, BatchSelector(), WorkflowParameterSelector()] = Field( + data: Dict[str, BatchSelector(), ScalarSelector()] = Field( description="Selectors to step outputs", examples=[{"a": $steps.model_1.predictions", "b": "$Inputs.data"}], ) diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index 8553d83c3..f96a52eb9 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -65,11 +65,15 @@ format introduced **at the level of Execution Engine**). As a result of the chan properly. This may not be the case in the future, as in most cases batch-oriented data *kind* may be inferred by compiler (yet this feature is not implemented for now). - * **new selector type annotation was introduced** - `BatchSelector` which is supposed to - replace `StepOutputSelector`, `WorkflowImageSelector`, `StepOutputImageSelector` and `WorkflowVideoMetadataSelector` - in block manifests, allowing batch-oriented data to be used as block input, regardless of whether it comes - from user inputs or outputs of other blocks. Mentioned old annotation types **should be assumed deprecated**, - we advise to migrate into `BatchSelector`, but that is not hard requirement. + * **new selector types annotation were introduced** - `BatchSelector` and `ScalarSelector`. + `BatchSelector` is supposed to replace `StepOutputSelector`, `WorkflowImageSelector`, `StepOutputImageSelector` + and `WorkflowVideoMetadataSelector` in block manifests, allowing batch-oriented data to be used as block input, + regardless of whether it comes from user inputs or outputs of other blocks. + `ScalarSelector` is meant to replace `WorkflowParameterSelector`, providing a way to input + non-natch oriented data into the block both from **workflow inputs** (via `WorkflowParameter` input) or + from **steps outputs** - such that steps can now directly feed parameters into other steps. + Mentioned old annotation types **should be assumed deprecated**, we advise to migrate into `BatchSelector`, + and `ScalarSelector` but that is not hard requirement. * As a result of the changes, it is now possible to **split any arbitrary workflows into multiple ones executing subsets of steps**, enabling building such tools as debuggers. @@ -78,8 +82,8 @@ subsets of steps**, enabling building such tools as debuggers. * `WorkflowImage` and `WorkflowVideoMetadata` inputs will be removed from Workflows ecosystem. - * `StepOutputSelector, `WorkflowImageSelector`, `StepOutputImageSelector` and `WorkflowVideoMetadataSelector` - type annotations used in block manifests will be removed from Workflows ecosystem. 
+ * `StepOutputSelector, `WorkflowImageSelector`, `StepOutputImageSelector`, `WorkflowVideoMetadataSelector` + and `WorkflowParameterSelector` type annotations used in block manifests will be removed from Workflows ecosystem. ### Migration guide @@ -127,9 +131,11 @@ subsets of steps**, enabling building such tools as debuggers. from inference.core.workflows.execution_engine.entities.types import ( INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, + FLOAT_KIND, WorkflowImageSelector, StepOutputImageSelector, StepOutputSelector, + WorkflowParameterSelector, ) @@ -142,17 +148,20 @@ subsets of steps**, enabling building such tools as debuggers. INSTANCE_SEGMENTATION_PREDICTION_KIND, ] ) + confidence: WorkflowParameterSelector(kind=[FLOAT_KIND]) ``` should just be changed into: - ```{ .py linenums="1" hl_lines="5 11 12"} + ```{ .py linenums="1" hl_lines="7 8 13 14 20"} from inference.core.workflows.prototypes.block import WorkflowBlockManifest from inference.core.workflows.execution_engine.entities.types import ( INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, + FLOAT_KIND, IMAGE_KIND, + BatchSelector, + ScalarSelector, ) @@ -164,6 +173,7 @@ subsets of steps**, enabling building such tools as debuggers. INSTANCE_SEGMENTATION_PREDICTION_KIND, ] ) + confidence: ScalarSelector(kind=[FLOAT_KIND]) ``` diff --git a/docs/workflows/workflow_execution.md b/docs/workflows/workflow_execution.md index 96eefe419..45358e529 100644 --- a/docs/workflows/workflow_execution.md +++ b/docs/workflows/workflow_execution.md @@ -56,15 +56,18 @@ Input data in a Workflow can be divided into two types: - Batch-Oriented Data to be processed: Main data to be processed, which you expect to derive results from (for instance: making inference with your model) -- Parameters: These are single values used for specific settings or configurations. +- Scalars: These are single values used for specific settings or configurations. -To clarify the difference, consider this simple Python function: +Thinking about standard data processing, like the one presented below, you may find the distinction +between scalars and batch-oriented data artificial. ```python def is_even(number: int) -> bool: return number % 2 == 0 ``` -You use this function like this, providing one number at a time: + +You can easily submit different values as `number` parameter and do not bother associating the +parameter into one of the two categories. ```python is_even(number=1) @@ -72,14 +75,15 @@ is_even(number=2) is_even(number=3) ``` -The situation becomes more complex with machine learning models. Unlike a simple function like `is_even(...)`, +The situation becomes more complicated with machine learning models. Unlike a simple function like `is_even(...)`, which processes one number at a time, ML models often handle multiple pieces of data at once. For example, instead of providing just one image to a classification model, you can usually submit a list of images and -receive predictions for all of them at once. +receive predictions for all of them at once performing **the same operation** for each image. This is different from our `is_even(...)` function, which would need to be called separately for each number to get a list of results. The difference comes from how ML models work, especially how -GPUs process data - applying the same operation to many pieces of data simultaneously. 
+GPUs process data - applying the same operation to many pieces of data simultaneously, executing
+[Single Instruction Multiple Data](https://en.wikipedia.org/wiki/Single_instruction,_multiple_data) operations.
@@ -91,10 +95,12 @@ for number in [1, 2, 3, 4]: results.append(is_even(number)) ``` -In Workflows, similar methods are used to handle non-batch-oriented steps facing batch input data. But what if -step expects batch-oriented data and is given singular data point? Let's look at inference process from example -classification model: +In Workflows, usually you do not need to worry about broadcasting the operations into batches of data - +Execution Engine is doing that for you behind the scenes, but once you understand the role of *batch-oriented* +data, let's think if all data can be represented as batches. +Standard way of inferring predictions from classification model can be illustrated with the following +pseudo-code: ```python images = [PIL.Image(...), PIL.Image(...), PIL.Image(...), PIL.Image(...)] model = MyClassificationModel() @@ -102,38 +108,44 @@ model = MyClassificationModel() predictions = model.infer(images=images, confidence_threshold=0.5) ``` -As you may imagine, this code has chance to run correctly, as there is substantial difference in meaning of -`images` and `confidence_threshold` parameter. Former is batch of data to apply single operation (prediction -from a model) and the latter is parameter influencing the processing for all elements in the batch. Virtually, -`confidence_threshold` gets propagated (broadcast) at each element of `images` list with the same value, -as if `confidence_threshold` was the following list: `[0.5, 0.5, 0.5, 0.5]`. +You can probably spot the difference between `images` and `confidence_threshold`. +Former is batch of data to apply single operation (prediction from a model) and the latter is parameter +influencing the processing for all elements in the batch and this type of data we call **scalars**. + +!!! Tip "Nature of *batches* and *scalars*" + + What we call *scalar* in Workflows ecosystem is not 100% equivalent to the mathematical + term which is usually associated to "a single value", but in Workflows we prefer slightly different + definition. -As mentioned earlier, Workflow inputs can be of two types: + In the Workflows ecosystem, a *scalar* is a piece of data that stays constant, regardless of how many + elements are processed. There is nothing that prevents from having a list of objects as a *scalar* value. + For example, if you have a list of input images and a fixed list of reference images, + the reference images remain unchanged as you process each input. Thus, the reference images are considered + *scalar* data, while the list of input images is *batch-oriented*. -- `WorkflowImage`: This is similar to the images parameter in our example. +To illustrate the distinction, Workflow definitions hold inputs of the two categories: + +- **Scalar inputs** - like `WorkflowParameter` + +- **Batch inputs** - like `WorkflowImage`, `WorkflowVideoMetadata` or `WorkflowBatchInput` -- `WorkflowParameters`: This works like the confidence_threshold. When you provide a single image as a `WorkflowImage` input, it is automatically expanded to form a batch. If your Workflow definition includes multiple `WorkflowImage` placeholders, the actual data you provide for execution must have the same batch size for all these inputs. The only exception is when you submit a single image; it will be broadcast to fit the batch size requirements of other inputs. -Currently, `WorkflowImage` is the only type of batch-oriented input you can use in Workflows. -This was introduced because the ecosystem started in the Computer Vision field, where images are a key data type. 
-However, as the field evolves and expands to include multi-modal models (LMMs) and other types of data, -you can expect additional batch-oriented data types to be introduced in the future. - ## Steps interactions with data If we asked you about the nature of step outputs in these scenarios: -- **A**: The step receives non-batch-oriented parameters as input. +- **A**: The step receives only scalar parameters as input. - **B**: The step receives batch-oriented data as input. -- **C**: The step receives both non-batch-oriented parameters and batch-oriented data as input. +- **C**: The step receives both scalar parameters and batch-oriented data as input. You would likely say: diff --git a/docs/workflows/workflows_compiler.md b/docs/workflows/workflows_compiler.md index 2276eafe1..593c33ace 100644 --- a/docs/workflows/workflows_compiler.md +++ b/docs/workflows/workflows_compiler.md @@ -235,15 +235,16 @@ can decide separately for each element in the batch which ones will proceed and #### Batch-orientation compatibility As it was outlined, Workflows define batch-oriented data and parameters. -Some blocks may require batch-oriented inputs, but that is always required. When +Some blocks may require batch-oriented inputs, but that is not always required. When block do not require batch-oriented input, it will be fed only with parameters and will produce a single result. Such outputs can be used as inputs to other steps, but only if block class returns `False` from `block.accepts_batch_input(...)` method. The -constraint is introduced to ensure stability of blocks interface **for now, and we plan to fix -this in the future releases**. +constraint is introduced to ensure stability of blocks interface. +If there is a need for such steps connection, this is usually an indicator that +the input parameter should not be marked with `BatchSelector(...)` type annotation, +but rather with `ScalarSelecector(...)` - **if this assumption is wrong, please let us +know in GitHub issues**. -On the other hand, batch-oriented outputs are prevented to be feed into -inputs that do expect non-batch parameters. 
## Initializing Workflow steps from blocks diff --git a/tests/workflows/integration_tests/execution/stub_plugins/secret_store_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/scalar_selectors_plugin/__init__.py similarity index 74% rename from tests/workflows/integration_tests/execution/stub_plugins/secret_store_plugin/__init__.py rename to tests/workflows/integration_tests/execution/stub_plugins/scalar_selectors_plugin/__init__.py index 15b7535a1..3d8630aaa 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/secret_store_plugin/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/scalar_selectors_plugin/__init__.py @@ -1,6 +1,7 @@ -from typing import List, Literal, Optional, Type +from typing import Any, List, Literal, Optional, Type, Union from uuid import uuid4 +import numpy as np from pydantic import Field from inference.core.workflows.execution_engine.entities.base import ( @@ -10,6 +11,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, + LIST_OF_VALUES_KIND, STRING_KIND, BatchSelector, ScalarSelector, @@ -108,10 +110,6 @@ def run(self, image: WorkflowImageData) -> BlockResult: class NonBatchSecretStoreUserBlockManifest(WorkflowBlockManifest): type: Literal["non_batch_secret_store_user"] - image: BatchSelector(kind=[IMAGE_KIND]) = Field( - title="Input Image", - description="The input image for this step.", - ) secret: ScalarSelector(kind=[STRING_KIND]) @classmethod @@ -131,14 +129,48 @@ class NonBatchSecretStoreUserBlock(WorkflowBlock): def get_manifest(cls) -> Type[WorkflowBlockManifest]: return NonBatchSecretStoreUserBlockManifest - def run(self, image: WorkflowImageData, secret: str) -> BlockResult: + def run(self, secret: str) -> BlockResult: return {"output": secret} +class BlockWithReferenceImagesManifest(WorkflowBlockManifest): + type: Literal["reference_images_comparison"] + image: BatchSelector(kind=[IMAGE_KIND]) + reference_images: Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), Any] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="similarity", kind=[LIST_OF_VALUES_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class BlockWithReferenceImagesBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockWithReferenceImagesManifest + + def run( + self, image: WorkflowImageData, reference_images: List[np.ndarray] + ) -> BlockResult: + similarity = [] + for ref_image in reference_images: + similarity.append( + (image.numpy_image == ref_image).sum() / image.numpy_image.size + ) + return {"similarity": similarity} + + def load_blocks() -> List[Type[WorkflowBlock]]: return [ SecretStoreBlock, SecretStoreUserBlock, BatchSecretStoreBlock, NonBatchSecretStoreUserBlock, + BlockWithReferenceImagesBlock, ] diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_scalar_selectors.py b/tests/workflows/integration_tests/execution/test_workflow_with_scalar_selectors.py index e6c06a6a9..d330f482c 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_scalar_selectors.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_scalar_selectors.py @@ -42,7 +42,7 @@ def test_workflow_with_scalar_selectors_for_batch_of_images( ) -> None: # given get_plugin_modules_mock.return_value = [ - 
"tests.workflows.integration_tests.execution.stub_plugins.secret_store_plugin", + "tests.workflows.integration_tests.execution.stub_plugins.scalar_selectors_plugin", ] workflow_init_parameters = { "workflows_core.model_manager": model_manager, @@ -80,7 +80,7 @@ def test_workflow_with_scalar_selectors_for_single_image( ) -> None: # given get_plugin_modules_mock.return_value = [ - "tests.workflows.integration_tests.execution.stub_plugins.secret_store_plugin", + "tests.workflows.integration_tests.execution.stub_plugins.scalar_selectors_plugin", ] workflow_init_parameters = { "workflows_core.model_manager": model_manager, @@ -119,7 +119,6 @@ def test_workflow_with_scalar_selectors_for_single_image( { "type": "non_batch_secret_store_user", "name": "user", - "image": "$inputs.image", "secret": "$steps.secret.secret", }, ], @@ -141,7 +140,7 @@ def test_workflow_with_batch_oriented_secret_store_for_batch_of_images( ) -> None: # given get_plugin_modules_mock.return_value = [ - "tests.workflows.integration_tests.execution.stub_plugins.secret_store_plugin", + "tests.workflows.integration_tests.execution.stub_plugins.scalar_selectors_plugin", ] workflow_init_parameters = { "workflows_core.model_manager": model_manager, @@ -172,3 +171,64 @@ def test_workflow_with_batch_oriented_secret_store_for_batch_of_images( assert ( result[0]["result"] != result[1]["result"] ), "Expected different results for both outputs, as feature store should fire twice for two input images" + + +WORKFLOW_WITH_REFERENCE_SIMILARITY = { + "version": "1.3.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "reference"}, + ], + "steps": [ + { + "type": "reference_images_comparison", + "name": "comparison", + "image": "$inputs.image", + "reference_images": "$inputs.reference", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "similarity", + "selector": "$steps.comparison.similarity", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_batch_oriented_secret_store_for_batch_of_images( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.scalar_selectors_plugin", + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + black_image = np.zeros((192, 168, 3), dtype=np.uint8) + red_image = np.ones((192, 168, 3), dtype=np.uint8) * (0, 0, 255) + white_image = (np.ones((192, 168, 3), dtype=np.uint8) * 255).astype(np.uint8) + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_REFERENCE_SIMILARITY, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [black_image, red_image], + "reference": [black_image, red_image, white_image], + } + ) + + # then + assert len(result) == 2 + assert np.allclose(result[0]["similarity"], [1.0, 2 / 3, 0.0], atol=1e-2) + assert np.allclose(result[1]["similarity"], [2 / 3, 1.0, 1 / 3], atol=1e-2) From 08f20275d3c75db6b3435f9b88382ec3b4f360c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Tue, 5 Nov 2024 16:26:30 +0100 Subject: [PATCH 21/67] Fix block assembler --- .../execution_engine/v1/dynamic_blocks/block_assembler.py | 5 ++++- 
.../workflows/execution_engine/v1/dynamic_blocks/entities.py | 3 ++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py index 5447f7f59..41d84e824 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py @@ -14,6 +14,7 @@ WILDCARD_KIND, BatchSelector, Kind, + ScalarSelector, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -250,8 +251,10 @@ def collect_python_types_for_selectors( result.append(WorkflowParameterSelector(kind=selector_kind)) elif selector_type is SelectorType.STEP_OUTPUT: result.append(StepOutputSelector(kind=selector_kind)) - elif selector_type is SelectorType.BATCH_OF_DATA: + elif selector_type is SelectorType.BATCH: result.append(BatchSelector(kind=selector_kind)) + elif selector_type is SelectorType.SCALAR: + result.append(ScalarSelector(kind=selector_kind)) else: raise DynamicBlockError( public_message=f"Could not recognise selector type `{selector_type}` declared for input `{input_name}` " diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py index 72f473b20..53faec027 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py @@ -9,7 +9,8 @@ class SelectorType(Enum): STEP_OUTPUT_IMAGE = "step_output_image" INPUT_PARAMETER = "input_parameter" STEP_OUTPUT = "step_output" - BATCH_OF_DATA = "batch_of_data" + BATCH = "batch" + SCALAR = "scalar" class ValueType(Enum): From e14a96077902a4a8f2fa3f1ac141dd3bcd716fa1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 7 Nov 2024 23:34:22 +0100 Subject: [PATCH 22/67] Refactor the PR to use Selector(...) 
type annotation for manifest selectors --- .../analytics/data_aggregator/v1.py | 9 +-- .../core_steps/analytics/line_counter/v1.py | 13 ++-- .../core_steps/analytics/line_counter/v2.py | 9 ++- .../core_steps/analytics/path_deviation/v1.py | 13 ++-- .../core_steps/analytics/path_deviation/v2.py | 9 ++- .../core_steps/analytics/time_in_zone/v1.py | 19 +++--- .../core_steps/analytics/time_in_zone/v2.py | 13 ++-- .../classical_cv/camera_focus/v1.py | 4 +- .../core_steps/classical_cv/contours/v1.py | 7 +-- .../classical_cv/convert_grayscale/v1.py | 4 +- .../classical_cv/distance_measurement/v1.py | 13 ++-- .../classical_cv/dominant_color/v1.py | 11 ++-- .../core_steps/classical_cv/image_blur/v1.py | 9 ++- .../classical_cv/image_preprocessing/v1.py | 13 ++-- .../classical_cv/pixel_color_count/v1.py | 11 ++-- .../core_steps/classical_cv/sift/v1.py | 4 +- .../classical_cv/sift_comparison/v1.py | 19 +++--- .../classical_cv/sift_comparison/v2.py | 45 +++++-------- .../classical_cv/size_measurement/v1.py | 9 ++- .../classical_cv/template_matching/v1.py | 25 ++++---- .../core_steps/classical_cv/threshold/v1.py | 11 ++-- .../core_steps/flow_control/continue_if/v1.py | 6 +- .../flow_control/rate_limiter/v1.py | 6 +- .../workflows/core_steps/formatters/csv/v1.py | 13 ++-- .../core_steps/formatters/expression/v1.py | 8 +-- .../first_non_empty_or_default/v1.py | 4 +- .../core_steps/formatters/json_parser/v1.py | 4 +- .../formatters/property_definition/v1.py | 7 +-- .../formatters/vlm_as_classifier/v1.py | 11 ++-- .../formatters/vlm_as_detector/v1.py | 11 ++-- .../detections_classes_replacement/v1.py | 14 ++--- .../fusion/detections_consensus/v1.py | 37 +++++------ .../core_steps/fusion/detections_stitch/v1.py | 11 ++-- .../fusion/dimension_collapse/v1.py | 4 +- .../models/foundation/anthropic_claude/v1.py | 41 ++++++------ .../models/foundation/clip_comparison/v1.py | 11 ++-- .../models/foundation/clip_comparison/v2.py | 13 ++-- .../models/foundation/cog_vlm/v1.py | 11 ++-- .../models/foundation/florence2/v1.py | 39 ++++++------ .../models/foundation/google_gemini/v1.py | 39 ++++++------ .../models/foundation/google_vision_ocr/v1.py | 7 +-- .../core_steps/models/foundation/lmm/v1.py | 17 +++-- .../models/foundation/lmm_classifier/v1.py | 17 +++-- .../core_steps/models/foundation/ocr/v1.py | 8 +-- .../core_steps/models/foundation/openai/v1.py | 17 +++-- .../core_steps/models/foundation/openai/v2.py | 41 ++++++------ .../models/foundation/segment_anything2/v1.py | 25 ++++---- .../foundation/stability_ai/inpainting/v1.py | 27 ++++---- .../models/foundation/yolo_world/v1.py | 15 +++-- .../roboflow/instance_segmentation/v1.py | 43 ++++++------- .../models/roboflow/keypoint_detection/v1.py | 41 ++++++------ .../roboflow/multi_class_classification/v1.py | 19 +++--- .../roboflow/multi_label_classification/v1.py | 19 +++--- .../models/roboflow/object_detection/v1.py | 43 ++++++------- .../third_party/barcode_detection/v1.py | 8 +-- .../third_party/qr_code_detection/v1.py | 8 +-- .../core_steps/sinks/email_notification/v1.py | 25 ++++---- .../core_steps/sinks/local_file/v1.py | 11 ++-- .../sinks/roboflow/custom_metadata/v1.py | 11 ++-- .../sinks/roboflow/dataset_upload/v1.py | 21 +++---- .../sinks/roboflow/dataset_upload/v2.py | 33 +++++----- .../workflows/core_steps/sinks/webhook/v1.py | 33 +++++----- .../absolute_static_crop/v1.py | 17 +++-- .../transformations/bounding_rect/v1.py | 10 +-- .../transformations/byte_tracker/v1.py | 17 +++-- .../transformations/byte_tracker/v2.py | 13 ++-- 
.../transformations/byte_tracker/v3.py | 17 +++-- .../transformations/detection_offset/v1.py | 13 ++-- .../transformations/detections_filter/v1.py | 12 ++-- .../detections_transformation/v1.py | 12 ++-- .../transformations/dynamic_crop/v1.py | 19 +++--- .../transformations/dynamic_zones/v1.py | 11 ++-- .../transformations/image_slicer/v1.py | 13 ++-- .../perspective_correction/v1.py | 23 +++---- .../relative_static_crop/v1.py | 35 +++++------ .../stabilize_detections/v1.py | 13 ++-- .../transformations/stitch_images/v1.py | 11 ++-- .../stitch_ocr_detections/v1.py | 11 ++-- .../visualizations/background_color/v1.py | 6 +- .../core_steps/visualizations/blur/v1.py | 4 +- .../visualizations/bounding_box/v1.py | 6 +- .../core_steps/visualizations/circle/v1.py | 4 +- .../core_steps/visualizations/color/v1.py | 4 +- .../core_steps/visualizations/common/base.py | 11 ++-- .../visualizations/common/base_colorable.py | 10 +-- .../core_steps/visualizations/corner/v1.py | 6 +- .../core_steps/visualizations/crop/v1.py | 8 +-- .../core_steps/visualizations/dot/v1.py | 8 +-- .../core_steps/visualizations/ellipse/v1.py | 8 +-- .../core_steps/visualizations/halo/v1.py | 9 ++- .../core_steps/visualizations/keypoint/v1.py | 19 +++--- .../core_steps/visualizations/label/v1.py | 16 ++--- .../core_steps/visualizations/line_zone/v1.py | 19 +++--- .../core_steps/visualizations/mask/v1.py | 7 +-- .../visualizations/model_comparison/v1.py | 15 +++-- .../core_steps/visualizations/pixelate/v1.py | 4 +- .../core_steps/visualizations/polygon/v1.py | 7 +-- .../visualizations/polygon_zone/v1.py | 9 ++- .../visualizations/reference_path/v1.py | 11 ++-- .../core_steps/visualizations/trace/v1.py | 8 +-- .../core_steps/visualizations/triangle/v1.py | 10 +-- .../execution_engine/entities/types.py | 63 ++++++++----------- .../introspection/schema_parser.py | 22 +++++-- .../v1/compiler/graph_constructor.py | 11 ++-- .../v1/dynamic_blocks/block_assembler.py | 22 ++++--- .../v1/dynamic_blocks/entities.py | 8 ++- .../v1/introspection/inputs_discovery.py | 29 ++++++--- inference/core/workflows/prototypes/block.py | 8 ++- .../__init__.py | 39 ++++++------ .../scalar_selectors_plugin/__init__.py | 19 +++--- 110 files changed, 801 insertions(+), 925 deletions(-) diff --git a/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py b/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py index 73227e699..f4589759c 100644 --- a/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py +++ b/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py @@ -18,9 +18,7 @@ FLOAT_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, - BatchSelector, - ScalarSelector, - WorkflowImageSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -192,10 +190,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/data_aggregator@v1"] - data: Dict[ - str, - Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()], - ] = Field( + data: Dict[str, Selector()] = Field( description="References data to be used to construct each and every column", examples=[ { diff --git a/inference/core/workflows/core_steps/analytics/line_counter/v1.py b/inference/core/workflows/core_steps/analytics/line_counter/v1.py index aa0ff49ac..64c43be2f 100644 --- a/inference/core/workflows/core_steps/analytics/line_counter/v1.py +++ b/inference/core/workflows/core_steps/analytics/line_counter/v1.py @@ -14,9 +14,8 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - 
BatchSelector, - ScalarSelector, - WorkflowVideoMetadataSelector, + VIDEO_METADATA_KIND, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -49,8 +48,8 @@ class LineCounterManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/line_counter@v1"] - metadata: WorkflowVideoMetadataSelector - detections: BatchSelector( + metadata: Selector(kind=[VIDEO_METADATA_KIND]) + detections: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -60,11 +59,11 @@ class LineCounterManifest(WorkflowBlockManifest): examples=["$steps.object_detection_model.predictions"], ) - line_segment: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + line_segment: Union[list, Selector(kind=[LIST_OF_VALUES_KIND]), Selector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points. For line [[0, 100], [100, 100]] line will count objects entering from the bottom as IN", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) - triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore + triggering_anchor: Union[str, Selector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore description=f"Point from the detection for triggering line crossing.", default="CENTER", examples=["CENTER"], diff --git a/inference/core/workflows/core_steps/analytics/line_counter/v2.py b/inference/core/workflows/core_steps/analytics/line_counter/v2.py index 1d5b9cb9a..953122763 100644 --- a/inference/core/workflows/core_steps/analytics/line_counter/v2.py +++ b/inference/core/workflows/core_steps/analytics/line_counter/v2.py @@ -14,8 +14,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( @@ -55,7 +54,7 @@ class LineCounterManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/line_counter@v2"] image: WorkflowImageSelector - detections: BatchSelector( + detections: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -65,11 +64,11 @@ class LineCounterManifest(WorkflowBlockManifest): examples=["$steps.object_detection_model.predictions"], ) - line_segment: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + line_segment: Union[list, Selector(kind=[LIST_OF_VALUES_KIND]), Selector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points. 
For line [[0, 100], [100, 100]] line will count objects entering from the bottom as IN", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) - triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore + triggering_anchor: Union[str, Selector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore description=f"Point from the detection for triggering line crossing.", default="CENTER", examples=["CENTER"], diff --git a/inference/core/workflows/core_steps/analytics/path_deviation/v1.py b/inference/core/workflows/core_steps/analytics/path_deviation/v1.py index 18634c9a2..2cdca4202 100644 --- a/inference/core/workflows/core_steps/analytics/path_deviation/v1.py +++ b/inference/core/workflows/core_steps/analytics/path_deviation/v1.py @@ -17,9 +17,8 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, - WorkflowVideoMetadataSelector, + VIDEO_METADATA_KIND, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -52,8 +51,8 @@ class PathDeviationManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/path_deviation_analytics@v1"] - metadata: WorkflowVideoMetadataSelector - detections: BatchSelector( + metadata: Selector(kind=[VIDEO_METADATA_KIND]) + detections: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -62,12 +61,12 @@ class PathDeviationManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore + triggering_anchor: Union[str, Selector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore description=f"Triggering anchor. 
Allowed values: {', '.join(sv.Position.list())}", default="CENTER", examples=["CENTER"], ) - reference_path: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + reference_path: Union[list, Selector(kind=[LIST_OF_VALUES_KIND]), Selector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.expected_path"], ) diff --git a/inference/core/workflows/core_steps/analytics/path_deviation/v2.py b/inference/core/workflows/core_steps/analytics/path_deviation/v2.py index e641d9ea0..7b12ae2da 100644 --- a/inference/core/workflows/core_steps/analytics/path_deviation/v2.py +++ b/inference/core/workflows/core_steps/analytics/path_deviation/v2.py @@ -17,8 +17,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( @@ -54,7 +53,7 @@ class PathDeviationManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/path_deviation_analytics@v2"] image: WorkflowImageSelector - detections: BatchSelector( + detections: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -63,12 +62,12 @@ class PathDeviationManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore + triggering_anchor: Union[str, Selector(kind=[STRING_KIND]), Literal[tuple(sv.Position.list())]] = Field( # type: ignore description=f"Triggering anchor. Allowed values: {', '.join(sv.Position.list())}", default="CENTER", examples=["CENTER"], ) - reference_path: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + reference_path: Union[list, Selector(kind=[LIST_OF_VALUES_KIND]), Selector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.expected_path"], ) diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py index 6db742fa3..67d31a2c9 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py @@ -20,9 +20,8 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, - WorkflowVideoMetadataSelector, + VIDEO_METADATA_KIND, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -51,13 +50,13 @@ class TimeInZoneManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/time_in_zone@v1"] - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], ) - metadata: WorkflowVideoMetadataSelector - detections: BatchSelector( + metadata: Selector(kind=[VIDEO_METADATA_KIND]) + detections: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -66,21 +65,21 @@ class TimeInZoneManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - zone: Union[list, 
BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, Selector(kind=[LIST_OF_VALUES_KIND]), Selector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Zones (one for each batch) in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.zones"], ) - triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + triggering_anchor: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description=f"Triggering anchor. Allowed values: {', '.join(sv.Position.list())}", default="CENTER", examples=["CENTER"], ) - remove_out_of_zone_detections: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + remove_out_of_zone_detections: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description=f"If true, detections found outside of zone will be filtered out", default=True, examples=[True, False], ) - reset_out_of_zone_detections: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + reset_out_of_zone_detections: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description=f"If true, detections found outside of zone will have time reset", default=True, examples=[True, False], diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py index d0c95994c..050d28a81 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py @@ -18,8 +18,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( @@ -59,7 +58,7 @@ class TimeInZoneManifest(WorkflowBlockManifest): description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], ) - detections: BatchSelector( + detections: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -68,21 +67,21 @@ class TimeInZoneManifest(WorkflowBlockManifest): description="Predictions", examples=["$steps.object_detection_model.predictions"], ) - zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, Selector(kind=[LIST_OF_VALUES_KIND]), Selector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Zones (one for each batch) in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.zones"], ) - triggering_anchor: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + triggering_anchor: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description=f"Triggering anchor. 
Allowed values: {', '.join(sv.Position.list())}", default="CENTER", examples=["CENTER"], ) - remove_out_of_zone_detections: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + remove_out_of_zone_detections: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description=f"If true, detections found outside of zone will be filtered out", default=True, examples=[True, False], ) - reset_out_of_zone_detections: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + reset_out_of_zone_detections: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description=f"If true, detections found outside of zone will have time reset", default=True, examples=[True, False], diff --git a/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py b/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py index 84b7c19b4..efa32b2cd 100644 --- a/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py @@ -14,7 +14,7 @@ from inference.core.workflows.execution_engine.entities.types import ( FLOAT_KIND, IMAGE_KIND, - BatchSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -44,7 +44,7 @@ class CameraFocusManifest(WorkflowBlockManifest): } ) - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/classical_cv/contours/v1.py b/inference/core/workflows/core_steps/classical_cv/contours/v1.py index 8a7d9891f..53851c7d4 100644 --- a/inference/core/workflows/core_steps/classical_cv/contours/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/contours/v1.py @@ -16,8 +16,7 @@ IMAGE_KIND, INTEGER_KIND, NUMPY_ARRAY_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -44,14 +43,14 @@ class ImageContoursDetectionManifest(WorkflowBlockManifest): } ) - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("image", "images"), ) - line_thickness: Union[ScalarSelector(kind=[INTEGER_KIND]), int] = Field( + line_thickness: Union[Selector(kind=[INTEGER_KIND]), int] = Field( description="Line thickness for drawing contours.", default=3, examples=[3, "$inputs.line_thickness"], diff --git a/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py b/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py index acc7535b1..538a837ff 100644 --- a/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py @@ -12,7 +12,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, - BatchSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -39,7 +39,7 @@ class ConvertGrayscaleManifest(WorkflowBlockManifest): } ) - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git 
a/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py b/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py index 2151db592..0e45909a8 100644 --- a/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/distance_measurement/v1.py @@ -10,8 +10,7 @@ INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -46,7 +45,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/distance_measurement@v1"] - predictions: BatchSelector( + predictions: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -80,7 +79,7 @@ class BlockManifest(WorkflowBlockManifest): description="Select how to calibrate the measurement of distance between objects.", ) - reference_object_class_name: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( + reference_object_class_name: Union[str, Selector(kind=[STRING_KIND])] = Field( title="Reference Object Class Name", description="The class name of the reference object.", default="reference-object", @@ -95,7 +94,7 @@ class BlockManifest(WorkflowBlockManifest): }, ) - reference_width: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( + reference_width: Union[float, Selector(kind=[FLOAT_KIND])] = Field( title="Width", default=2.5, description="Width of the reference object in centimeters", @@ -111,7 +110,7 @@ class BlockManifest(WorkflowBlockManifest): }, ) - reference_height: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + reference_height: Union[float, Selector(kind=[FLOAT_KIND])] = Field( # type: ignore title="Height", default=2.5, description="Height of the reference object in centimeters", @@ -127,7 +126,7 @@ class BlockManifest(WorkflowBlockManifest): }, ) - pixel_ratio: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( + pixel_ratio: Union[float, Selector(kind=[FLOAT_KIND])] = Field( title="Reference Pixel-to-Centimeter Ratio", description="The pixel-to-centimeter ratio of the input image, i.e. 1 centimeter = 100 pixels.", default=100, diff --git a/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py b/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py index 01218ab32..8c66cbb4e 100644 --- a/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py @@ -11,8 +11,7 @@ IMAGE_KIND, INTEGER_KIND, RGB_COLOR_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -47,13 +46,13 @@ class DominantColorManifest(WorkflowBlockManifest): "block_type": "classical_computer_vision", } ) - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("image", "images"), ) - color_clusters: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + color_clusters: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Color Clusters", description="Number of dominant colors to identify. 
Higher values increase precision but may slow processing.", default=4, @@ -61,7 +60,7 @@ class DominantColorManifest(WorkflowBlockManifest): gt=0, le=10, ) - max_iterations: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + max_iterations: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Max Iterations", description="Max number of iterations to perform. Higher values increase precision but may slow processing.", default=100, @@ -69,7 +68,7 @@ class DominantColorManifest(WorkflowBlockManifest): gt=0, le=500, ) - target_size: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + target_size: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Target Size", description="Sets target for the smallest dimension of the downsampled image in pixels. Lower values increase speed but may reduce precision.", default=100, diff --git a/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py b/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py index 1fde55272..f1d79a8a1 100644 --- a/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py @@ -15,8 +15,7 @@ IMAGE_KIND, INTEGER_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -44,7 +43,7 @@ class ImageBlurManifest(WorkflowBlockManifest): } ) - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], @@ -52,7 +51,7 @@ class ImageBlurManifest(WorkflowBlockManifest): ) blur_type: Union[ - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), Literal["average", "gaussian", "median", "bilateral"], ] = Field( default="gaussian", @@ -60,7 +59,7 @@ class ImageBlurManifest(WorkflowBlockManifest): examples=["average", "$inputs.blur_type"], ) - kernel_size: Union[ScalarSelector(kind=[INTEGER_KIND]), int] = Field( + kernel_size: Union[Selector(kind=[INTEGER_KIND]), int] = Field( default=5, description="Size of the average pooling kernel used for blurring.", examples=[5, "$inputs.kernel_size"], diff --git a/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py b/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py index 74428605c..3475f739f 100644 --- a/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py @@ -12,8 +12,7 @@ IMAGE_KIND, INTEGER_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -47,7 +46,7 @@ class ImagePreprocessingManifest(WorkflowBlockManifest): }, } ) - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], @@ -56,7 +55,7 @@ class ImagePreprocessingManifest(WorkflowBlockManifest): task_type: Literal["resize", "rotate", "flip"] = Field( description="Preprocessing task to be applied to the image.", ) - width: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + width: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Width", default=640, description="Width of the image to 
be resized to.", @@ -71,7 +70,7 @@ class ImagePreprocessingManifest(WorkflowBlockManifest): }, }, ) - height: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + height: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Height", default=640, description="Height of the image to be resized to.", @@ -86,7 +85,7 @@ class ImagePreprocessingManifest(WorkflowBlockManifest): }, }, ) - rotation_degrees: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + rotation_degrees: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore title="Degrees of Rotation", description="Positive value to rotate clockwise, negative value to rotate counterclockwise", default=90, @@ -102,7 +101,7 @@ class ImagePreprocessingManifest(WorkflowBlockManifest): } }, ) - flip_type: Union[ScalarSelector(kind=[STRING_KIND]), Literal["vertical", "horizontal", "both"]] = Field( # type: ignore + flip_type: Union[Selector(kind=[STRING_KIND]), Literal["vertical", "horizontal", "both"]] = Field( # type: ignore title="Flip Type", description="Type of flip to be applied to the image.", default="vertical", diff --git a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py index dd9bb6405..99e706ce7 100644 --- a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py @@ -13,8 +13,7 @@ INTEGER_KIND, RGB_COLOR_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -39,15 +38,15 @@ class ColorPixelCountManifest(WorkflowBlockManifest): "block_type": "classical_computer_vision", } ) - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("image", "images"), ) target_color: Union[ - ScalarSelector(kind=[STRING_KIND]), - BatchSelector(kind=[RGB_COLOR_KIND]), + Selector(kind=[STRING_KIND]), + Selector(kind=[RGB_COLOR_KIND]), str, Tuple[int, int, int], ] = Field( @@ -56,7 +55,7 @@ class ColorPixelCountManifest(WorkflowBlockManifest): "(like (18, 17, 67)).", examples=["#431112", "$inputs.target_color", (18, 17, 67)], ) - tolerance: Union[ScalarSelector(kind=[INTEGER_KIND]), int] = Field( + tolerance: Union[Selector(kind=[INTEGER_KIND]), int] = Field( default=10, description="Tolerance for color matching.", examples=[10, "$inputs.tolerance"], diff --git a/inference/core/workflows/core_steps/classical_cv/sift/v1.py b/inference/core/workflows/core_steps/classical_cv/sift/v1.py index 2d77286e8..8c5f95624 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/sift/v1.py @@ -15,7 +15,7 @@ IMAGE_KEYPOINTS_KIND, IMAGE_KIND, NUMPY_ARRAY_KIND, - BatchSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -49,7 +49,7 @@ class SIFTDetectionManifest(WorkflowBlockManifest): } ) - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], diff --git a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py 
b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py index 84f25043f..8341a03f1 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py @@ -9,8 +9,7 @@ BOOLEAN_KIND, INTEGER_KIND, NUMPY_ARRAY_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -39,22 +38,20 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/sift_comparison@v1"] - descriptor_1: BatchSelector(kind=[NUMPY_ARRAY_KIND]) = Field( + descriptor_1: Selector(kind=[NUMPY_ARRAY_KIND]) = Field( description="Reference to SIFT descriptors from the first image to compare", examples=["$steps.sift.descriptors"], ) - descriptor_2: BatchSelector(kind=[NUMPY_ARRAY_KIND]) = Field( + descriptor_2: Selector(kind=[NUMPY_ARRAY_KIND]) = Field( description="Reference to SIFT descriptors from the second image to compare", examples=["$steps.sift.descriptors"], ) - good_matches_threshold: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = ( - Field( - default=50, - description="Threshold for the number of good matches to consider the images as matching", - examples=[50, "$inputs.good_matches_threshold"], - ) + good_matches_threshold: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( + default=50, + description="Threshold for the number of good matches to consider the images as matching", + examples=[50, "$inputs.good_matches_threshold"], ) - ratio_threshold: Union[float, ScalarSelector(kind=[INTEGER_KIND])] = Field( + ratio_threshold: Union[float, Selector(kind=[INTEGER_KIND])] = Field( default=0.7, description="Ratio threshold for the ratio test, which is used to filter out poor matches by comparing " "the distance of the closest match to the distance of the second closest match. 
A lower " diff --git a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py index 909197f1f..afdf48b1b 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py +++ b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py @@ -17,10 +17,7 @@ INTEGER_KIND, NUMPY_ARRAY_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -49,47 +46,35 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/sift_comparison@v2"] - input_1: Union[ - WorkflowImageSelector, - StepOutputImageSelector, - BatchSelector(kind=[NUMPY_ARRAY_KIND]), - ] = Field( + input_1: Union[Selector(kind=[IMAGE_KIND, NUMPY_ARRAY_KIND]),] = Field( description="Reference to Image or SIFT descriptors from the first image to compare", examples=["$inputs.image1", "$steps.sift.descriptors"], ) - input_2: Union[ - WorkflowImageSelector, - StepOutputImageSelector, - BatchSelector(kind=[NUMPY_ARRAY_KIND]), - ] = Field( + input_2: Union[Selector(kind=[IMAGE_KIND, NUMPY_ARRAY_KIND]),] = Field( description="Reference to Image or SIFT descriptors from the second image to compare", examples=["$inputs.image2", "$steps.sift.descriptors"], ) - good_matches_threshold: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = ( - Field( - default=50, - description="Threshold for the number of good matches to consider the images as matching", - examples=[50, "$inputs.good_matches_threshold"], - ) + good_matches_threshold: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( + default=50, + description="Threshold for the number of good matches to consider the images as matching", + examples=[50, "$inputs.good_matches_threshold"], ) - ratio_threshold: Union[float, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( - Field( - default=0.7, - description="Ratio threshold for the ratio test, which is used to filter out poor matches by comparing " - "the distance of the closest match to the distance of the second closest match. A lower " - "ratio indicates stricter filtering.", - examples=[0.7, "$inputs.ratio_threshold"], - ) + ratio_threshold: Union[float, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( + default=0.7, + description="Ratio threshold for the ratio test, which is used to filter out poor matches by comparing " + "the distance of the closest match to the distance of the second closest match. 
A lower " + "ratio indicates stricter filtering.", + examples=[0.7, "$inputs.ratio_threshold"], ) matcher: Union[ Literal["FlannBasedMatcher", "BFMatcher"], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="FlannBasedMatcher", description="Matcher to use for comparing the SIFT descriptors", examples=["FlannBasedMatcher", "$inputs.matcher"], ) - visualize: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + visualize: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=False, description="Whether to visualize the keypoints and matches between the two images", examples=[True, "$inputs.visualize"], diff --git a/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py b/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py index 5d327845e..ec2c2bba7 100644 --- a/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py @@ -14,8 +14,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -55,7 +54,7 @@ class SizeMeasurementManifest(WorkflowBlockManifest): } ) type: Literal[f"roboflow_core/size_measurement@v1"] - reference_predictions: BatchSelector( + reference_predictions: Selector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, @@ -64,7 +63,7 @@ class SizeMeasurementManifest(WorkflowBlockManifest): description="Predictions from the reference object model", examples=["$segmentation.reference_predictions"], ) - object_predictions: BatchSelector( + object_predictions: Selector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, @@ -77,7 +76,7 @@ class SizeMeasurementManifest(WorkflowBlockManifest): str, Tuple[float, float], List[float], - ScalarSelector( + Selector( kind=[STRING_KIND, LIST_OF_VALUES_KIND], ), ] = Field( # type: ignore diff --git a/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py b/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py index 3aa6e101c..a58b82f60 100644 --- a/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py @@ -27,9 +27,8 @@ IMAGE_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, FloatZeroToOne, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -66,37 +65,37 @@ class TemplateMatchingManifest(WorkflowBlockManifest): "block_type": "classical_computer_vision", } ) - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("image", "images"), ) - template: BatchSelector(kind=[IMAGE_KIND]) = Field( + template: Selector(kind=[IMAGE_KIND]) = Field( title="Template Image", description="The template image for this step.", examples=["$inputs.template", "$steps.cropping.template"], validation_alias=AliasChoices("template", "templates"), ) - matching_threshold: Union[ScalarSelector(kind=[FLOAT_KIND]), float] = Field( + matching_threshold: Union[Selector(kind=[FLOAT_KIND]), float] = Field( title="Matching Threshold", description="The threshold value for template matching.", default=0.8, 
examples=[0.8, "$inputs.threshold"], ) - apply_nms: Union[ScalarSelector(kind=[BOOLEAN_KIND]), bool] = Field( + apply_nms: Union[Selector(kind=[BOOLEAN_KIND]), bool] = Field( title="Apply NMS", description="Flag to decide if NMS should be applied at the output detections.", default=True, examples=["$inputs.apply_nms", False], ) - nms_threshold: Union[ - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), FloatZeroToOne - ] = Field( - title="NMS threshold", - description="The threshold value NMS procedure (if to be applied).", - default=0.5, - examples=["$inputs.nms_threshold", 0.3], + nms_threshold: Union[Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), FloatZeroToOne] = ( + Field( + title="NMS threshold", + description="The threshold value NMS procedure (if to be applied).", + default=0.5, + examples=["$inputs.nms_threshold", 0.3], + ) ) @classmethod diff --git a/inference/core/workflows/core_steps/classical_cv/threshold/v1.py b/inference/core/workflows/core_steps/classical_cv/threshold/v1.py index 0156b63e2..f091cd48d 100644 --- a/inference/core/workflows/core_steps/classical_cv/threshold/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/threshold/v1.py @@ -15,8 +15,7 @@ IMAGE_KIND, INTEGER_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -43,7 +42,7 @@ class ImageThresholdManifest(WorkflowBlockManifest): } ) - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], @@ -51,7 +50,7 @@ class ImageThresholdManifest(WorkflowBlockManifest): ) threshold_type: Union[ - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), Literal[ "binary", "binary_inv", @@ -68,12 +67,12 @@ class ImageThresholdManifest(WorkflowBlockManifest): examples=["binary", "$inputs.threshold_type"], ) - thresh_value: Union[ScalarSelector(kind=[INTEGER_KIND]), int] = Field( + thresh_value: Union[Selector(kind=[INTEGER_KIND]), int] = Field( description="Threshold value.", examples=[127, "$inputs.thresh_value"], ) - max_value: Union[ScalarSelector(kind=[INTEGER_KIND]), int] = Field( + max_value: Union[Selector(kind=[INTEGER_KIND]), int] = Field( description="Maximum value for thresholding", default=255, examples=[255, "$inputs.max_value"], diff --git a/inference/core/workflows/core_steps/flow_control/continue_if/v1.py b/inference/core/workflows/core_steps/flow_control/continue_if/v1.py index 2bd0fb73d..5f9411dd6 100644 --- a/inference/core/workflows/core_steps/flow_control/continue_if/v1.py +++ b/inference/core/workflows/core_steps/flow_control/continue_if/v1.py @@ -10,10 +10,8 @@ ) from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, - ScalarSelector, + Selector, StepSelector, - WorkflowImageSelector, ) from inference.core.workflows.execution_engine.v1.entities import FlowControl from inference.core.workflows.prototypes.block import ( @@ -63,7 +61,7 @@ class BlockManifest(WorkflowBlockManifest): ) evaluation_parameters: Dict[ str, - Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()], + Selector(), ] = Field( description="References to additional parameters that may be provided in runtime to parametrise operations", examples=[{"left": "$inputs.some"}], diff --git a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py 
b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py index 2031ba29d..cc13b81d2 100644 --- a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py +++ b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py @@ -5,10 +5,8 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, - ScalarSelector, + Selector, StepSelector, - WorkflowImageSelector, ) from inference.core.workflows.execution_engine.v1.entities import FlowControl from inference.core.workflows.prototypes.block import ( @@ -61,7 +59,7 @@ class RateLimiterManifest(WorkflowBlockManifest): default=1.0, ge=0.0, ) - depends_on: Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()] = Field( + depends_on: Selector() = Field( description="Reference to any output of the the step which immediately preceeds this branch.", examples=["$steps.model"], ) diff --git a/inference/core/workflows/core_steps/formatters/csv/v1.py b/inference/core/workflows/core_steps/formatters/csv/v1.py index b2ec874f0..03fd5e2af 100644 --- a/inference/core/workflows/core_steps/formatters/csv/v1.py +++ b/inference/core/workflows/core_steps/formatters/csv/v1.py @@ -17,9 +17,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( STRING_KIND, - BatchSelector, - ScalarSelector, - WorkflowImageSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -138,9 +136,8 @@ class BlockManifest(WorkflowBlockManifest): columns_data: Dict[ str, Union[ - WorkflowImageSelector, - ScalarSelector(), - BatchSelector(), + Selector(points_to_batch=False), + Selector(points_to_batch=True), str, int, float, @@ -177,8 +174,8 @@ def protect_timestamp_column(cls, value: dict) -> dict: return value @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["columns_data"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/formatters/expression/v1.py b/inference/core/workflows/core_steps/formatters/expression/v1.py index d020917a4..4934c02f0 100644 --- a/inference/core/workflows/core_steps/formatters/expression/v1.py +++ b/inference/core/workflows/core_steps/formatters/expression/v1.py @@ -15,11 +15,7 @@ build_operations_chain, ) from inference.core.workflows.execution_engine.entities.base import OutputDefinition -from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, - ScalarSelector, - WorkflowImageSelector, -) +from inference.core.workflows.execution_engine.entities.types import Selector from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -109,7 +105,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal["roboflow_core/expression@v1", "Expression"] data: Dict[ str, - Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()], + Union[Selector()], ] = Field( description="References data to be used to construct results", examples=[ diff --git a/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py b/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py index 74716ea59..fe409e283 100644 --- a/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py +++ b/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py @@ -6,7 +6,7 @@ Batch, OutputDefinition, ) 
-from inference.core.workflows.execution_engine.entities.types import BatchSelector +from inference.core.workflows.execution_engine.entities.types import Selector from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -35,7 +35,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal[ "roboflow_core/first_non_empty_or_default@v1", "FirstNonEmptyOrDefault" ] - data: List[BatchSelector()] = Field( + data: List[Selector()] = Field( description="Reference data to replace empty values", examples=["$steps.my_step.predictions"], min_items=1, diff --git a/inference/core/workflows/core_steps/formatters/json_parser/v1.py b/inference/core/workflows/core_steps/formatters/json_parser/v1.py index 23c82a41f..a3cad763b 100644 --- a/inference/core/workflows/core_steps/formatters/json_parser/v1.py +++ b/inference/core/workflows/core_steps/formatters/json_parser/v1.py @@ -10,7 +10,7 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, LANGUAGE_MODEL_OUTPUT_KIND, - BatchSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -63,7 +63,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/json_parser@v1"] - raw_json: BatchSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( + raw_json: Selector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( description="The string with raw JSON to parse.", examples=[["$steps.lmm.output"]], ) diff --git a/inference/core/workflows/core_steps/formatters/property_definition/v1.py b/inference/core/workflows/core_steps/formatters/property_definition/v1.py index f8f8eeef7..80cc546aa 100644 --- a/inference/core/workflows/core_steps/formatters/property_definition/v1.py +++ b/inference/core/workflows/core_steps/formatters/property_definition/v1.py @@ -9,10 +9,7 @@ build_operations_chain, ) from inference.core.workflows.execution_engine.entities.base import OutputDefinition -from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, - WorkflowImageSelector, -) +from inference.core.workflows.execution_engine.entities.types import Selector from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -57,7 +54,7 @@ class BlockManifest(WorkflowBlockManifest): "PropertyDefinition", "PropertyExtraction", ] - data: Union[WorkflowImageSelector, BatchSelector()] = Field( + data: Selector() = Field( description="Reference data to extract property from", examples=["$steps.my_step.predictions"], ) diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py index 1305f600f..e4b1fd582 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py @@ -17,8 +17,7 @@ LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -65,18 +64,18 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/vlm_as_classifier@v1"] - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( description="The image which was the base to generate VLM prediction", examples=["$inputs.image", "$steps.cropping.crops"], ) - vlm_output: BatchSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( + vlm_output: Selector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( 
title="VLM Output", description="The string with raw classification prediction to parse.", examples=[["$steps.lmm.output"]], ) classes: Union[ - ScalarSelector(kind=[LIST_OF_VALUES_KIND]), - BatchSelector(kind=[LIST_OF_VALUES_KIND]), + Selector(kind=[LIST_OF_VALUES_KIND]), + Selector(kind=[LIST_OF_VALUES_KIND]), List[str], ] = Field( description="List of all classes used by the model, required to " diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py index 68ca8a511..334ad6c9e 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py @@ -32,8 +32,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -93,19 +92,19 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/vlm_as_detector@v1"] - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( description="The image which was the base to generate VLM prediction", examples=["$inputs.image", "$steps.cropping.crops"], ) - vlm_output: BatchSelector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( + vlm_output: Selector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( title="VLM Output", description="The string with raw classification prediction to parse.", examples=[["$steps.lmm.output"]], ) classes: Optional[ Union[ - ScalarSelector(kind=[LIST_OF_VALUES_KIND]), - BatchSelector(kind=[LIST_OF_VALUES_KIND]), + Selector(kind=[LIST_OF_VALUES_KIND]), + Selector(kind=[LIST_OF_VALUES_KIND]), List[str], ] ] = Field( diff --git a/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py b/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py index 06138a7ab..bad1ce85e 100644 --- a/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py @@ -19,7 +19,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -54,7 +54,7 @@ class BlockManifest(WorkflowBlockManifest): "roboflow_core/detections_classes_replacement@v1", "DetectionsClassesReplacement", ] - object_detection_predictions: BatchSelector( + object_detection_predictions: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -65,12 +65,10 @@ class BlockManifest(WorkflowBlockManifest): description="The output of a detection model describing the bounding boxes that will have classes replaced.", examples=["$steps.my_object_detection_model.predictions"], ) - classification_predictions: BatchSelector(kind=[CLASSIFICATION_PREDICTION_KIND]) = ( - Field( - title="Classification results for crops", - description="The output of classification model for crops taken based on RoIs pointed as the other parameter", - examples=["$steps.my_classification_model.predictions"], - ) + classification_predictions: Selector(kind=[CLASSIFICATION_PREDICTION_KIND]) = Field( + title="Classification results for crops", + description="The output of classification model for crops taken based on RoIs pointed as the other parameter", + 
examples=["$steps.my_classification_model.predictions"], ) @classmethod diff --git a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py index 1d482b03b..6c4588604 100644 --- a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py @@ -35,9 +35,8 @@ KEYPOINT_DETECTION_PREDICTION_KIND, LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, FloatZeroToOne, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -81,7 +80,7 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/detections_consensus@v1", "DetectionsConsensus"] predictions_batches: List[ - BatchSelector( + Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -94,31 +93,29 @@ class BlockManifest(WorkflowBlockManifest): examples=[["$steps.a.predictions", "$steps.b.predictions"]], validation_alias=AliasChoices("predictions_batches", "predictions"), ) - required_votes: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + required_votes: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( description="Required number of votes for single detection from different models to accept detection as output detection", examples=[2, "$inputs.required_votes"], ) - class_aware: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + class_aware: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Flag to decide if merging detections is class-aware or only bounding boxes aware", examples=[True, "$inputs.class_aware"], ) - iou_threshold: Union[ - FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]) - ] = Field( - default=0.3, - description="IoU threshold to consider detections from different models as matching (increasing votes for region)", - examples=[0.3, "$inputs.iou_threshold"], - ) - confidence: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( + iou_threshold: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( Field( - default=0.0, - description="Confidence threshold for merged detections", - examples=[0.1, "$inputs.confidence"], + default=0.3, + description="IoU threshold to consider detections from different models as matching (increasing votes for region)", + examples=[0.3, "$inputs.iou_threshold"], ) ) + confidence: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( + default=0.0, + description="Confidence threshold for merged detections", + examples=[0.1, "$inputs.confidence"], + ) classes_to_consider: Optional[ - Union[List[str], ScalarSelector(kind=[LIST_OF_VALUES_KIND])] + Union[List[str], Selector(kind=[LIST_OF_VALUES_KIND])] ] = Field( default=None, description="Optional list of classes to consider in consensus procedure.", @@ -128,7 +125,7 @@ class BlockManifest(WorkflowBlockManifest): Union[ PositiveInt, Dict[str, PositiveInt], - ScalarSelector(kind=[INTEGER_KIND, DICTIONARY_KIND]), + Selector(kind=[INTEGER_KIND, DICTIONARY_KIND]), ] ] = Field( default=None, @@ -152,8 +149,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["predictions_batches"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git 
a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py index 57a8a39a7..26e6fc92b 100644 --- a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py @@ -24,9 +24,8 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, FloatZeroToOne, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -58,11 +57,11 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/detections_stitch@v1"] - reference_image: BatchSelector(kind=[IMAGE_KIND]) = Field( + reference_image: Selector(kind=[IMAGE_KIND]) = Field( description="Image that was origin to take crops that yielded predictions.", examples=["$inputs.image"], ) - predictions: BatchSelector( + predictions: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -73,7 +72,7 @@ class BlockManifest(WorkflowBlockManifest): ) overlap_filtering_strategy: Union[ Literal["none", "nms", "nmm"], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( default="nms", description="Which strategy to employ when filtering overlapping boxes. " @@ -82,7 +81,7 @@ class BlockManifest(WorkflowBlockManifest): ) iou_threshold: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.3, description="Parameter of overlap filtering strategy. If box intersection over union is above this " diff --git a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py index e1b3a974b..890875781 100644 --- a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py +++ b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py @@ -8,7 +8,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( LIST_OF_VALUES_KIND, - BatchSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -42,7 +42,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/dimension_collapse@v1", "DimensionCollapse"] - data: BatchSelector() = Field( + data: Selector() = Field( description="Reference to step outputs at depth level n to be concatenated and moved into level n-1.", examples=["$steps.ocr_step.results"], ) diff --git a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py index 2d5c70c7a..98655132e 100644 --- a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py @@ -26,9 +26,8 @@ LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -99,7 +98,7 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/anthropic_claude@v1"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField + images: Selector(kind=[IMAGE_KIND]) = ImageInputField task_type: TaskType = Field( default="unconstrained", description="Task type to be performed by model. 
Value determines required parameters and output response.", @@ -114,7 +113,7 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - prompt: Optional[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( + prompt: Optional[Union[Selector(kind=[STRING_KIND]), str]] = Field( default=None, description="Text prompt to the Claude model", examples=["my prompt", "$inputs.prompt"], @@ -137,28 +136,26 @@ class BlockManifest(WorkflowBlockManifest): }, }, ) - classes: Optional[Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = ( - Field( - default=None, - description="List of classes to be used", - examples=[["class-a", "class-b"], "$inputs.classes"], - json_schema_extra={ - "relevant_for": { - "task_type": { - "values": TASKS_REQUIRING_CLASSES, - "required": True, - }, + classes: Optional[Union[Selector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = Field( + default=None, + description="List of classes to be used", + examples=[["class-a", "class-b"], "$inputs.classes"], + json_schema_extra={ + "relevant_for": { + "task_type": { + "values": TASKS_REQUIRING_CLASSES, + "required": True, }, }, - ) + }, ) - api_key: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( + api_key: Union[Selector(kind=[STRING_KIND]), str] = Field( description="Your Antropic API key", examples=["xxx-xxx", "$inputs.antropics_api_key"], private=True, ) model_version: Union[ - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), Literal[ "claude-3-5-sonnet", "claude-3-opus", "claude-3-sonnet", "claude-3-haiku" ], @@ -171,14 +168,14 @@ class BlockManifest(WorkflowBlockManifest): default=450, description="Maximum number of tokens the model can generate in it's response.", ) - temperature: Optional[Union[float, ScalarSelector(kind=[FLOAT_KIND])]] = Field( + temperature: Optional[Union[float, Selector(kind=[FLOAT_KIND])]] = Field( default=None, description="Temperature to sample from the model - value in range 0.0-2.0, the higher - the more " 'random / "creative" the generations are.', ge=0.0, le=2.0, ) - max_image_size: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( + max_image_size: Union[int, Selector(kind=[INTEGER_KIND])] = Field( description="Maximum size of the image - if input has larger side, it will be downscaled, keeping aspect ratio", default=1024, ) @@ -209,8 +206,8 @@ def validate(self) -> "BlockManifest": return self @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py index 537b00506..df0a566bd 100644 --- a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py @@ -32,9 +32,8 @@ LIST_OF_VALUES_KIND, PARENT_ID_KIND, PREDICTION_TYPE_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -70,16 +69,16 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/clip_comparison@v1", "ClipComparison"] name: str = Field(description="Unique name of step in workflows") - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - texts: Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( + images: 
Selector(kind=[IMAGE_KIND]) = ImageInputField + texts: Union[Selector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( description="List of texts to calculate similarity against each input image", examples=[["a", "b", "c"], "$inputs.texts"], validation_alias=AliasChoices("texts", "text"), ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py index e5b35d04f..b325f41fa 100644 --- a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py @@ -33,9 +33,8 @@ LIST_OF_VALUES_KIND, PARENT_ID_KIND, STRING_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -69,8 +68,8 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/clip_comparison@v2"] name: str = Field(description="Unique name of step in workflows") - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - classes: Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + classes: Union[Selector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( description="List of classes to calculate similarity against each input image", examples=[["a", "b", "c"], "$inputs.texts"], min_items=1, @@ -87,7 +86,7 @@ class BlockManifest(WorkflowBlockManifest): "ViT-L-14-336px", "ViT-L-14", ], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( default="ViT-B-16", description="Variant of CLIP model", @@ -95,8 +94,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py b/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py index e6fc1d2c7..bd9930a1e 100644 --- a/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py @@ -26,9 +26,8 @@ PARENT_ID_KIND, STRING_KIND, WILDCARD_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -63,8 +62,8 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/cog_vlm@v1", "CogVLM"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - prompt: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + prompt: Union[Selector(kind=[STRING_KIND]), str] = Field( description="Text prompt to the CogVLM model", examples=["my prompt", "$inputs.prompt"], ) @@ -78,8 +77,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py 
b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py index 257b43621..b8784e2e4 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py @@ -23,9 +23,8 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -162,9 +161,9 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/florence_2@v1"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField + images: Selector(kind=[IMAGE_KIND]) = ImageInputField model_version: Union[ - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), Literal["florence-2-base", "florence-2-large"], ] = Field( default="florence-2-base", @@ -188,7 +187,7 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - prompt: Optional[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( + prompt: Optional[Union[Selector(kind=[STRING_KIND]), str]] = Field( default=None, description="Text prompt to the Florence-2 model", examples=["my prompt", "$inputs.prompt"], @@ -198,33 +197,31 @@ class BlockManifest(WorkflowBlockManifest): }, }, ) - classes: Optional[Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = ( - Field( - default=None, - description="List of classes to be used", - examples=[["class-a", "class-b"], "$inputs.classes"], - json_schema_extra={ - "relevant_for": { - "task_type": { - "values": TASKS_REQUIRING_CLASSES, - "required": True, - }, + classes: Optional[Union[Selector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = Field( + default=None, + description="List of classes to be used", + examples=[["class-a", "class-b"], "$inputs.classes"], + json_schema_extra={ + "relevant_for": { + "task_type": { + "values": TASKS_REQUIRING_CLASSES, + "required": True, }, }, - ) + }, ) grounding_detection: Optional[ Union[ List[int], List[float], - BatchSelector( + Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, ] ), - ScalarSelector(kind=[LIST_OF_VALUES_KIND]), + Selector(kind=[LIST_OF_VALUES_KIND]), ] ] = Field( default=None, @@ -256,8 +253,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @model_validator(mode="after") def validate(self) -> "BlockManifest": diff --git a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py index 9dc049160..6c66ca710 100644 --- a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py @@ -24,9 +24,8 @@ LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -108,7 +107,7 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/google_gemini@v1"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField + images: Selector(kind=[IMAGE_KIND]) = ImageInputField task_type: TaskType = Field( default="unconstrained", description="Task type to be performed by model. 
Value determines required parameters and output response.", @@ -123,7 +122,7 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - prompt: Optional[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( + prompt: Optional[Union[Selector(kind=[STRING_KIND]), str]] = Field( default=None, description="Text prompt to the Gemini model", examples=["my prompt", "$inputs.prompt"], @@ -146,28 +145,26 @@ class BlockManifest(WorkflowBlockManifest): }, }, ) - classes: Optional[Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = ( - Field( - default=None, - description="List of classes to be used", - examples=[["class-a", "class-b"], "$inputs.classes"], - json_schema_extra={ - "relevant_for": { - "task_type": { - "values": TASKS_REQUIRING_CLASSES, - "required": True, - }, + classes: Optional[Union[Selector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = Field( + default=None, + description="List of classes to be used", + examples=[["class-a", "class-b"], "$inputs.classes"], + json_schema_extra={ + "relevant_for": { + "task_type": { + "values": TASKS_REQUIRING_CLASSES, + "required": True, }, }, - ) + }, ) - api_key: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( + api_key: Union[Selector(kind=[STRING_KIND]), str] = Field( description="Your Google AI API key", examples=["xxx-xxx", "$inputs.google_api_key"], private=True, ) model_version: Union[ - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), Literal["gemini-1.5-flash", "gemini-1.5-pro"], ] = Field( default="gemini-1.5-flash", @@ -178,7 +175,7 @@ class BlockManifest(WorkflowBlockManifest): default=450, description="Maximum number of tokens the model can generate in it's response.", ) - temperature: Optional[Union[float, ScalarSelector(kind=[FLOAT_KIND])]] = Field( + temperature: Optional[Union[float, Selector(kind=[FLOAT_KIND])]] = Field( default=None, description="Temperature to sample from the model - value in range 0.0-2.0, the higher - the more " 'random / "creative" the generations are.', @@ -212,8 +209,8 @@ def validate(self) -> "BlockManifest": return self @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py b/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py index 5e573fcae..92d6fb86f 100644 --- a/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py @@ -23,8 +23,7 @@ IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -61,7 +60,7 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/google_vision_ocr@v1"] - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( description="Image to run OCR", examples=["$inputs.image", "$steps.cropping.crops"], ) @@ -80,7 +79,7 @@ class BlockManifest(WorkflowBlockManifest): }, }, ) - api_key: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( + api_key: Union[Selector(kind=[STRING_KIND]), str] = Field( description="Your Google Vision API key", examples=["xxx-xxx", "$inputs.google_api_key"], private=True, diff --git 
a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py index 0dd074dd8..d7872d85b 100644 --- a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py @@ -36,9 +36,8 @@ PARENT_ID_KIND, STRING_KIND, WILDCARD_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -94,14 +93,12 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/lmm@v1", "LMM"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - prompt: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + prompt: Union[Selector(kind=[STRING_KIND]), str] = Field( description="Holds unconstrained text prompt to LMM mode", examples=["my prompt", "$inputs.prompt"], ) - lmm_type: Union[ - ScalarSelector(kind=[STRING_KIND]), Literal["gpt_4v", "cog_vlm"] - ] = Field( + lmm_type: Union[Selector(kind=[STRING_KIND]), Literal["gpt_4v", "cog_vlm"]] = Field( description="Type of LMM to be used", examples=["gpt_4v", "$inputs.lmm_type"] ) lmm_config: LMMConfig = Field( @@ -115,7 +112,7 @@ class BlockManifest(WorkflowBlockManifest): } ], ) - remote_api_key: Union[ScalarSelector(kind=[STRING_KIND]), Optional[str]] = Field( + remote_api_key: Union[Selector(kind=[STRING_KIND]), Optional[str]] = Field( default=None, description="Holds API key required to call LMM model - in current state of development, we require OpenAI key when `lmm_type=gpt_4v` and do not require additional API key for CogVLM calls.", examples=["xxx-xxx", "$inputs.api_key"], @@ -128,8 +125,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py index 8554afa28..c5b9da19d 100644 --- a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py @@ -30,9 +30,8 @@ PREDICTION_TYPE_KIND, STRING_KIND, TOP_CLASS_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -68,13 +67,11 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/lmm_for_classification@v1", "LMMForClassification"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - lmm_type: Union[ - ScalarSelector(kind=[STRING_KIND]), Literal["gpt_4v", "cog_vlm"] - ] = Field( + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + lmm_type: Union[Selector(kind=[STRING_KIND]), Literal["gpt_4v", "cog_vlm"]] = Field( description="Type of LMM to be used", examples=["gpt_4v", "$inputs.lmm_type"] ) - classes: Union[List[str], ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( + classes: Union[List[str], Selector(kind=[LIST_OF_VALUES_KIND])] = Field( description="List of classes that LMM shall classify against", examples=[["a", "b"], "$inputs.classes"], ) @@ -89,7 +86,7 @@ class BlockManifest(WorkflowBlockManifest): } ], ) - remote_api_key: Union[ScalarSelector(kind=[STRING_KIND]), Optional[str]] = Field( + remote_api_key: 
Union[Selector(kind=[STRING_KIND]), Optional[str]] = Field( default=None, description="Holds API key required to call LMM model - in current state of development, we require OpenAI key when `lmm_type=gpt_4v` and do not require additional API key for CogVLM calls.", examples=["xxx-xxx", "$inputs.api_key"], @@ -97,8 +94,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/foundation/ocr/v1.py b/inference/core/workflows/core_steps/models/foundation/ocr/v1.py index fe54494ac..d6db83b45 100644 --- a/inference/core/workflows/core_steps/models/foundation/ocr/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/ocr/v1.py @@ -31,8 +31,8 @@ PARENT_ID_KIND, PREDICTION_TYPE_KIND, STRING_KIND, - BatchSelector, ImageInputField, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -71,11 +71,11 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/ocr_model@v1", "OCRModel"] name: str = Field(description="Unique name of step in workflows") - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField + images: Selector(kind=[IMAGE_KIND]) = ImageInputField @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v1.py b/inference/core/workflows/core_steps/models/foundation/openai/v1.py index 787c62188..beac429bb 100644 --- a/inference/core/workflows/core_steps/models/foundation/openai/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/openai/v1.py @@ -27,9 +27,8 @@ PARENT_ID_KIND, STRING_KIND, WILDCARD_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -71,18 +70,18 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/open_ai@v1", "OpenAI"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - prompt: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + prompt: Union[Selector(kind=[STRING_KIND]), str] = Field( description="Text prompt to the OpenAI model", examples=["my prompt", "$inputs.prompt"], ) - openai_api_key: Union[ScalarSelector(kind=[STRING_KIND]), Optional[str]] = Field( + openai_api_key: Union[Selector(kind=[STRING_KIND]), Optional[str]] = Field( description="Your OpenAI API key", examples=["xxx-xxx", "$inputs.openai_api_key"], private=True, ) openai_model: Union[ - ScalarSelector(kind=[STRING_KIND]), Literal["gpt-4o", "gpt-4o-mini"] + Selector(kind=[STRING_KIND]), Literal["gpt-4o", "gpt-4o-mini"] ] = Field( default="gpt-4o", description="Model to be used", @@ -97,7 +96,7 @@ class BlockManifest(WorkflowBlockManifest): ], ) image_detail: Union[ - ScalarSelector(kind=[STRING_KIND]), Literal["auto", "high", "low"] + Selector(kind=[STRING_KIND]), Literal["auto", "high", "low"] ] = Field( default="auto", description="Indicates the image's quality, with 'high' suggesting it is of high resolution and should be processed or displayed with high fidelity.", @@ -110,8 +109,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def 
accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v2.py b/inference/core/workflows/core_steps/models/foundation/openai/v2.py index 260a68e0d..51bcae86e 100644 --- a/inference/core/workflows/core_steps/models/foundation/openai/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/openai/v2.py @@ -23,9 +23,8 @@ LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -98,7 +97,7 @@ class BlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/open_ai@v2"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField + images: Selector(kind=[IMAGE_KIND]) = ImageInputField task_type: TaskType = Field( default="unconstrained", description="Task type to be performed by model. Value determines required parameters and output response.", @@ -112,7 +111,7 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - prompt: Optional[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( + prompt: Optional[Union[Selector(kind=[STRING_KIND]), str]] = Field( default=None, description="Text prompt to the OpenAI model", examples=["my prompt", "$inputs.prompt"], @@ -135,35 +134,33 @@ class BlockManifest(WorkflowBlockManifest): }, }, ) - classes: Optional[Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = ( - Field( - default=None, - description="List of classes to be used", - examples=[["class-a", "class-b"], "$inputs.classes"], - json_schema_extra={ - "relevant_for": { - "task_type": { - "values": TASKS_REQUIRING_CLASSES, - "required": True, - }, + classes: Optional[Union[Selector(kind=[LIST_OF_VALUES_KIND]), List[str]]] = Field( + default=None, + description="List of classes to be used", + examples=[["class-a", "class-b"], "$inputs.classes"], + json_schema_extra={ + "relevant_for": { + "task_type": { + "values": TASKS_REQUIRING_CLASSES, + "required": True, }, }, - ) + }, ) - api_key: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( + api_key: Union[Selector(kind=[STRING_KIND]), str] = Field( description="Your OpenAI API key", examples=["xxx-xxx", "$inputs.openai_api_key"], private=True, ) model_version: Union[ - ScalarSelector(kind=[STRING_KIND]), Literal["gpt-4o", "gpt-4o-mini"] + Selector(kind=[STRING_KIND]), Literal["gpt-4o", "gpt-4o-mini"] ] = Field( default="gpt-4o", description="Model to be used", examples=["gpt-4o", "$inputs.openai_model"], ) image_detail: Union[ - ScalarSelector(kind=[STRING_KIND]), Literal["auto", "high", "low"] + Selector(kind=[STRING_KIND]), Literal["auto", "high", "low"] ] = Field( default="auto", description="Indicates the image's quality, with 'high' suggesting it is of high resolution and should be processed or displayed with high fidelity.", @@ -173,7 +170,7 @@ class BlockManifest(WorkflowBlockManifest): default=450, description="Maximum number of tokens the model can generate in it's response.", ) - temperature: Optional[Union[float, ScalarSelector(kind=[FLOAT_KIND])]] = Field( + temperature: Optional[Union[float, Selector(kind=[FLOAT_KIND])]] = Field( default=None, description="Temperature to sample from the model - value in range 0.0-2.0, the higher - the more " 'random / "creative" the generations are.', @@ -207,8 +204,8 @@ def 
validate(self) -> "BlockManifest": return self @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py index 111d17eb7..94d5c5ea4 100644 --- a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py @@ -38,9 +38,8 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -79,9 +78,9 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/segment_anything@v1"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField + images: Selector(kind=[IMAGE_KIND]) = ImageInputField boxes: Optional[ - BatchSelector( + Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -95,7 +94,7 @@ class BlockManifest(WorkflowBlockManifest): json_schema_extra={"always_visible": True}, ) version: Union[ - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), Literal["hiera_large", "hiera_small", "hiera_tiny", "hiera_b_plus"], ] = Field( default="hiera_tiny", @@ -103,23 +102,21 @@ class BlockManifest(WorkflowBlockManifest): examples=["hiera_large", "$inputs.openai_model"], ) threshold: Union[ - ScalarSelector(kind=[FLOAT_KIND]), + Selector(kind=[FLOAT_KIND]), float, ] = Field( default=0.0, description="Threshold for predicted masks scores", examples=[0.3] ) - multimask_output: Union[Optional[bool], ScalarSelector(kind=[BOOLEAN_KIND])] = ( - Field( - default=True, - description="Flag to determine whether to use sam2 internal multimask or single mask mode. For ambiguous prompts setting to True is recomended.", - examples=[True, "$inputs.multimask_output"], - ) + multimask_output: Union[Optional[bool], Selector(kind=[BOOLEAN_KIND])] = Field( + default=True, + description="Flag to determine whether to use sam2 internal multimask or single mask mode. 
For ambiguous prompts setting to True is recomended.", + examples=[True, "$inputs.multimask_output"], ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images", "boxes"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py index 77707cb1d..9563a1045 100644 --- a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py @@ -19,10 +19,7 @@ IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -64,20 +61,18 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/stability_ai_inpainting@v1"] - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( description="The image which was the base to generate VLM prediction", examples=["$inputs.image", "$steps.cropping.crops"], ) - segmentation_mask: BatchSelector(kind=[INSTANCE_SEGMENTATION_PREDICTION_KIND]) = ( - Field( - name="Segmentation Mask", - description="Segmentation masks", - examples=["$steps.model.predictions"], - ) + segmentation_mask: Selector(kind=[INSTANCE_SEGMENTATION_PREDICTION_KIND]) = Field( + name="Segmentation Mask", + description="Segmentation masks", + examples=["$steps.model.predictions"], ) prompt: Union[ - ScalarSelector(kind=[STRING_KIND]), - BatchSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), str, ] = Field( description="Prompt to inpainting model (what you wish to see)", @@ -85,8 +80,8 @@ class BlockManifest(WorkflowBlockManifest): ) negative_prompt: Optional[ Union[ - ScalarSelector(kind=[STRING_KIND]), - BatchSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), str, ] ] = Field( @@ -94,7 +89,7 @@ class BlockManifest(WorkflowBlockManifest): description="Negative prompt to inpainting model (what you do not wish to see)", examples=["my prompt", "$inputs.prompt"], ) - api_key: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( + api_key: Union[Selector(kind=[STRING_KIND]), str] = Field( description="Your Stability AI API key", examples=["xxx-xxx", "$inputs.stability_ai_api_key"], private=True, diff --git a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py index a6e678364..1050a02ce 100644 --- a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py @@ -28,10 +28,9 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, FloatZeroToOne, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -67,8 +66,8 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/yolo_world_model@v1", "YoloWorldModel", "YoloWorld"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - class_names: Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( + images: Selector(kind=[IMAGE_KIND]) = 
ImageInputField + class_names: Union[Selector(kind=[LIST_OF_VALUES_KIND]), List[str]] = Field( description="One or more classes that you want YOLO-World to detect. The model accepts any string as an input, though does best with short descriptions of common objects.", examples=[["person", "car", "license plate"], "$inputs.class_names"], ) @@ -83,7 +82,7 @@ class BlockManifest(WorkflowBlockManifest): "l", "x", ], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( default="v2-s", description="Variant of YoloWorld model", @@ -91,7 +90,7 @@ class BlockManifest(WorkflowBlockManifest): ) confidence: Union[ Optional[FloatZeroToOne], - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.005, description="Confidence threshold for detections", @@ -99,8 +98,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py index 6fcbf61f6..e2ba1fd96 100644 --- a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py @@ -36,11 +36,10 @@ ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, - BatchSelector, FloatZeroToOne, ImageInputField, RoboflowModelField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -78,25 +77,23 @@ class BlockManifest(WorkflowBlockManifest): "RoboflowInstanceSegmentationModel", "InstanceSegmentationModel", ] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( - RoboflowModelField - ) - class_agnostic_nms: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + model_id: Union[Selector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = RoboflowModelField + class_agnostic_nms: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=False, description="Value to decide if NMS is to be used in class-agnostic mode.", examples=[True, "$inputs.class_agnostic_nms"], ) - class_filter: Union[ - Optional[List[str]], ScalarSelector(kind=[LIST_OF_VALUES_KIND]) - ] = Field( - default=None, - description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", - examples=[["a", "b", "c"], "$inputs.class_filter"], + class_filter: Union[Optional[List[str]], Selector(kind=[LIST_OF_VALUES_KIND])] = ( + Field( + default=None, + description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", + examples=[["a", "b", "c"], "$inputs.class_filter"], + ) ) confidence: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Confidence threshold for predictions", @@ -104,25 +101,25 @@ class BlockManifest(WorkflowBlockManifest): ) iou_threshold: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.3, description="Parameter of NMS, to decide on minimum box intersection 
over union to merge boxes", examples=[0.4, "$inputs.iou_threshold"], ) - max_detections: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + max_detections: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( default=300, description="Maximum number of detections to return", examples=[300, "$inputs.max_detections"], ) - max_candidates: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + max_candidates: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( default=3000, description="Maximum number of candidates as NMS input to be taken into account.", examples=[3000, "$inputs.max_candidates"], ) mask_decode_mode: Union[ Literal["accurate", "tradeoff", "fast"], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( default="accurate", description="Parameter of mask decoding in prediction post-processing.", @@ -130,19 +127,19 @@ class BlockManifest(WorkflowBlockManifest): ) tradeoff_factor: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.0, description="Post-processing parameter to dictate tradeoff between fast and accurate", examples=[0.3, "$inputs.tradeoff_factor"], ) - disable_active_learning: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + disable_active_learning: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Parameter to decide if Active Learning data sampling is disabled for the model", examples=[True, "$inputs.disable_active_learning"], ) active_learning_target_dataset: Union[ - ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + Selector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] ] = Field( default=None, description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " @@ -151,8 +148,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py index 9bf7c8ae7..44b44e5c4 100644 --- a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py @@ -37,11 +37,10 @@ ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, - BatchSelector, FloatZeroToOne, ImageInputField, RoboflowModelField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -79,25 +78,23 @@ class BlockManifest(WorkflowBlockManifest): "RoboflowKeypointDetectionModel", "KeypointsDetectionModel", ] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( - RoboflowModelField - ) - class_agnostic_nms: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + model_id: Union[Selector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = RoboflowModelField + class_agnostic_nms: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=False, description="Value to decide if NMS is to be used in class-agnostic mode.", examples=[True, "$inputs.class_agnostic_nms"], ) - class_filter: Union[ - Optional[List[str]], ScalarSelector(kind=[LIST_OF_VALUES_KIND]) - ] 
= Field( - default=None, - description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", - examples=[["a", "b", "c"], "$inputs.class_filter"], + class_filter: Union[Optional[List[str]], Selector(kind=[LIST_OF_VALUES_KIND])] = ( + Field( + default=None, + description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", + examples=[["a", "b", "c"], "$inputs.class_filter"], + ) ) confidence: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Confidence threshold for predictions", @@ -105,37 +102,37 @@ class BlockManifest(WorkflowBlockManifest): ) iou_threshold: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.3, description="Parameter of NMS, to decide on minimum box intersection over union to merge boxes", examples=[0.4, "$inputs.iou_threshold"], ) - max_detections: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + max_detections: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( default=300, description="Maximum number of detections to return", examples=[300, "$inputs.max_detections"], ) - max_candidates: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + max_candidates: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( default=3000, description="Maximum number of candidates as NMS input to be taken into account.", examples=[3000, "$inputs.max_candidates"], ) keypoint_confidence: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.0, description="Confidence threshold to predict keypoint as visible.", examples=[0.3, "$inputs.keypoint_confidence"], ) - disable_active_learning: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + disable_active_learning: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Parameter to decide if Active Learning data sampling is disabled for the model", examples=[True, "$inputs.disable_active_learning"], ) active_learning_target_dataset: Union[ - ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + Selector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] ] = Field( default=None, description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " @@ -144,8 +141,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py index a956d3d7e..c4531c004 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py @@ -31,11 +31,10 @@ ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, - BatchSelector, FloatZeroToOne, ImageInputField, RoboflowModelField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -73,25 +72,23 @@ class BlockManifest(WorkflowBlockManifest): "RoboflowClassificationModel", 
"ClassificationModel", ] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( - RoboflowModelField - ) + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + model_id: Union[Selector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = RoboflowModelField confidence: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Confidence threshold for predictions", examples=[0.3, "$inputs.confidence_threshold"], ) - disable_active_learning: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + disable_active_learning: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Parameter to decide if Active Learning data sampling is disabled for the model", examples=[True, "$inputs.disable_active_learning"], ) active_learning_target_dataset: Union[ - ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + Selector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] ] = Field( default=None, description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " @@ -100,8 +97,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py index 504931aa9..946b8a917 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py @@ -31,11 +31,10 @@ ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, - BatchSelector, FloatZeroToOne, ImageInputField, RoboflowModelField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -73,25 +72,23 @@ class BlockManifest(WorkflowBlockManifest): "RoboflowMultiLabelClassificationModel", "MultiLabelClassificationModel", ] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( - RoboflowModelField - ) + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + model_id: Union[Selector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = RoboflowModelField confidence: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Confidence threshold for predictions", examples=[0.3, "$inputs.confidence_threshold"], ) - disable_active_learning: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + disable_active_learning: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Parameter to decide if Active Learning data sampling is disabled for the model", examples=[True, "$inputs.disable_active_learning"], ) active_learning_target_dataset: Union[ - ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + Selector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] ] = Field( default=None, description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " @@ -100,8 +97,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def 
get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py index adecc4859..128efb409 100644 --- a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py @@ -34,11 +34,10 @@ ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, - BatchSelector, FloatZeroToOne, ImageInputField, RoboflowModelField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -76,27 +75,23 @@ class BlockManifest(WorkflowBlockManifest): "RoboflowObjectDetectionModel", "ObjectDetectionModel", ] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - model_id: Union[ScalarSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( - RoboflowModelField + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + model_id: Union[Selector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = RoboflowModelField + class_agnostic_nms: Union[Optional[bool], Selector(kind=[BOOLEAN_KIND])] = Field( + default=False, + description="Value to decide if NMS is to be used in class-agnostic mode.", + examples=[True, "$inputs.class_agnostic_nms"], ) - class_agnostic_nms: Union[Optional[bool], ScalarSelector(kind=[BOOLEAN_KIND])] = ( + class_filter: Union[Optional[List[str]], Selector(kind=[LIST_OF_VALUES_KIND])] = ( Field( - default=False, - description="Value to decide if NMS is to be used in class-agnostic mode.", - examples=[True, "$inputs.class_agnostic_nms"], + default=None, + description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", + examples=[["a", "b", "c"], "$inputs.class_filter"], ) ) - class_filter: Union[ - Optional[List[str]], ScalarSelector(kind=[LIST_OF_VALUES_KIND]) - ] = Field( - default=None, - description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", - examples=[["a", "b", "c"], "$inputs.class_filter"], - ) confidence: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Confidence threshold for predictions", @@ -104,29 +99,29 @@ class BlockManifest(WorkflowBlockManifest): ) iou_threshold: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.3, description="Parameter of NMS, to decide on minimum box intersection over union to merge boxes", examples=[0.4, "$inputs.iou_threshold"], ) - max_detections: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + max_detections: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( default=300, description="Maximum number of detections to return", examples=[300, "$inputs.max_detections"], ) - max_candidates: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + max_candidates: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( default=3000, description="Maximum number of candidates as NMS input to be taken into account.", examples=[3000, "$inputs.max_candidates"], ) - disable_active_learning: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + disable_active_learning: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, 
description="Parameter to decide if Active Learning data sampling is disabled for the model", examples=[True, "$inputs.disable_active_learning"], ) active_learning_target_dataset: Union[ - ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + Selector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] ] = Field( default=None, description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " @@ -135,8 +130,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py b/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py index 1358a9d4c..4579f487f 100644 --- a/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py +++ b/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py @@ -24,8 +24,8 @@ from inference.core.workflows.execution_engine.entities.types import ( BAR_CODE_DETECTION_KIND, IMAGE_KIND, - BatchSelector, ImageInputField, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -56,11 +56,11 @@ class BlockManifest(WorkflowBlockManifest): type: Literal[ "roboflow_core/barcode_detector@v1", "BarcodeDetector", "BarcodeDetection" ] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField + images: Selector(kind=[IMAGE_KIND]) = ImageInputField @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py b/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py index d8075e7d4..801c4d818 100644 --- a/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py +++ b/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py @@ -24,8 +24,8 @@ from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, QR_CODE_DETECTION_KIND, - BatchSelector, ImageInputField, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -56,11 +56,11 @@ class BlockManifest(WorkflowBlockManifest): type: Literal[ "roboflow_core/qr_code_detector@v1", "QRCodeDetector", "QRCodeDetection" ] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField + images: Selector(kind=[IMAGE_KIND]) = ImageInputField @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/sinks/email_notification/v1.py b/inference/core/workflows/core_steps/sinks/email_notification/v1.py index 76413aa53..22432df4c 100644 --- a/inference/core/workflows/core_steps/sinks/email_notification/v1.py +++ b/inference/core/workflows/core_steps/sinks/email_notification/v1.py @@ -29,8 +29,7 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -178,14 +177,14 @@ class BlockManifest(WorkflowBlockManifest): "During last 5 minutes detected \{\{ $parameters.num_instances \}\} 
instances" ], ) - sender_email: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( + sender_email: Union[str, Selector(kind=[STRING_KIND])] = Field( description="E-mail to be used to send the message", examples=["sender@gmail.com"], ) receiver_email: Union[ str, List[str], - ScalarSelector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), + Selector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), ] = Field( description="Destination e-mail address", examples=["receiver@gmail.com"], @@ -194,7 +193,7 @@ class BlockManifest(WorkflowBlockManifest): Union[ str, List[str], - ScalarSelector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), + Selector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), ] ] = Field( default=None, @@ -205,7 +204,7 @@ class BlockManifest(WorkflowBlockManifest): Union[ str, List[str], - ScalarSelector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), + Selector(kind=[STRING_KIND, LIST_OF_VALUES_KIND]), ] ] = Field( default=None, @@ -214,7 +213,7 @@ class BlockManifest(WorkflowBlockManifest): ) message_parameters: Dict[ str, - Union[ScalarSelector(), BatchSelector(), str, int, float, bool], + Union[Selector(), Selector(), str, int, float, bool], ] = Field( description="References data to be used to construct each and every column", examples=[ @@ -236,16 +235,16 @@ class BlockManifest(WorkflowBlockManifest): ], default_factory=dict, ) - attachments: Dict[str, BatchSelector(kind=[STRING_KIND, BYTES_KIND])] = Field( + attachments: Dict[str, Selector(kind=[STRING_KIND, BYTES_KIND])] = Field( description="Attachments", default_factory=dict, examples=[{"report.cvs": "$steps.csv_formatter.csv_content"}], ) - smtp_server: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( + smtp_server: Union[str, Selector(kind=[STRING_KIND])] = Field( description="Custom SMTP server to be used", examples=["$inputs.smtp_server", "smtp.google.com"], ) - sender_email_password: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( + sender_email_password: Union[str, Selector(kind=[STRING_KIND])] = Field( description="Sender e-mail password be used when authenticating to SMTP server", private=True, examples=["$inputs.email_password"], @@ -258,20 +257,20 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - fire_and_forget: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + fire_and_forget: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Boolean flag dictating if sink is supposed to be executed in the background, " "not waiting on status of registration before end of workflow run. 
Use `True` if best-effort " "registration is needed, use `False` while debugging and if error handling is needed", examples=["$inputs.fire_and_forget", False], ) - disable_sink: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + disable_sink: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=False, description="boolean flag that can be also reference to input - to arbitrarily disable " "data collection for specific request", examples=[False, "$inputs.disable_email_notifications"], ) - cooldown_seconds: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( + cooldown_seconds: Union[int, Selector(kind=[INTEGER_KIND])] = Field( default=5, description="Number of seconds to wait until follow-up notification can be sent", examples=["$inputs.cooldown_seconds", 3], diff --git a/inference/core/workflows/core_steps/sinks/local_file/v1.py b/inference/core/workflows/core_steps/sinks/local_file/v1.py index c91dd909f..ee067ba9d 100644 --- a/inference/core/workflows/core_steps/sinks/local_file/v1.py +++ b/inference/core/workflows/core_steps/sinks/local_file/v1.py @@ -11,8 +11,7 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -77,7 +76,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/local_file_sink@v1"] - content: BatchSelector(kind=[STRING_KIND]) = Field( + content: Selector(kind=[STRING_KIND]) = Field( description="Content of the file to save", examples=["$steps.csv_formatter.csv_content"], ) @@ -102,11 +101,11 @@ class BlockManifest(WorkflowBlockManifest): } }, ) - target_directory: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( + target_directory: Union[Selector(kind=[STRING_KIND]), str] = Field( description="Target directory", examples=["some/location"], ) - file_name_prefix: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( + file_name_prefix: Union[Selector(kind=[STRING_KIND]), str] = Field( default="workflow_output", description="File name prefix", examples=["my_file"], @@ -114,7 +113,7 @@ class BlockManifest(WorkflowBlockManifest): "always_visible": True, }, ) - max_entries_per_file: Union[int, ScalarSelector(kind=[STRING_KIND])] = Field( + max_entries_per_file: Union[int, Selector(kind=[STRING_KIND])] = Field( default=1024, description="Defines how many datapoints can be appended to a single file", examples=[1024], diff --git a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py index 92166ef95..3e1aeb0eb 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py @@ -20,8 +20,7 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -55,7 +54,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/roboflow_custom_metadata@v1", "RoboflowCustomMetadata"] - predictions: BatchSelector( + predictions: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -68,8 +67,8 @@ class BlockManifest(WorkflowBlockManifest): ) field_value: Union[ str, - ScalarSelector(kind=[STRING_KIND]), - BatchSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), + 
Selector(kind=[STRING_KIND]), ] = Field( description="This is the name of the metadata field you are creating", examples=["toronto", "pass", "fail"], @@ -78,7 +77,7 @@ class BlockManifest(WorkflowBlockManifest): description="Name of the field to be updated in Roboflow Customer Metadata", examples=["The name of the value of the field"], ) - fire_and_forget: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + fire_and_forget: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Boolean flag dictating if sink is supposed to be executed in the background, " "not waiting on status of registration before end of workflow run. Use `True` if best-effort " diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py index 0e4612cba..1ec9db3d7 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py @@ -64,9 +64,8 @@ OBJECT_DETECTION_PREDICTION_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -103,9 +102,9 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/roboflow_dataset_upload@v1", "RoboflowDatasetUpload"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField + images: Selector(kind=[IMAGE_KIND]) = ImageInputField predictions: Optional[ - BatchSelector( + Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -118,7 +117,7 @@ class BlockManifest(WorkflowBlockManifest): description="Reference q detection-like predictions", examples=["$steps.object_detection_model.predictions"], ) - target_project: Union[ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), str] = Field( + target_project: Union[Selector(kind=[ROBOFLOW_PROJECT_KIND]), str] = Field( description="name of Roboflow dataset / project to be used as target for collected data", examples=["my_dataset", "$inputs.target_al_dataset"], ) @@ -163,25 +162,25 @@ class BlockManifest(WorkflowBlockManifest): description="Compression level for images registered", examples=[75], ) - registration_tags: List[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( + registration_tags: List[Union[Selector(kind=[STRING_KIND]), str]] = Field( default_factory=list, description="Tags to be attached to registered datapoints", examples=[["location-florida", "factory-name", "$inputs.dynamic_tag"]], ) - disable_sink: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + disable_sink: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=False, description="boolean flag that can be also reference to input - to arbitrarily disable " "data collection for specific request", examples=[True, "$inputs.disable_active_learning"], ) - fire_and_forget: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + fire_and_forget: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Boolean flag dictating if sink is supposed to be executed in the background, " "not waiting on status of registration before end of workflow run. 
Use `True` if best-effort " "registration is needed, use `False` while debugging and if error handling is needed", examples=[True], ) - labeling_batch_prefix: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( + labeling_batch_prefix: Union[str, Selector(kind=[STRING_KIND])] = Field( default="workflows_data_collector", description="Prefix of the name for labeling batches that will be registered in Roboflow app", examples=["my_labeling_batch_name"], @@ -195,8 +194,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images", "predictions"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py index 035a5bb24..ba3293530 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py @@ -26,9 +26,8 @@ OBJECT_DETECTION_PREDICTION_KIND, ROBOFLOW_PROJECT_KIND, STRING_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -67,8 +66,8 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/roboflow_dataset_upload@v2"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - target_project: Union[ScalarSelector(kind=[ROBOFLOW_PROJECT_KIND]), str] = Field( + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + target_project: Union[Selector(kind=[ROBOFLOW_PROJECT_KIND]), str] = Field( description="name of Roboflow dataset / project to be used as target for collected data", examples=["my_dataset", "$inputs.target_al_dataset"], ) @@ -79,7 +78,7 @@ class BlockManifest(WorkflowBlockManifest): json_schema_extra={"hidden": True}, ) predictions: Optional[ - BatchSelector( + Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -93,14 +92,12 @@ class BlockManifest(WorkflowBlockManifest): examples=["$steps.object_detection_model.predictions"], json_schema_extra={"always_visible": True}, ) - data_percentage: Union[FloatZeroToHundred, ScalarSelector(kind=[FLOAT_KIND])] = ( - Field( - default=100, - description="Percent of data that will be saved (in range [0.0, 100.0])", - examples=[True, False, "$inputs.persist_predictions"], - ) + data_percentage: Union[FloatZeroToHundred, Selector(kind=[FLOAT_KIND])] = Field( + default=100, + description="Percent of data that will be saved (in range [0.0, 100.0])", + examples=[True, False, "$inputs.persist_predictions"], ) - persist_predictions: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + persist_predictions: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Boolean flag to decide if predictions should be registered along with images", examples=[True, False, "$inputs.persist_predictions"], @@ -136,24 +133,24 @@ class BlockManifest(WorkflowBlockManifest): description="Compression level for images registered", examples=[95, 75], ) - registration_tags: List[Union[ScalarSelector(kind=[STRING_KIND]), str]] = Field( + registration_tags: List[Union[Selector(kind=[STRING_KIND]), str]] = Field( default_factory=list, description="Tags to be attached to registered datapoints", examples=[["location-florida", "factory-name", "$inputs.dynamic_tag"]], ) - disable_sink: Union[bool, 
ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + disable_sink: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=False, description="boolean flag that can be also reference to input - to arbitrarily disable " "data collection for specific request", examples=[True, "$inputs.disable_active_learning"], ) - fire_and_forget: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + fire_and_forget: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Boolean flag dictating if sink is supposed to be executed in the background, " "not waiting on status of registration before end of workflow run. Use `True` if best-effort " "registration is needed, use `False` while debugging and if error handling is needed", ) - labeling_batch_prefix: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( + labeling_batch_prefix: Union[str, Selector(kind=[STRING_KIND])] = Field( default="workflows_data_collector", description="Prefix of the name for labeling batches that will be registered in Roboflow app", examples=["my_labeling_batch_name"], @@ -167,8 +164,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images", "predictions"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/sinks/webhook/v1.py b/inference/core/workflows/core_steps/sinks/webhook/v1.py index 445fbe98b..ed4d8b4eb 100644 --- a/inference/core/workflows/core_steps/sinks/webhook/v1.py +++ b/inference/core/workflows/core_steps/sinks/webhook/v1.py @@ -27,8 +27,7 @@ ROBOFLOW_PROJECT_KIND, STRING_KIND, TOP_CLASS_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -164,7 +163,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/webhook_sink@v1"] - url: Union[ScalarSelector(kind=[STRING_KIND]), str] = Field( + url: Union[Selector(kind=[STRING_KIND]), str] = Field( description="URL of the resource to make request", ) method: Literal["GET", "POST", "PUT"] = Field( @@ -173,8 +172,8 @@ class BlockManifest(WorkflowBlockManifest): query_parameters: Dict[ str, Union[ - ScalarSelector(kind=QUERY_PARAMS_KIND), - BatchSelector(kind=QUERY_PARAMS_KIND), + Selector(kind=QUERY_PARAMS_KIND), + Selector(kind=QUERY_PARAMS_KIND), str, float, bool, @@ -189,8 +188,8 @@ class BlockManifest(WorkflowBlockManifest): headers: Dict[ str, Union[ - ScalarSelector(kind=HEADER_KIND), - BatchSelector(kind=HEADER_KIND), + Selector(kind=HEADER_KIND), + Selector(kind=HEADER_KIND), str, float, bool, @@ -204,8 +203,8 @@ class BlockManifest(WorkflowBlockManifest): json_payload: Dict[ str, Union[ - ScalarSelector(), - BatchSelector(), + Selector(), + Selector(), str, float, bool, @@ -233,8 +232,8 @@ class BlockManifest(WorkflowBlockManifest): multi_part_encoded_files: Dict[ str, Union[ - ScalarSelector(), - BatchSelector(), + Selector(), + Selector(), str, float, bool, @@ -265,8 +264,8 @@ class BlockManifest(WorkflowBlockManifest): form_data: Dict[ str, Union[ - ScalarSelector(), - BatchSelector(), + Selector(), + Selector(), str, float, bool, @@ -291,25 +290,25 @@ class BlockManifest(WorkflowBlockManifest): ], default_factory=dict, ) - request_timeout: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( + request_timeout: Union[int, Selector(kind=[INTEGER_KIND])] = Field( default=2, description="Number of seconds to wait for remote API response", 
examples=["$inputs.request_timeout", 10], ) - fire_and_forget: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + fire_and_forget: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=True, description="Boolean flag dictating if sink is supposed to be executed in the background, " "not waiting on status of registration before end of workflow run. Use `True` if best-effort " "registration is needed, use `False` while debugging and if error handling is needed", examples=["$inputs.fire_and_forget", True], ) - disable_sink: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( + disable_sink: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( default=False, description="boolean flag that can be also reference to input - to arbitrarily disable " "data collection for specific request", examples=[False, "$inputs.disable_email_notifications"], ) - cooldown_seconds: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( + cooldown_seconds: Union[int, Selector(kind=[INTEGER_KIND])] = Field( default=5, description="Number of seconds to wait until follow-up notification can be sent", json_schema_extra={ diff --git a/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py b/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py index ed9c43dae..5da88e680 100644 --- a/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py @@ -11,9 +11,8 @@ from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, INTEGER_KIND, - BatchSelector, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -43,27 +42,27 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/absolute_static_crop@v1", "AbsoluteStaticCrop"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - x_center: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + x_center: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( description="Center X of static crop (absolute coordinate)", examples=[40, "$inputs.center_x"], ) - y_center: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + y_center: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( description="Center Y of static crop (absolute coordinate)", examples=[40, "$inputs.center_y"], ) - width: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + width: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( description="Width of static crop (absolute value)", examples=[40, "$inputs.width"], ) - height: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + height: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( description="Height of static crop (absolute value)", examples=[40, "$inputs.height"], ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py b/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py index 8481ca532..8514f6cb4 100644 --- a/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py +++ b/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py @@ -14,7 +14,7 @@ 
from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( INSTANCE_SEGMENTATION_PREDICTION_KIND, - BatchSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -46,8 +46,8 @@ class BoundingRectManifest(WorkflowBlockManifest): "block_type": "transformation", } ) - type: Literal[f"roboflow_core/bounding_rect@v1"] - predictions: BatchSelector( + type: Literal["roboflow_core/bounding_rect@v1"] + predictions: Selector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] @@ -56,10 +56,6 @@ class BoundingRectManifest(WorkflowBlockManifest): examples=["$segmentation.predictions"], ) - @classmethod - def accepts_batch_input(cls) -> bool: - return False - @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py index 1b0009168..4360c5324 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py @@ -12,9 +12,8 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, - ScalarSelector, - WorkflowVideoMetadataSelector, + VIDEO_METADATA_KIND, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -51,8 +50,8 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/byte_tracker@v1"] - metadata: WorkflowVideoMetadataSelector - detections: BatchSelector( + metadata: Selector(kind=[VIDEO_METADATA_KIND]) + detections: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -61,28 +60,28 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): description="Objects to be tracked", examples=["$steps.object_detection_model.predictions"], ) - track_activation_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + track_activation_threshold: Union[Optional[float], Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.25, description="Detection confidence threshold for track activation." " Increasing track_activation_threshold improves accuracy and stability but might miss true detections." " Decreasing it increases completeness but risks introducing noise and instability.", examples=[0.25, "$inputs.confidence"], ) - lost_track_buffer: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + lost_track_buffer: Union[Optional[int], Selector(kind=[INTEGER_KIND])] = Field( # type: ignore default=30, description="Number of frames to buffer when a track is lost." " Increasing lost_track_buffer enhances occlusion handling, significantly reducing" " the likelihood of track fragmentation or disappearance caused by brief detection gaps.", examples=[30, "$inputs.lost_track_buffer"], ) - minimum_matching_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + minimum_matching_threshold: Union[Optional[float], Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.8, description="Threshold for matching tracks with detections." " Increasing minimum_matching_threshold improves accuracy but risks fragmentation." 
" Decreasing it improves completeness but risks false positives and drift.", examples=[0.8, "$inputs.min_matching_threshold"], ) - minimum_consecutive_frames: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + minimum_consecutive_frames: Union[Optional[int], Selector(kind=[INTEGER_KIND])] = Field( # type: ignore default=1, description="Number of consecutive frames that an object must be tracked before it is considered a 'valid' track." " Increasing minimum_consecutive_frames prevents the creation of accidental tracks from false detection" diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py index 20a59be4a..f7152ae32 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py @@ -13,8 +13,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, - ScalarSelector, + Selector, WorkflowImageSelector, ) from inference.core.workflows.prototypes.block import ( @@ -58,7 +57,7 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/byte_tracker@v2"] image: WorkflowImageSelector - detections: BatchSelector( + detections: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -67,28 +66,28 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): description="Objects to be tracked", examples=["$steps.object_detection_model.predictions"], ) - track_activation_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + track_activation_threshold: Union[Optional[float], Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.25, description="Detection confidence threshold for track activation." " Increasing track_activation_threshold improves accuracy and stability but might miss true detections." " Decreasing it increases completeness but risks introducing noise and instability.", examples=[0.25, "$inputs.confidence"], ) - lost_track_buffer: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + lost_track_buffer: Union[Optional[int], Selector(kind=[INTEGER_KIND])] = Field( # type: ignore default=30, description="Number of frames to buffer when a track is lost." " Increasing lost_track_buffer enhances occlusion handling, significantly reducing" " the likelihood of track fragmentation or disappearance caused by brief detection gaps.", examples=[30, "$inputs.lost_track_buffer"], ) - minimum_matching_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + minimum_matching_threshold: Union[Optional[float], Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.8, description="Threshold for matching tracks with detections." " Increasing minimum_matching_threshold improves accuracy but risks fragmentation." " Decreasing it improves completeness but risks false positives and drift.", examples=[0.8, "$inputs.min_matching_threshold"], ) - minimum_consecutive_frames: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + minimum_consecutive_frames: Union[Optional[int], Selector(kind=[INTEGER_KIND])] = Field( # type: ignore default=1, description="Number of consecutive frames that an object must be tracked before it is considered a 'valid' track." 
" Increasing minimum_consecutive_frames prevents the creation of accidental tracks from false detection" diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py index dfcbc11af..6864a54f0 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py @@ -11,12 +11,11 @@ ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, - ScalarSelector, - WorkflowImageSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -72,8 +71,8 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): protected_namespaces=(), ) type: Literal["roboflow_core/byte_tracker@v3"] - image: WorkflowImageSelector - detections: BatchSelector( + image: Selector(kind=[IMAGE_KIND]) + detections: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -82,28 +81,28 @@ class ByteTrackerBlockManifest(WorkflowBlockManifest): description="Objects to be tracked", examples=["$steps.object_detection_model.predictions"], ) - track_activation_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + track_activation_threshold: Union[Optional[float], Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.25, description="Detection confidence threshold for track activation." " Increasing track_activation_threshold improves accuracy and stability but might miss true detections." " Decreasing it increases completeness but risks introducing noise and instability.", examples=[0.25, "$inputs.confidence"], ) - lost_track_buffer: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + lost_track_buffer: Union[Optional[int], Selector(kind=[INTEGER_KIND])] = Field( # type: ignore default=30, description="Number of frames to buffer when a track is lost." " Increasing lost_track_buffer enhances occlusion handling, significantly reducing" " the likelihood of track fragmentation or disappearance caused by brief detection gaps.", examples=[30, "$inputs.lost_track_buffer"], ) - minimum_matching_threshold: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + minimum_matching_threshold: Union[Optional[float], Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.8, description="Threshold for matching tracks with detections." " Increasing minimum_matching_threshold improves accuracy but risks fragmentation." " Decreasing it improves completeness but risks false positives and drift.", examples=[0.8, "$inputs.min_matching_threshold"], ) - minimum_consecutive_frames: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + minimum_consecutive_frames: Union[Optional[int], Selector(kind=[INTEGER_KIND])] = Field( # type: ignore default=1, description="Number of consecutive frames that an object must be tracked before it is considered a 'valid' track." 
" Increasing minimum_consecutive_frames prevents the creation of accidental tracks from false detection" diff --git a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py index d3aec0b40..284cdece5 100644 --- a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py +++ b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py @@ -19,8 +19,7 @@ INTEGER_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -51,7 +50,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/detection_offset@v1", "DetectionOffset"] - predictions: BatchSelector( + predictions: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -61,20 +60,20 @@ class BlockManifest(WorkflowBlockManifest): description="Reference to detection-like predictions", examples=["$steps.object_detection_model.predictions"], ) - offset_width: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + offset_width: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( description="Offset for boxes width", examples=[10, "$inputs.offset_x"], validation_alias=AliasChoices("offset_width", "offset_x"), ) - offset_height: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + offset_height: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( description="Offset for boxes height", examples=[10, "$inputs.offset_y"], validation_alias=AliasChoices("offset_height", "offset_y"), ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["predictions"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py index 308eafc8b..198bedc98 100644 --- a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py @@ -18,9 +18,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, - ScalarSelector, - WorkflowImageSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -71,7 +69,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/detections_filter@v1", "DetectionsFilter"] - predictions: BatchSelector( + predictions: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -86,7 +84,7 @@ class BlockManifest(WorkflowBlockManifest): ) operations_parameters: Dict[ str, - Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()], + Selector(), ] = Field( description="References to additional parameters that may be provided in runtime to parametrise operations", examples=[ @@ -98,8 +96,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["predictions"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py 
b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py index 7d40254e4..86cb349c5 100644 --- a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py @@ -24,9 +24,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, - ScalarSelector, - WorkflowImageSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -85,7 +83,7 @@ class BlockManifest(WorkflowBlockManifest): type: Literal[ "roboflow_core/detections_transformation@v1", "DetectionsTransformation" ] - predictions: BatchSelector( + predictions: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -101,7 +99,7 @@ class BlockManifest(WorkflowBlockManifest): ) operations_parameters: Dict[ str, - Union[WorkflowImageSelector, ScalarSelector(), BatchSelector()], + Union[Selector(points_to_batch=True), Selector(points_to_batch=False)], ] = Field( description="References to additional parameters that may be provided in runtime to parameterize operations", examples=[ @@ -113,8 +111,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["predictions", "operations_parameters"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py index 3d5c19948..4b891e20e 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py @@ -22,10 +22,7 @@ OBJECT_DETECTION_PREDICTION_KIND, RGB_COLOR_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -60,13 +57,13 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/dynamic_crop@v1", "DynamicCrop", "Crop"] - images: BatchSelector(kind=[IMAGE_KIND]) = Field( + images: Selector(kind=[IMAGE_KIND]) = Field( title="Image to Crop", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("images", "image"), ) - predictions: BatchSelector( + predictions: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -79,7 +76,7 @@ class BlockManifest(WorkflowBlockManifest): validation_alias=AliasChoices("predictions", "detections"), ) mask_opacity: Union[ - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), float, ] = Field( default=0.0, @@ -97,8 +94,8 @@ class BlockManifest(WorkflowBlockManifest): }, ) background_color: Union[ - ScalarSelector(kind=[STRING_KIND]), - BatchSelector(kind=[RGB_COLOR_KIND]), + Selector(kind=[STRING_KIND]), + Selector(kind=[RGB_COLOR_KIND]), str, Tuple[int, int, int], ] = Field( @@ -110,8 +107,8 @@ class BlockManifest(WorkflowBlockManifest): ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images", "predictions"] @classmethod def get_output_dimensionality_offset(cls) -> int: diff --git 
a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py index 87ae6dee6..befbab639 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py @@ -13,8 +13,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -49,7 +48,7 @@ class DynamicZonesManifest(WorkflowBlockManifest): } ) type: Literal[f"{TYPE}", "DynamicZone"] - predictions: BatchSelector( + predictions: Selector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] @@ -57,14 +56,14 @@ class DynamicZonesManifest(WorkflowBlockManifest): description="", examples=["$segmentation.predictions"], ) - required_number_of_vertices: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + required_number_of_vertices: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Keep simplifying polygon until number of vertices matches this number", examples=[4, "$inputs.vertices"], ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["predictions"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/transformations/image_slicer/v1.py b/inference/core/workflows/core_steps/transformations/image_slicer/v1.py index 31cc7f4ef..6529a0dc2 100644 --- a/inference/core/workflows/core_steps/transformations/image_slicer/v1.py +++ b/inference/core/workflows/core_steps/transformations/image_slicer/v1.py @@ -15,8 +15,7 @@ FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, INTEGER_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -57,25 +56,25 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/image_slicer@v1"] - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Image to slice", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("image", "images"), ) - slice_width: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + slice_width: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( default=640, description="Width of each slice, in pixels", examples=[320, "$inputs.slice_width"], ) - slice_height: Union[PositiveInt, ScalarSelector(kind=[INTEGER_KIND])] = Field( + slice_height: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( default=640, description="Height of each slice, in pixels", examples=[320, "$inputs.slice_height"], ) overlap_ratio_width: Union[ Annotated[float, Field(ge=0.0, lt=1.0)], - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.2, description="Overlap ratio between consecutive slices in the width dimension", @@ -83,7 +82,7 @@ class BlockManifest(WorkflowBlockManifest): ) overlap_ratio_height: Union[ Annotated[float, Field(ge=0.0, lt=1.0)], - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.2, description="Overlap ratio between consecutive slices in the height dimension", diff --git a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py 
b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py index a0218c20c..010394e23 100644 --- a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py +++ b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py @@ -23,10 +23,7 @@ LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -62,7 +59,7 @@ class PerspectiveCorrectionManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/perspective_correction@v1", "PerspectiveCorrection"] predictions: Optional[ - BatchSelector( + Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -73,36 +70,36 @@ class PerspectiveCorrectionManifest(WorkflowBlockManifest): default=None, examples=["$steps.object_detection_model.predictions"], ) - images: BatchSelector(kind=[IMAGE_KIND]) = Field( + images: Selector(kind=[IMAGE_KIND]) = Field( title="Image to Crop", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("images", "image"), ) - perspective_polygons: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + perspective_polygons: Union[list, Selector(kind=[LIST_OF_VALUES_KIND]), Selector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Perspective polygons (for each batch at least one must be consisting of 4 vertices)", examples=["$steps.perspective_wrap.zones"], ) - transformed_rect_width: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + transformed_rect_width: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Transformed rect width", default=1000, examples=[1000] ) - transformed_rect_height: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + transformed_rect_height: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Transformed rect height", default=1000, examples=[1000] ) - extend_perspective_polygon_by_detections_anchor: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + extend_perspective_polygon_by_detections_anchor: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description=f"If set, perspective polygons will be extended to contain all bounding boxes. 
Allowed values: {', '.join(sv.Position.list())}", default="", examples=["CENTER"], ) - warp_image: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + warp_image: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description=f"If set to True, image will be warped into transformed rect", default=False, examples=[False], ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images", "predictions"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py b/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py index 58554f037..b387f68fc 100644 --- a/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py @@ -11,10 +11,9 @@ from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, - BatchSelector, FloatZeroToOne, ImageInputField, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -44,33 +43,27 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/relative_statoic_crop@v1", "RelativeStaticCrop"] - images: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField - x_center: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( - Field( - description="Center X of static crop (relative coordinate 0.0-1.0)", - examples=[0.3, "$inputs.center_x"], - ) + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + x_center: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( + description="Center X of static crop (relative coordinate 0.0-1.0)", + examples=[0.3, "$inputs.center_x"], ) - y_center: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( - Field( - description="Center Y of static crop (relative coordinate 0.0-1.0)", - examples=[0.3, "$inputs.center_y"], - ) + y_center: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( + description="Center Y of static crop (relative coordinate 0.0-1.0)", + examples=[0.3, "$inputs.center_y"], ) - width: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( + width: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( description="Width of static crop (relative value 0.0-1.0)", examples=[0.3, "$inputs.width"], ) - height: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = ( - Field( - description="Height of static crop (relative value 0.0-1.0)", - examples=[0.3, "$inputs.height"], - ) + height: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( + description="Height of static crop (relative value 0.0-1.0)", + examples=[0.3, "$inputs.height"], ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py b/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py index d25d0bdfd..7fd6758a9 100644 --- a/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py +++ b/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py @@ -11,12 
+11,11 @@ ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, - ScalarSelector, - WorkflowImageSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -46,8 +45,8 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/stabilize_detections@v1"] - image: WorkflowImageSelector - detections: BatchSelector( + image: Selector(kind=[IMAGE_KIND]) + detections: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -56,14 +55,14 @@ class BlockManifest(WorkflowBlockManifest): description="Tracked detections", examples=["$steps.object_detection_model.predictions"], ) - smoothing_window_size: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + smoothing_window_size: Union[Optional[int], Selector(kind=[INTEGER_KIND])] = Field( # type: ignore default=3, description="Predicted movement of detection will be smoothed based on historical measurements of velocity," " this parameter controls number of historical measurements taken under account when calculating smoothed velocity." " Detections will be removed from generating smoothed predictions if they had been missing for longer than this number of frames.", examples=[5, "$inputs.smoothing_window_size"], ) - bbox_smoothing_coefficient: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + bbox_smoothing_coefficient: Union[Optional[float], Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=0.2, description="Bounding box smoothing coefficient applied when given tracker_id is present on current frame." " This parameter must be initialized with value between 0 and 1", diff --git a/inference/core/workflows/core_steps/transformations/stitch_images/v1.py b/inference/core/workflows/core_steps/transformations/stitch_images/v1.py index be55d0535..151d678c0 100644 --- a/inference/core/workflows/core_steps/transformations/stitch_images/v1.py +++ b/inference/core/workflows/core_steps/transformations/stitch_images/v1.py @@ -14,8 +14,7 @@ FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, INTEGER_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -47,26 +46,26 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/stitch_images@v1"] - image1: BatchSelector(kind=[IMAGE_KIND]) = Field( + image1: Selector(kind=[IMAGE_KIND]) = Field( title="First image to stitch", description="First input image for this step.", examples=["$inputs.image1"], validation_alias=AliasChoices("image1"), ) - image2: BatchSelector(kind=[IMAGE_KIND]) = Field( + image2: Selector(kind=[IMAGE_KIND]) = Field( title="Second image to stitch", description="Second input image for this step.", examples=["$inputs.image2"], validation_alias=AliasChoices("image2"), ) - max_allowed_reprojection_error: Union[Optional[float], ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + max_allowed_reprojection_error: Union[Optional[float], Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore default=3, description="Advanced parameter overwriting cv.findHomography ransacReprojThreshold parameter." " Maximum allowed reprojection error to treat a point pair as an inlier." 
" Increasing value of this parameter for low details photo may yield better results.", examples=[3, "$inputs.min_overlap_ratio_w"], ) - count_of_best_matches_per_query_descriptor: Union[Optional[int], ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + count_of_best_matches_per_query_descriptor: Union[Optional[int], Selector(kind=[INTEGER_KIND])] = Field( # type: ignore default=2, description="Advanced parameter overwriting cv.BFMatcher.knnMatch `k` parameter." " Count of best matches found per each query descriptor or less if a query descriptor has less than k possible matches in total.", diff --git a/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py b/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py index a4894bf5c..6aaed0849 100644 --- a/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py +++ b/inference/core/workflows/core_steps/transformations/stitch_ocr_detections/v1.py @@ -13,8 +13,7 @@ INTEGER_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -96,7 +95,7 @@ class BlockManifest(WorkflowBlockManifest): } ) type: Literal["roboflow_core/stitch_ocr_detections@v1"] - predictions: BatchSelector( + predictions: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, ] @@ -135,7 +134,7 @@ class BlockManifest(WorkflowBlockManifest): } }, ) - tolerance: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( + tolerance: Union[int, Selector(kind=[INTEGER_KIND])] = Field( title="Tolerance", description="The tolerance for grouping detections into the same line of text.", default=10, @@ -154,8 +153,8 @@ def ensure_tolerance_greater_than_zero( return value @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["predictions"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/visualizations/background_color/v1.py b/inference/core/workflows/core_steps/visualizations/background_color/v1.py index 829ea60b2..733693abf 100644 --- a/inference/core/workflows/core_steps/visualizations/background_color/v1.py +++ b/inference/core/workflows/core_steps/visualizations/background_color/v1.py @@ -17,7 +17,7 @@ FLOAT_ZERO_TO_ONE_KIND, STRING_KIND, FloatZeroToOne, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -45,13 +45,13 @@ class BackgroundColorManifest(PredictionsVisualizationManifest): } ) - color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + color: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the background.", default="BLACK", examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.background_color"], ) - opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the Mask overlay.", default=0.5, examples=[0.5, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/blur/v1.py b/inference/core/workflows/core_steps/visualizations/blur/v1.py index e6cbd9a34..807e12950 100644 --- a/inference/core/workflows/core_steps/visualizations/blur/v1.py +++ b/inference/core/workflows/core_steps/visualizations/blur/v1.py @@ 
-11,7 +11,7 @@ from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -36,7 +36,7 @@ class BlurManifest(PredictionsVisualizationManifest): } ) - kernel_size: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + kernel_size: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Size of the average pooling kernel used for blurring.", default=15, examples=[15, "$inputs.kernel_size"], diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py b/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py index 99cf3de84..3e6e66bab 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py @@ -15,7 +15,7 @@ FLOAT_ZERO_TO_ONE_KIND, INTEGER_KIND, FloatZeroToOne, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -40,13 +40,13 @@ class BoundingBoxManifest(ColorableVisualizationManifest): } ) - thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the bounding box in pixels.", default=2, examples=[2, "$inputs.thickness"], ) - roundness: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + roundness: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Roundness of the corners of the bounding box.", default=0.0, examples=[0.0, "$inputs.roundness"], diff --git a/inference/core/workflows/core_steps/visualizations/circle/v1.py b/inference/core/workflows/core_steps/visualizations/circle/v1.py index ad2826687..ad96a8f7f 100644 --- a/inference/core/workflows/core_steps/visualizations/circle/v1.py +++ b/inference/core/workflows/core_steps/visualizations/circle/v1.py @@ -13,7 +13,7 @@ from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -38,7 +38,7 @@ class CircleManifest(ColorableVisualizationManifest): } ) - thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=2, examples=[2, "$inputs.thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/color/v1.py b/inference/core/workflows/core_steps/visualizations/color/v1.py index cbc719bb5..fcffc9821 100644 --- a/inference/core/workflows/core_steps/visualizations/color/v1.py +++ b/inference/core/workflows/core_steps/visualizations/color/v1.py @@ -14,7 +14,7 @@ from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, FloatZeroToOne, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -39,7 +39,7 @@ class ColorManifest(ColorableVisualizationManifest): } ) - opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = 
Field( # type: ignore + opacity: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the color overlay.", default=0.5, examples=[0.5, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/common/base.py b/inference/core/workflows/core_steps/visualizations/common/base.py index 94a11f001..dc6442afa 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base.py +++ b/inference/core/workflows/core_steps/visualizations/common/base.py @@ -14,10 +14,7 @@ INSTANCE_SEGMENTATION_PREDICTION_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, - ScalarSelector, - StepOutputImageSelector, - WorkflowImageSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -35,13 +32,13 @@ class VisualizationManifest(WorkflowBlockManifest, ABC): "block_type": "visualization", } ) - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", examples=["$inputs.image", "$steps.cropping.crops"], validation_alias=AliasChoices("image", "images"), ) - copy_image: Union[bool, ScalarSelector(kind=[BOOLEAN_KIND])] = Field( # type: ignore + copy_image: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( # type: ignore description="Duplicate the image contents (vs overwriting the image in place). Deselect for chained visualizations that should stack on previous ones where the intermediate state is not needed.", default=True, examples=[True, False], @@ -80,7 +77,7 @@ def run( class PredictionsVisualizationManifest(VisualizationManifest, ABC): - predictions: BatchSelector( + predictions: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, diff --git a/inference/core/workflows/core_steps/visualizations/common/base_colorable.py b/inference/core/workflows/core_steps/visualizations/common/base_colorable.py index ac8911b66..810f4f3db 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base_colorable.py +++ b/inference/core/workflows/core_steps/visualizations/common/base_colorable.py @@ -14,7 +14,7 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult @@ -74,7 +74,7 @@ class ColorableVisualizationManifest(PredictionsVisualizationManifest, ABC): # "Matplotlib Oranges_R", # "Matplotlib Reds_R", ], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="DEFAULT", description="Color palette to use for annotations.", @@ -83,14 +83,14 @@ class ColorableVisualizationManifest(PredictionsVisualizationManifest, ABC): palette_size: Union[ int, - ScalarSelector(kind=[INTEGER_KIND]), + Selector(kind=[INTEGER_KIND]), ] = Field( # type: ignore default=10, description="Number of colors in the color palette. 
Applies when using a matplotlib `color_palette`.", examples=[10, "$inputs.palette_size"], ) - custom_colors: Union[List[str], ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = ( + custom_colors: Union[List[str], Selector(kind=[LIST_OF_VALUES_KIND])] = ( Field( # type: ignore default=[], description='List of colors to use for annotations when `color_palette` is set to "CUSTOM".', @@ -100,7 +100,7 @@ class ColorableVisualizationManifest(PredictionsVisualizationManifest, ABC): color_axis: Union[ Literal["INDEX", "CLASS", "TRACK"], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="CLASS", description="Strategy to use for mapping colors to annotations.", diff --git a/inference/core/workflows/core_steps/visualizations/corner/v1.py b/inference/core/workflows/core_steps/visualizations/corner/v1.py index 4f940f3e8..866ecb26d 100644 --- a/inference/core/workflows/core_steps/visualizations/corner/v1.py +++ b/inference/core/workflows/core_steps/visualizations/corner/v1.py @@ -13,7 +13,7 @@ from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -38,13 +38,13 @@ class CornerManifest(ColorableVisualizationManifest): } ) - thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=4, examples=[4, "$inputs.thickness"], ) - corner_length: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + corner_length: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Length of the corner lines in pixels.", default=15, examples=[15, "$inputs.corner_length"], diff --git a/inference/core/workflows/core_steps/visualizations/crop/v1.py b/inference/core/workflows/core_steps/visualizations/crop/v1.py index 7b16f66eb..bdf399ace 100644 --- a/inference/core/workflows/core_steps/visualizations/crop/v1.py +++ b/inference/core/workflows/core_steps/visualizations/crop/v1.py @@ -15,7 +15,7 @@ FLOAT_KIND, INTEGER_KIND, STRING_KIND, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -53,20 +53,20 @@ class CropManifest(ColorableVisualizationManifest): "BOTTOM_RIGHT", "CENTER_OF_MASS", ], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="TOP_CENTER", description="The anchor position for placing the crop.", examples=["CENTER", "$inputs.position"], ) - scale_factor: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + scale_factor: Union[float, Selector(kind=[FLOAT_KIND])] = Field( # type: ignore description="The factor by which to scale the cropped image part. 
A factor of 2, for example, would double the size of the cropped area, allowing for a closer view of the detection.", default=2.0, examples=[2.0, "$inputs.scale_factor"], ) - border_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + border_thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline in pixels.", default=2, examples=[2, "$inputs.border_thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/dot/v1.py b/inference/core/workflows/core_steps/visualizations/dot/v1.py index c21f31774..1b63013e8 100644 --- a/inference/core/workflows/core_steps/visualizations/dot/v1.py +++ b/inference/core/workflows/core_steps/visualizations/dot/v1.py @@ -14,7 +14,7 @@ from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, STRING_KIND, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -54,20 +54,20 @@ class DotManifest(ColorableVisualizationManifest): "BOTTOM_RIGHT", "CENTER_OF_MASS", ], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="CENTER", description="The anchor position for placing the dot.", examples=["CENTER", "$inputs.position"], ) - radius: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + radius: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Radius of the dot in pixels.", default=4, examples=[4, "$inputs.radius"], ) - outline_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + outline_thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline of the dot in pixels.", default=0, examples=[2, "$inputs.outline_thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/ellipse/v1.py b/inference/core/workflows/core_steps/visualizations/ellipse/v1.py index 5fa14f2eb..9eeeb9f9b 100644 --- a/inference/core/workflows/core_steps/visualizations/ellipse/v1.py +++ b/inference/core/workflows/core_steps/visualizations/ellipse/v1.py @@ -13,7 +13,7 @@ from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -38,19 +38,19 @@ class EllipseManifest(ColorableVisualizationManifest): } ) - thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=2, examples=[2, "$inputs.thickness"], ) - start_angle: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + start_angle: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Starting angle of the ellipse in degrees.", default=-45, examples=[-45, "$inputs.start_angle"], ) - end_angle: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + end_angle: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Ending angle of the ellipse in degrees.", default=235, examples=[235, "$inputs.end_angle"], diff --git a/inference/core/workflows/core_steps/visualizations/halo/v1.py b/inference/core/workflows/core_steps/visualizations/halo/v1.py index 07738c138..c6e085fb4 100644 
--- a/inference/core/workflows/core_steps/visualizations/halo/v1.py +++ b/inference/core/workflows/core_steps/visualizations/halo/v1.py @@ -18,9 +18,8 @@ FLOAT_ZERO_TO_ONE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, - BatchSelector, FloatZeroToOne, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -46,7 +45,7 @@ class HaloManifest(ColorableVisualizationManifest): } ) - predictions: BatchSelector( + predictions: Selector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] @@ -55,13 +54,13 @@ class HaloManifest(ColorableVisualizationManifest): examples=["$steps.instance_segmentation_model.predictions"], ) - opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the halo overlay.", default=0.8, examples=[0.8, "$inputs.opacity"], ) - kernel_size: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + kernel_size: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Size of the average pooling kernel used for creating the halo.", default=40, examples=[40, "$inputs.kernel_size"], diff --git a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py index cdb6571d7..3bc88b0a2 100644 --- a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py +++ b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py @@ -16,8 +16,7 @@ INTEGER_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -48,7 +47,7 @@ class KeypointManifest(VisualizationManifest): } ) - predictions: BatchSelector( + predictions: Selector( kind=[ KEYPOINT_DETECTION_PREDICTION_KIND, ] @@ -63,13 +62,13 @@ class KeypointManifest(VisualizationManifest): json_schema_extra={"always_visible": True}, ) - color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + color: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the keypoint.", default="#A351FB", examples=["#A351FB", "green", "$inputs.color"], ) - text_color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + text_color: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description="Text color of the keypoint.", default="black", examples=["black", "$inputs.text_color"], @@ -81,7 +80,7 @@ class KeypointManifest(VisualizationManifest): }, }, ) - text_scale: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + text_scale: Union[float, Selector(kind=[FLOAT_KIND])] = Field( # type: ignore description="Scale of the text.", default=0.5, examples=[0.5, "$inputs.text_scale"], @@ -94,7 +93,7 @@ class KeypointManifest(VisualizationManifest): }, ) - text_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the text characters.", default=1, examples=[1, "$inputs.text_thickness"], @@ -107,7 +106,7 @@ class KeypointManifest(VisualizationManifest): }, ) - text_padding: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_padding: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: 
ignore description="Padding around the text in pixels.", default=10, examples=[10, "$inputs.text_padding"], @@ -120,7 +119,7 @@ class KeypointManifest(VisualizationManifest): }, ) - thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline in pixels.", default=2, examples=[2, "$inputs.thickness"], @@ -133,7 +132,7 @@ class KeypointManifest(VisualizationManifest): }, ) - radius: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + radius: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Radius of the keypoint in pixels.", default=10, examples=[10, "$inputs.radius"], diff --git a/inference/core/workflows/core_steps/visualizations/label/v1.py b/inference/core/workflows/core_steps/visualizations/label/v1.py index b54ce8c79..e10342dba 100644 --- a/inference/core/workflows/core_steps/visualizations/label/v1.py +++ b/inference/core/workflows/core_steps/visualizations/label/v1.py @@ -16,7 +16,7 @@ FLOAT_KIND, INTEGER_KIND, STRING_KIND, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -54,7 +54,7 @@ class LabelManifest(ColorableVisualizationManifest): "Tracker Id", "Time In Zone", ], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="Class", description="The type of text to display.", @@ -74,38 +74,38 @@ class LabelManifest(ColorableVisualizationManifest): "BOTTOM_RIGHT", "CENTER_OF_MASS", ], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="TOP_LEFT", description="The anchor position for placing the label.", examples=["CENTER", "$inputs.text_position"], ) - text_color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + text_color: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the text.", default="WHITE", examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.text_color"], ) - text_scale: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + text_scale: Union[float, Selector(kind=[FLOAT_KIND])] = Field( # type: ignore description="Scale of the text.", default=1.0, examples=[1.0, "$inputs.text_scale"], ) - text_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the text characters.", default=1, examples=[1, "$inputs.text_thickness"], ) - text_padding: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_padding: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Padding around the text in pixels.", default=10, examples=[10, "$inputs.text_padding"], ) - border_radius: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + border_radius: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Radius of the label in pixels.", default=0, examples=[0, "$inputs.border_radius"], diff --git a/inference/core/workflows/core_steps/visualizations/line_zone/v1.py b/inference/core/workflows/core_steps/visualizations/line_zone/v1.py index 9d854aa8b..818419a90 100644 --- a/inference/core/workflows/core_steps/visualizations/line_zone/v1.py +++ b/inference/core/workflows/core_steps/visualizations/line_zone/v1.py @@ -19,9 +19,8 
@@ INTEGER_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchSelector, FloatZeroToOne, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -47,43 +46,43 @@ class LineCounterZoneVisualizationManifest(VisualizationManifest): "block_type": "visualization", } ) - zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, Selector(kind=[LIST_OF_VALUES_KIND]), Selector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Line in the format [[x1, y1], [x2, y2]] consisting of exactly two points.", examples=[[[0, 50], [500, 50]], "$inputs.zones"], ) - color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + color: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the zone.", default="#5bb573", examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.background_color"], ) - thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=2, examples=[2, "$inputs.thickness"], ) - text_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + text_thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the text in pixels.", default=1, examples=[1, "$inputs.text_thickness"], ) - text_scale: Union[float, ScalarSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + text_scale: Union[float, Selector(kind=[FLOAT_KIND])] = Field( # type: ignore description="Scale of the text.", default=1.0, examples=[1.0, "$inputs.text_scale"], ) - count_in: Union[int, ScalarSelector(kind=[INTEGER_KIND]), BatchSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + count_in: Union[int, Selector(kind=[INTEGER_KIND]), Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Reference to the number of objects that crossed into the line zone.", default=0, examples=["$steps.line_counter.count_in"], json_schema_extra={"always_visible": True}, ) - count_out: Union[int, ScalarSelector(kind=[INTEGER_KIND]), BatchSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + count_out: Union[int, Selector(kind=[INTEGER_KIND]), Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Reference to the number of objects that crossed out of the line zone.", default=0, examples=["$steps.line_counter.count_out"], json_schema_extra={"always_visible": True}, ) - opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the Mask overlay.", default=0.3, examples=[0.3, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/mask/v1.py b/inference/core/workflows/core_steps/visualizations/mask/v1.py index bb8b7568f..d762ac77e 100644 --- a/inference/core/workflows/core_steps/visualizations/mask/v1.py +++ b/inference/core/workflows/core_steps/visualizations/mask/v1.py @@ -14,9 +14,8 @@ from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, - BatchSelector, FloatZeroToOne, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -42,7 +41,7 
@@ class MaskManifest(ColorableVisualizationManifest): } ) - predictions: BatchSelector( + predictions: Selector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] @@ -51,7 +50,7 @@ class MaskManifest(ColorableVisualizationManifest): examples=["$steps.instance_segmentation_model.predictions"], ) - opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the Mask overlay.", default=0.5, examples=[0.5, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py b/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py index 64399f05c..15d75938e 100644 --- a/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py +++ b/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py @@ -19,9 +19,8 @@ KEYPOINT_DETECTION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, STRING_KIND, - BatchSelector, FloatZeroToOne, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -52,7 +51,7 @@ class ModelComparisonManifest(VisualizationManifest): } ) - predictions_a: BatchSelector( + predictions_a: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -63,13 +62,13 @@ class ModelComparisonManifest(VisualizationManifest): examples=["$steps.object_detection_model.predictions"], ) - color_a: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + color_a: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the areas Model A predicted that Model B did not..", default="GREEN", examples=["GREEN", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.color_a"], ) - predictions_b: BatchSelector( + predictions_b: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, @@ -80,19 +79,19 @@ class ModelComparisonManifest(VisualizationManifest): examples=["$steps.object_detection_model.predictions"], ) - color_b: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + color_b: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the areas Model B predicted that Model A did not.", default="RED", examples=["RED", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.color_b"], ) - background_color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + background_color: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the areas neither model predicted.", default="BLACK", examples=["BLACK", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.background_color"], ) - opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the overlay.", default=0.7, examples=[0.7, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/pixelate/v1.py b/inference/core/workflows/core_steps/visualizations/pixelate/v1.py index a15ab297a..b788ec424 100644 --- a/inference/core/workflows/core_steps/visualizations/pixelate/v1.py +++ b/inference/core/workflows/core_steps/visualizations/pixelate/v1.py @@ -11,7 +11,7 @@ from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from 
inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -36,7 +36,7 @@ class PixelateManifest(PredictionsVisualizationManifest): } ) - pixel_size: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + pixel_size: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Size of the pixelation.", default=20, examples=[20, "$inputs.pixel_size"], diff --git a/inference/core/workflows/core_steps/visualizations/polygon/v1.py b/inference/core/workflows/core_steps/visualizations/polygon/v1.py index 5a0ad2e11..2858b7c3a 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon/v1.py +++ b/inference/core/workflows/core_steps/visualizations/polygon/v1.py @@ -17,8 +17,7 @@ from inference.core.workflows.execution_engine.entities.types import ( INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -44,7 +43,7 @@ class PolygonManifest(ColorableVisualizationManifest): } ) - predictions: BatchSelector( + predictions: Selector( kind=[ INSTANCE_SEGMENTATION_PREDICTION_KIND, ] @@ -53,7 +52,7 @@ class PolygonManifest(ColorableVisualizationManifest): examples=["$steps.instance_segmentation_model.predictions"], ) - thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline in pixels.", default=2, examples=[2, "$inputs.thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py b/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py index e78477109..406155150 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py +++ b/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py @@ -17,9 +17,8 @@ FLOAT_ZERO_TO_ONE_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchSelector, FloatZeroToOne, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -45,17 +44,17 @@ class PolygonZoneVisualizationManifest(VisualizationManifest): "block_type": "visualization", } ) - zone: Union[list, BatchSelector(kind=[LIST_OF_VALUES_KIND]), ScalarSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore + zone: Union[list, Selector(kind=[LIST_OF_VALUES_KIND]), Selector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore description="Polygon zones (one for each batch) in a format [[(x1, y1), (x2, y2), (x3, y3), ...], ...];" " each zone must consist of more than 2 points", examples=["$inputs.zones"], ) - color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + color: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the zone.", default="#5bb573", examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.background_color"], ) - opacity: Union[FloatZeroToOne, ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore + opacity: Union[FloatZeroToOne, Selector(kind=[FLOAT_ZERO_TO_ONE_KIND])] = Field( # type: ignore description="Transparency of the Mask overlay.", default=0.3, examples=[0.3, "$inputs.opacity"], diff --git a/inference/core/workflows/core_steps/visualizations/reference_path/v1.py 
b/inference/core/workflows/core_steps/visualizations/reference_path/v1.py index b803acf5e..2c921b856 100644 --- a/inference/core/workflows/core_steps/visualizations/reference_path/v1.py +++ b/inference/core/workflows/core_steps/visualizations/reference_path/v1.py @@ -14,8 +14,7 @@ INTEGER_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -45,18 +44,18 @@ class ReferencePathVisualizationManifest(VisualizationManifest): ) reference_path: Union[ list, - BatchSelector(kind=[LIST_OF_VALUES_KIND]), - ScalarSelector(kind=[LIST_OF_VALUES_KIND]), + Selector(kind=[LIST_OF_VALUES_KIND]), + Selector(kind=[LIST_OF_VALUES_KIND]), ] = Field( # type: ignore description="Reference path in a format [(x1, y1), (x2, y2), (x3, y3), ...]", examples=["$inputs.expected_path"], ) - color: Union[str, ScalarSelector(kind=[STRING_KIND])] = Field( # type: ignore + color: Union[str, Selector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the zone.", default="#5bb573", examples=["WHITE", "#FFFFFF", "rgb(255, 255, 255)" "$inputs.background_color"], ) - thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the lines in pixels.", default=2, examples=[2, "$inputs.thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/trace/v1.py b/inference/core/workflows/core_steps/visualizations/trace/v1.py index 0606e8f7e..46309f5d5 100644 --- a/inference/core/workflows/core_steps/visualizations/trace/v1.py +++ b/inference/core/workflows/core_steps/visualizations/trace/v1.py @@ -15,7 +15,7 @@ from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, STRING_KIND, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -51,18 +51,18 @@ class TraceManifest(ColorableVisualizationManifest): "BOTTOM_RIGHT", "CENTER_OF_MASS", ], - ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="CENTER", description="The anchor position for placing the label.", examples=["CENTER", "$inputs.text_position"], ) - trace_length: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( + trace_length: Union[int, Selector(kind=[INTEGER_KIND])] = Field( default=30, description="Maximum number of historical tracked objects positions to display.", examples=[30, "$inputs.trace_length"], ) - thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the track visualization line.", default=1, examples=[1, "$inputs.track_thickness"], diff --git a/inference/core/workflows/core_steps/visualizations/triangle/v1.py b/inference/core/workflows/core_steps/visualizations/triangle/v1.py index e222d0ab1..6b8637617 100644 --- a/inference/core/workflows/core_steps/visualizations/triangle/v1.py +++ b/inference/core/workflows/core_steps/visualizations/triangle/v1.py @@ -14,7 +14,7 @@ from inference.core.workflows.execution_engine.entities.types import ( INTEGER_KIND, STRING_KIND, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest @@ -52,26 +52,26 @@ class TriangleManifest(ColorableVisualizationManifest): "BOTTOM_RIGHT", "CENTER_OF_MASS", ], - 
ScalarSelector(kind=[STRING_KIND]), + Selector(kind=[STRING_KIND]), ] = Field( # type: ignore default="TOP_CENTER", description="The anchor position for placing the triangle.", examples=["CENTER", "$inputs.position"], ) - base: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + base: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Base width of the triangle in pixels.", default=10, examples=[10, "$inputs.base"], ) - height: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + height: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Height of the triangle in pixels.", default=10, examples=[10, "$inputs.height"], ) - outline_thickness: Union[int, ScalarSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + outline_thickness: Union[int, Selector(kind=[INTEGER_KIND])] = Field( # type: ignore description="Thickness of the outline of the triangle in pixels.", default=0, examples=[2, "$inputs.outline_thickness"], diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index 8faeb731e..75ad0a43a 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -1,4 +1,4 @@ -from typing import List, Optional +from typing import List, Literal, Optional, Union from pydantic import AliasChoices, BaseModel, Field, StringConstraints from typing_extensions import Annotated @@ -1067,41 +1067,6 @@ def StepOutputSelector(kind: Optional[List[Kind]] = None): ] -def BatchSelector(kind: Optional[List[Kind]] = None): - if kind is None: - kind = [WILDCARD_KIND] - json_schema_extra = { - REFERENCE_KEY: True, - SELECTED_ELEMENT_KEY: BATCH_AS_SELECTED_ELEMENT, - KIND_KEY: [k.dict() for k in kind], - SELECTOR_POINTS_TO_BATCH_KEY: True, - } - return Annotated[ - str, - StringConstraints( - pattern=r"(^\$steps\.[A-Za-z_\-0-9]+\.[A-Za-z_*0-9\-]+$)|(^\$inputs.[A-Za-z_0-9\-]+$)" - ), - Field(json_schema_extra=json_schema_extra), - ] - - -def ScalarSelector(kind: Optional[List[Kind]] = None): - if kind is None: - kind = [WILDCARD_KIND] - json_schema_extra = { - REFERENCE_KEY: True, - SELECTED_ELEMENT_KEY: SCALAR_AS_SELECTED_ELEMENT, - KIND_KEY: [k.dict() for k in kind], - } - return Annotated[ - str, - StringConstraints( - pattern=r"(^\$steps\.[A-Za-z_\-0-9]+\.[A-Za-z_*0-9\-]+$)|(^\$inputs.[A-Za-z_0-9\-]+$)" - ), - Field(json_schema_extra=json_schema_extra), - ] - - def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): if kind is None: kind = [WILDCARD_KIND] @@ -1158,3 +1123,29 @@ def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): } ), ] + + +def Selector( + kind: Optional[List[Kind]] = None, + points_to_batch: Union[bool, Literal["dynamic"]] = "dynamic", +): + if kind is None: + kind = [WILDCARD_KIND] + selected_element_key = "any" + if points_to_batch is True: + selected_element_key = f"{selected_element_key}_batch" + elif points_to_batch is False: + selected_element_key = f"{selected_element_key}_scalar" + json_schema_extra = { + REFERENCE_KEY: True, + SELECTED_ELEMENT_KEY: selected_element_key, + KIND_KEY: [k.dict() for k in kind], + SELECTOR_POINTS_TO_BATCH_KEY: points_to_batch, + } + return Annotated[ + str, + StringConstraints( + pattern=r"(^\$steps\.[A-Za-z_\-0-9]+\.[A-Za-z_*0-9\-]+$)|(^\$inputs.[A-Za-z_0-9\-]+$)" + ), + Field(json_schema_extra=json_schema_extra), + ] diff --git 
a/inference/core/workflows/execution_engine/introspection/schema_parser.py b/inference/core/workflows/execution_engine/introspection/schema_parser.py index 2fb11b5bf..eccef909e 100644 --- a/inference/core/workflows/execution_engine/introspection/schema_parser.py +++ b/inference/core/workflows/execution_engine/introspection/schema_parser.py @@ -1,7 +1,7 @@ import itertools from collections import OrderedDict, defaultdict from dataclasses import replace -from typing import Dict, Optional, Type +from typing import Dict, Optional, Set, Type from inference.core.workflows.execution_engine.entities.types import ( KIND_KEY, @@ -59,10 +59,12 @@ def parse_block_manifest( dimensionality_reference_property = ( manifest_type.get_dimensionality_reference_property() ) + named_batch_inputs = set(manifest_type.get_parameters_accepting_batches()) return parse_block_manifest_schema( schema=schema, inputs_dimensionality_offsets=inputs_dimensionality_offsets, dimensionality_reference_property=dimensionality_reference_property, + named_batch_inputs=named_batch_inputs, ) @@ -70,6 +72,7 @@ def parse_block_manifest_schema( schema: dict, inputs_dimensionality_offsets: Dict[str, int], dimensionality_reference_property: Optional[str], + named_batch_inputs: Set[str], ) -> BlockManifestMetadata: primitive_types = retrieve_primitives_from_schema( schema=schema, @@ -78,6 +81,7 @@ def parse_block_manifest_schema( schema=schema, inputs_dimensionality_offsets=inputs_dimensionality_offsets, dimensionality_reference_property=dimensionality_reference_property, + named_batch_inputs=named_batch_inputs, ) return BlockManifestMetadata( primitive_types=primitive_types, @@ -226,6 +230,7 @@ def retrieve_selectors_from_schema( schema: dict, inputs_dimensionality_offsets: Dict[str, int], dimensionality_reference_property: Optional[str], + named_batch_inputs: Set[str], ) -> Dict[str, SelectorDefinition]: result = [] for property_name, property_definition in schema[PROPERTIES_KEY].items(): @@ -246,6 +251,7 @@ def retrieve_selectors_from_schema( property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, is_list_element=True, + named_batch_inputs=named_batch_inputs, ) elif ( property_definition.get(TYPE_KEY) == OBJECT_TYPE @@ -258,6 +264,7 @@ def retrieve_selectors_from_schema( property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, is_dict_element=True, + named_batch_inputs=named_batch_inputs, ) else: selector = retrieve_selectors_from_simple_property( @@ -266,6 +273,7 @@ def retrieve_selectors_from_schema( property_definition=property_definition, property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, + named_batch_inputs=named_batch_inputs, ) if selector is not None: result.append(selector) @@ -278,10 +286,14 @@ def retrieve_selectors_from_simple_property( property_definition: dict, property_dimensionality_offset: int, is_dimensionality_reference_property: bool, + named_batch_inputs: Set[str], is_list_element: bool = False, is_dict_element: bool = False, ) -> Optional[SelectorDefinition]: if REFERENCE_KEY in property_definition: + points_to_batch = property_definition.get(SELECTOR_POINTS_TO_BATCH_KEY, False) + if points_to_batch == "dynamic": + points_to_batch = property_name in named_batch_inputs allowed_references = [ ReferenceDefinition( 
selected_element=property_definition[SELECTED_ELEMENT_KEY], @@ -289,9 +301,7 @@ def retrieve_selectors_from_simple_property( Kind.model_validate(k) for k in property_definition.get(KIND_KEY, []) ], - points_to_batch=property_definition.get( - SELECTOR_POINTS_TO_BATCH_KEY, False - ), + points_to_batch=points_to_batch, ) ] return SelectorDefinition( @@ -313,6 +323,7 @@ def retrieve_selectors_from_simple_property( property_definition=property_definition[ITEMS_KEY], property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, + named_batch_inputs=named_batch_inputs, is_list_element=True, ) if property_defines_union(property_definition=property_definition): @@ -324,6 +335,7 @@ def retrieve_selectors_from_simple_property( is_dict_element=is_dict_element, property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, + named_batch_inputs=named_batch_inputs, ) return None @@ -344,6 +356,7 @@ def retrieve_selectors_from_union_definition( is_dict_element: bool, property_dimensionality_offset: int, is_dimensionality_reference_property: bool, + named_batch_inputs: Set[str], ) -> Optional[SelectorDefinition]: union_types = ( union_definition.get(ANY_OF_KEY, []) @@ -358,6 +371,7 @@ def retrieve_selectors_from_union_definition( property_definition=type_definition, property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, + named_batch_inputs=named_batch_inputs, is_list_element=is_list_element, ) if result is None: diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 7691c9dd4..8c075c36d 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -678,11 +678,14 @@ def denote_data_flow_for_step( parsed_step_input_selectors: List[ParsedSelector] = execution_graph.nodes[node][ PARSED_NODE_INPUT_SELECTORS_PROPERTY ] - input_property2batch_expected = {} + input_property2batch_expected = defaultdict(set) for parsed_selector in parsed_step_input_selectors: - input_property2batch_expected[parsed_selector.definition.property_name] = { - ref.points_to_batch for ref in parsed_selector.definition.allowed_references - } + input_property2batch_expected[parsed_selector.definition.property_name].update( + { + ref.points_to_batch + for ref in parsed_selector.definition.allowed_references + } + ) for property_name, input_definition in input_data.items(): if property_name not in input_property2batch_expected: # only values plugged vi selectors are to be validated diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py index 41d84e824..04f4920b4 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py @@ -12,9 +12,8 @@ from inference.core.workflows.execution_engine.entities.base import OutputDefinition from inference.core.workflows.execution_engine.entities.types import ( WILDCARD_KIND, - BatchSelector, Kind, - ScalarSelector, + Selector, StepOutputImageSelector, StepOutputSelector, WorkflowImageSelector, @@ -251,10 +250,8 @@ def 
collect_python_types_for_selectors( result.append(WorkflowParameterSelector(kind=selector_kind)) elif selector_type is SelectorType.STEP_OUTPUT: result.append(StepOutputSelector(kind=selector_kind)) - elif selector_type is SelectorType.BATCH: - result.append(BatchSelector(kind=selector_kind)) - elif selector_type is SelectorType.SCALAR: - result.append(ScalarSelector(kind=selector_kind)) + elif selector_type is SelectorType.GENERIC: + result.append(Selector(kind=selector_kind)) else: raise DynamicBlockError( public_message=f"Could not recognise selector type `{selector_type}` declared for input `{input_name}` " @@ -362,8 +359,19 @@ def assembly_manifest_class_methods( describe_outputs = lambda cls: outputs_definitions setattr(manifest_class, "describe_outputs", classmethod(describe_outputs)) setattr(manifest_class, "get_actual_outputs", describe_outputs) - accepts_batch_input = lambda cls: manifest_description.accepts_batch_input + accepts_batch_input = ( + lambda cls: len(manifest_description.batch_oriented_parameters) > 0 + or manifest_description.accepts_batch_input + ) setattr(manifest_class, "accepts_batch_input", classmethod(accepts_batch_input)) + get_parameters_accepting_batches = ( + lambda cls: manifest_description.batch_oriented_parameters + ) + setattr( + manifest_class, + "get_parameters_accepting_batches", + classmethod(get_parameters_accepting_batches), + ) input_dimensionality_offsets = collect_input_dimensionality_offsets( inputs=manifest_description.inputs ) diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py index 53faec027..7b0a90bc7 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py @@ -9,8 +9,7 @@ class SelectorType(Enum): STEP_OUTPUT_IMAGE = "step_output_image" INPUT_PARAMETER = "input_parameter" STEP_OUTPUT = "step_output" - BATCH = "batch" - SCALAR = "scalar" + GENERIC = "generic" class ValueType(Enum): @@ -106,6 +105,11 @@ class ManifestDescription(BaseModel): default=False, description="Flag to decide if empty (optional) values will be shipped as run() function parameters", ) + batch_oriented_parameters: List[str] = Field( + default_factory=list, + description="List of batch-oriented parameters. 
Value will override `accepts_batch_input` if non-empty " + "list is provided, `accepts_batch_input` kept not to break backward compatibility.", + ) class PythonCode(BaseModel): diff --git a/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py b/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py index 2f38e488f..6d2f7b8ea 100644 --- a/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py +++ b/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py @@ -36,21 +36,34 @@ "workflow_video_metadata": {"WorkflowVideoMetadata"}, "workflow_image": {"WorkflowImage", "InferenceImage"}, "workflow_parameter": {"WorkflowParameter", "InferenceParameter"}, - "scalar": {"WorkflowParameter", "InferenceParameter"}, - "batch": { + "any_scalar": {"WorkflowParameter", "InferenceParameter"}, + "any_batch": { "WorkflowVideoMetadata", "WorkflowImage", "InferenceImage", "WorkflowBatchInput", }, + "any": { + "WorkflowVideoMetadata", + "WorkflowImage", + "InferenceImage", + "WorkflowBatchInput", + "WorkflowParameter", + "InferenceParameter", + }, } INPUT_TYPE_TO_SELECTED_ELEMENT = { - "WorkflowVideoMetadata": {"workflow_video_metadata", "batch"}, - "WorkflowImage": {"workflow_image", "batch"}, - "InferenceImage": {"workflow_image", "batch"}, - "WorkflowParameter": {"workflow_parameter", "scalar"}, - "InferenceParameter": {"workflow_parameter", "scalar"}, - "WorkflowBatchInput": {"batch", "workflow_image", "workflow_video_metadata"}, + "WorkflowVideoMetadata": {"workflow_video_metadata", "any_batch", "any"}, + "WorkflowImage": {"workflow_image", "any_batch", "any"}, + "InferenceImage": {"workflow_image", "any_batch", "any"}, + "WorkflowParameter": {"workflow_parameter", "any_scalar", "any"}, + "InferenceParameter": {"workflow_parameter", "any_scalar", "any"}, + "WorkflowBatchInput": { + "any_batch", + "workflow_image", + "workflow_video_metadata", + "any", + }, } diff --git a/inference/core/workflows/prototypes/block.py b/inference/core/workflows/prototypes/block.py index bdc9f644f..6010970a5 100644 --- a/inference/core/workflows/prototypes/block.py +++ b/inference/core/workflows/prototypes/block.py @@ -54,8 +54,12 @@ def get_output_dimensionality_offset( return 0 @classmethod - def accepts_batch_input(cls) -> bool: - return False + def accepts_batch_input(cls) -> Union[bool, List[str]]: + return len(cls.get_parameters_accepting_batches()) > 0 + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return [] @classmethod def accepts_empty_values(cls) -> bool: diff --git a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py index ad5aac892..309d7dbbc 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py @@ -8,7 +8,7 @@ ) from inference.core.workflows.execution_engine.entities.types import ( FLOAT_ZERO_TO_ONE_KIND, - BatchSelector, + Selector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( @@ -61,8 +61,7 @@ class MixedInputWithoutBatchesBlockManifest(WorkflowBlockManifest): ) type: Literal["MixedInputWithoutBatchesBlock"] mixed_parameter: Union[ - WorkflowParameterSelector(), - BatchSelector(), + Selector(), Any, ] @@ -97,14 +96,14 @@ class 
MixedInputWithBatchesBlockManifest(WorkflowBlockManifest): ) type: Literal["MixedInputWithBatchesBlock"] mixed_parameter: Union[ - WorkflowParameterSelector(), - BatchSelector(), + Selector(points_to_batch=True), + Selector(points_to_batch=False), Any, ] @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["mixed_parameter"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -138,11 +137,11 @@ class BatchInputBlockProcessingBatchesManifest(WorkflowBlockManifest): } ) type: Literal["BatchInputBlockProcessingBatches"] - batch_parameter: BatchSelector() + batch_parameter: Selector() @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["batch_parameter"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -174,7 +173,7 @@ class BatchInputBlockProcessingNotBatchesManifest(WorkflowBlockManifest): } ) type: Literal["BatchInputBlockNotProcessingBatches"] - batch_parameter: BatchSelector() + batch_parameter: Selector() @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -192,7 +191,7 @@ class BatchInputNotProcessingBatchesBlock(WorkflowBlock): def get_manifest(cls) -> Type[WorkflowBlockManifest]: return BatchInputBlockProcessingNotBatchesManifest - def run(self, batch_parameter: Batch[Any]) -> BlockResult: + def run(self, batch_parameter: Any) -> BlockResult: return {"float_value": 0.4} @@ -206,7 +205,7 @@ class CompoundNonBatchInputBlockManifest(WorkflowBlockManifest): } ) type: Literal["CompoundNonBatchInputBlock"] - compound_parameter: Dict[str, Union[WorkflowParameterSelector(), Any]] + compound_parameter: Dict[str, Union[Selector(), Any]] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -239,12 +238,12 @@ class CompoundMixedInputBlockManifest(WorkflowBlockManifest): ) type: Literal["CompoundMixedInputBlockManifestBlock"] compound_parameter: Dict[ - str, Union[WorkflowParameterSelector(), BatchSelector(), Any] + str, Union[Selector(points_to_batch=True), Selector(points_to_batch=False), Any] ] @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["compound_parameter"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -281,11 +280,11 @@ class CompoundStrictBatchBlockManifest(WorkflowBlockManifest): } ) type: Literal["CompoundStrictBatchBlock"] - compound_parameter: Dict[str, Union[BatchSelector()]] + compound_parameter: Dict[str, Selector()] @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["compound_parameter"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -320,7 +319,7 @@ class CompoundNonStrictBatchBlockManifest(WorkflowBlockManifest): } ) type: Literal["CompoundNonStrictBatchBlock"] - compound_parameter: Dict[str, Union[BatchSelector()]] + compound_parameter: Dict[str, Union[Selector()]] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/tests/workflows/integration_tests/execution/stub_plugins/scalar_selectors_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/scalar_selectors_plugin/__init__.py index 3d8630aaa..8a708bb3e 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/scalar_selectors_plugin/__init__.py +++ 
b/tests/workflows/integration_tests/execution/stub_plugins/scalar_selectors_plugin/__init__.py @@ -13,8 +13,7 @@ IMAGE_KIND, LIST_OF_VALUES_KIND, STRING_KIND, - BatchSelector, - ScalarSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -49,15 +48,15 @@ def run(self) -> BlockResult: class BlockManifest(WorkflowBlockManifest): type: Literal["secret_store_user"] - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", ) - secret: ScalarSelector(kind=[STRING_KIND]) + secret: Selector(kind=[STRING_KIND]) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> List[str]: + return ["image"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -82,7 +81,7 @@ def run(self, image: Batch[WorkflowImageData], secret: str) -> BlockResult: class BatchSecretBlockManifest(WorkflowBlockManifest): type: Literal["batch_secret_store"] - image: BatchSelector(kind=[IMAGE_KIND]) = Field( + image: Selector(kind=[IMAGE_KIND]) = Field( title="Input Image", description="The input image for this step.", ) @@ -110,7 +109,7 @@ def run(self, image: WorkflowImageData) -> BlockResult: class NonBatchSecretStoreUserBlockManifest(WorkflowBlockManifest): type: Literal["non_batch_secret_store_user"] - secret: ScalarSelector(kind=[STRING_KIND]) + secret: Selector(kind=[STRING_KIND]) @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -135,8 +134,8 @@ def run(self, secret: str) -> BlockResult: class BlockWithReferenceImagesManifest(WorkflowBlockManifest): type: Literal["reference_images_comparison"] - image: BatchSelector(kind=[IMAGE_KIND]) - reference_images: Union[ScalarSelector(kind=[LIST_OF_VALUES_KIND]), Any] + image: Selector(kind=[IMAGE_KIND]) + reference_images: Union[Selector(kind=[LIST_OF_VALUES_KIND]), Any] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: From c2c08c1021427cb1d47fb3ac19b62d6c83ee939c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 8 Nov 2024 10:29:46 +0100 Subject: [PATCH 23/67] WIP --- .../workflows/execution_engine/constants.py | 1 + .../v1/compiler/graph_constructor.py | 17 ++---- .../execution_engine/v1/executor/core.py | 11 +++- .../execution_data_manager/manager.py | 9 ++- .../step_input_assembler.py | 56 +++++++++++++++++++ .../__init__.py | 2 + ...st_workflow_with_arbitrary_batch_inputs.py | 19 +++++-- 7 files changed, 94 insertions(+), 21 deletions(-) diff --git a/inference/core/workflows/execution_engine/constants.py b/inference/core/workflows/execution_engine/constants.py index 055dcc594..cadcf1204 100644 --- a/inference/core/workflows/execution_engine/constants.py +++ b/inference/core/workflows/execution_engine/constants.py @@ -1,5 +1,6 @@ NODE_COMPILATION_OUTPUT_PROPERTY = "node_compilation_output" PARSED_NODE_INPUT_SELECTORS_PROPERTY = "parsed_node_input_selectors" +SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY = "scalar_parameters_to_broadcast" STEP_DEFINITION_PROPERTY = "definition" WORKFLOW_INPUT_BATCH_LINEAGE_ID = "" IMAGE_TYPE_KEY = "type" diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 8c075c36d..05b35e6d6 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -21,7 
+21,7 @@ from inference.core.workflows.execution_engine.constants import ( NODE_COMPILATION_OUTPUT_PROPERTY, PARSED_NODE_INPUT_SELECTORS_PROPERTY, - WORKFLOW_INPUT_BATCH_LINEAGE_ID, + WORKFLOW_INPUT_BATCH_LINEAGE_ID, SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY, ) from inference.core.workflows.execution_engine.entities.base import ( InputType, @@ -686,6 +686,8 @@ def denote_data_flow_for_step( for ref in parsed_selector.definition.allowed_references } ) + if SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY not in execution_graph.nodes[node]: + execution_graph.nodes[node][SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY] = set() for property_name, input_definition in input_data.items(): if property_name not in input_property2batch_expected: # only values plugged vi selectors are to be validated @@ -718,16 +720,9 @@ def denote_data_flow_for_step( and batch_input_expected == {True} and False in actual_input_is_batch ): - raise ExecutionGraphStructureError( - public_message=f"Detected invalid reference plugged " - f"into property `{property_name}` of step `{node}` - the step " - f"property strictly requires batch-oriented inputs, yet the input selector " - f"holds non-batch oriented input - this indicates the " - f"problem with construction of your Workflow - usually the problem occurs when " - f"non-batch oriented step inputs are filled with outputs of non batch-oriented " - f"steps or non batch-oriented inputs.", - context="workflow_compilation | execution_graph_construction", - ) + execution_graph.nodes[node][ + SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY + ].add(property_name) if not parameters_with_batch_inputs: data_lineage = [] else: diff --git a/inference/core/workflows/execution_engine/v1/executor/core.py b/inference/core/workflows/execution_engine/v1/executor/core.py index b3816bde1..100cf89e0 100644 --- a/inference/core/workflows/execution_engine/v1/executor/core.py +++ b/inference/core/workflows/execution_engine/v1/executor/core.py @@ -1,6 +1,6 @@ from datetime import datetime from functools import partial -from typing import Any, Callable, Dict, List, Optional +from typing import Any, Callable, Dict, List, Optional, Set from inference.core import logger from inference.core.workflows.errors import ( @@ -8,6 +8,7 @@ StepExecutionError, WorkflowError, ) +from inference.core.workflows.execution_engine.constants import SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY from inference.core.workflows.execution_engine.profiling.core import ( NullWorkflowsProfiler, WorkflowsProfiler, @@ -172,10 +173,14 @@ def run_simd_step( step_instance = workflow.steps[step_name].step step_manifest = workflow.steps[step_name].manifest if step_manifest.accepts_batch_input(): + scalar_inputs_to_broadcast = workflow.execution_graph.nodes[step_selector].get( + SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY, set() + ) return run_simd_step_in_batch_mode( step_selector=step_selector, step_instance=step_instance, execution_data_manager=execution_data_manager, + scalar_inputs_to_broadcast=scalar_inputs_to_broadcast, profiler=profiler, ) return run_simd_step_in_non_batch_mode( @@ -190,6 +195,7 @@ def run_simd_step_in_batch_mode( step_selector: str, step_instance: WorkflowBlock, execution_data_manager: ExecutionDataManager, + scalar_inputs_to_broadcast: Set[str], profiler: Optional[WorkflowsProfiler] = None, ) -> None: with profiler.profile_execution_phase( @@ -198,7 +204,8 @@ def run_simd_step_in_batch_mode( metadata={"step": step_selector}, ): step_input = execution_data_manager.get_simd_step_input( - step_selector=step_selector + 
step_selector=step_selector, + scalar_inputs_to_broadcast=scalar_inputs_to_broadcast, ) with profiler.profile_execution_phase( name="step_code_execution", diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index 349cc1e46..5ac6da066 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, Generator, List, Optional, Tuple, Union +from typing import Any, Dict, Generator, List, Optional, Tuple, Union, Set from networkx import DiGraph @@ -149,7 +149,11 @@ def register_non_simd_step_output( outputs=output, ) - def get_simd_step_input(self, step_selector: str) -> BatchModeSIMDStepInput: + def get_simd_step_input( + self, + step_selector: str, + scalar_inputs_to_broadcast: Set[str], + ) -> BatchModeSIMDStepInput: if not self.is_step_simd(step_selector=step_selector): raise ExecutionEngineRuntimeError( public_message=f"Error in execution engine. In context of non-SIMD step: {step_selector} attempts to " @@ -170,6 +174,7 @@ def get_simd_step_input(self, step_selector: str) -> BatchModeSIMDStepInput: execution_cache=self._execution_cache, dynamic_batches_manager=self._dynamic_batches_manager, branching_manager=self._branching_manager, + scalar_inputs_to_broadcast=scalar_inputs_to_broadcast, ) def iterate_over_simd_step_input( diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index 6d1a56d45..6fd48f056 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -217,7 +217,10 @@ def construct_simd_step_input( execution_cache: ExecutionCache, dynamic_batches_manager: DynamicBatchesManager, branching_manager: BranchingManager, + scalar_inputs_to_broadcast: Optional[Set[str]] = None ) -> BatchModeSIMDStepInput: + if scalar_inputs_to_broadcast is None: + scalar_inputs_to_broadcast = set() masks = construct_mask_for_all_inputs_dimensionalities( step_node=step_node, branching_manager=branching_manager, @@ -228,6 +231,7 @@ def construct_simd_step_input( masks=masks, runtime_parameters=runtime_parameters, execution_cache=execution_cache, + scalar_inputs_to_broadcast=scalar_inputs_to_broadcast, ) @@ -322,10 +326,12 @@ def prepare_parameters( masks: Dict[int, Optional[Set[DynamicBatchIndex]]], runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, + scalar_inputs_to_broadcast: Set[str], ) -> BatchModeSIMDStepInput: result = {} indices_for_parameter = {} guard_of_indices_wrapping = GuardForIndicesWrapping() + compound_inputs = set() for parameter_name, parameter_specs in step_node.input_data.items(): if parameter_specs.is_compound_input(): result[parameter_name], indices_for_parameter[parameter_name] = ( @@ -339,6 +345,7 @@ def prepare_parameters( guard_of_indices_wrapping=guard_of_indices_wrapping, ) ) + compound_inputs.add(parameter_name) else: result[parameter_name], indices_for_parameter[parameter_name] = ( get_non_compound_parameter_value( @@ -369,6 +376,12 @@ def prepare_parameters( empty_indices = get_empty_batch_elements_indices(value=result) 
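A rough sketch (assumed helper, not part of the patch) of the broadcasting rule the following hunk implements: batch-oriented data passes through untouched, while a scalar is repeated once per dynamic batch index. The Batch constructor call mirrors the one added below; the import path is assumed to match the rest of the codebase.

from typing import Any, List

from inference.core.workflows.execution_engine.entities.base import Batch


def broadcast_if_scalar(value: Any, indices: List[tuple]) -> Batch:
    # Already batch-oriented data is returned as-is; a scalar is repeated
    # so that batch-only step inputs can consume it element-wise.
    if isinstance(value, Batch):
        return value
    return Batch(content=[value] * len(indices), indices=indices)


# A scalar secret consumed by a batch-only step input, e.g.
# broadcast_if_scalar("my-secret", [(0,), (1,), (2,)]),
# behaves like a Batch of three identical "my-secret" elements.
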
indices = [e for e in indices if e not in empty_indices] result = remove_indices(value=result, indices=empty_indices) + result = broadcast_scalar_inputs( + parameters=result, + indices=indices, + scalar_inputs_to_broadcast=scalar_inputs_to_broadcast, + compound_inputs=compound_inputs, + ) return BatchModeSIMDStepInput( indices=indices, parameters=result, @@ -629,6 +642,49 @@ def remove_indices(value: Any, indices: Set[DynamicBatchIndex]) -> Any: return value +def broadcast_scalar_inputs( + parameters: Dict[str, Any], + indices: List[DynamicBatchIndex], + scalar_inputs_to_broadcast: Set[str], + compound_inputs: Set[str], +) -> Dict[str, Any]: + print(f"scalar_inputs_to_broadcast: {scalar_inputs_to_broadcast}") + for input_name in scalar_inputs_to_broadcast: + parameters[input_name] = broadcast_scalar_input( + input_parameter=parameters[input_name], + indices=indices, + is_compound=input_name in compound_inputs, + ) + return parameters + + +def broadcast_scalar_input( + input_parameter: Any, + indices: List[DynamicBatchIndex], + is_compound: bool = False, +) -> Any: + if is_compound and isinstance(input_parameter, dict): + return { + k: broadcast_scalar_input( + input_parameter=v, + indices=indices, + ) for k, v in input_parameter.items() + } + if is_compound and isinstance(input_parameter, list): + return [ + broadcast_scalar_input( + input_parameter=v, + indices=indices, + ) for v in input_parameter + ] + if isinstance(input_parameter, Batch): + return input_parameter + return Batch( + content=[input_parameter] * len(indices), + indices=indices, + ) + + def unfold_parameters( parameters: Dict[str, Any] ) -> Generator[Dict[str, Any], None, None]: diff --git a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py index 309d7dbbc..21bab4d90 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py @@ -160,6 +160,8 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: return BatchInputBlockProcessingBatchesManifest def run(self, batch_parameter: Batch[Any]) -> BlockResult: + if not isinstance(batch_parameter, Batch): + raise ValueError("Batch[X] must be provided") return [{"float_value": 0.4}] * len(batch_parameter) diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py index 2b2f293c1..446ccb79e 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -792,14 +792,21 @@ def test_workflow_when_non_batch_oriented_step_feeds_batch_oriented_step_operati "workflows_core.api_key": None, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_OPERATING_BATCH_WISE, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) # when - with pytest.raises(ExecutionGraphStructureError): - _ = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_OPERATING_BATCH_WISE, - 
init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + result = execution_engine.run( + runtime_parameters={ + "non_batch_parameter": "dummy" + } + ) + + # then + print(result) WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_MIXED_INPUT_STEP = { From 75ce9a49a02339b8e81c1652019f1a5b79d14f14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 8 Nov 2024 14:22:59 +0100 Subject: [PATCH 24/67] WIP --- .../workflows/core_steps/formatters/csv/v1.py | 5 +- .../detections_transformation/v1.py | 8 ++- .../workflows/execution_engine/constants.py | 1 - .../execution_engine/entities/types.py | 10 +--- .../introspection/entities.py | 2 +- .../introspection/schema_parser.py | 59 +++++++++++-------- .../v1/compiler/graph_constructor.py | 26 ++++---- .../v1/dynamic_blocks/block_assembler.py | 8 ++- .../v1/dynamic_blocks/entities.py | 8 ++- .../execution_engine/v1/executor/core.py | 7 --- .../execution_data_manager/manager.py | 2 - .../step_input_assembler.py | 54 ----------------- .../v1/introspection/inputs_discovery.py | 18 ++---- inference/core/workflows/prototypes/block.py | 6 +- .../__init__.py | 5 +- ...st_workflow_with_arbitrary_batch_inputs.py | 21 +++---- 16 files changed, 93 insertions(+), 147 deletions(-) diff --git a/inference/core/workflows/core_steps/formatters/csv/v1.py b/inference/core/workflows/core_steps/formatters/csv/v1.py index 03fd5e2af..cb22eb5cf 100644 --- a/inference/core/workflows/core_steps/formatters/csv/v1.py +++ b/inference/core/workflows/core_steps/formatters/csv/v1.py @@ -136,8 +136,7 @@ class BlockManifest(WorkflowBlockManifest): columns_data: Dict[ str, Union[ - Selector(points_to_batch=False), - Selector(points_to_batch=True), + Selector(), str, int, float, @@ -174,7 +173,7 @@ def protect_timestamp_column(cls, value: dict) -> dict: return value @classmethod - def get_parameters_accepting_batches(cls) -> List[str]: + def get_parameters_accepting_mixed_input(cls) -> List[str]: return ["columns_data"] @classmethod diff --git a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py index 86cb349c5..6cf539937 100644 --- a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py @@ -99,7 +99,7 @@ class BlockManifest(WorkflowBlockManifest): ) operations_parameters: Dict[ str, - Union[Selector(points_to_batch=True), Selector(points_to_batch=False)], + Selector(), ] = Field( description="References to additional parameters that may be provided in runtime to parameterize operations", examples=[ @@ -112,7 +112,11 @@ class BlockManifest(WorkflowBlockManifest): @classmethod def get_parameters_accepting_batches(cls) -> List[str]: - return ["predictions", "operations_parameters"] + return ["predictions"] + + @classmethod + def get_parameters_accepting_mixed_input(cls) -> List[str]: + return ["operations_parameters"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/execution_engine/constants.py b/inference/core/workflows/execution_engine/constants.py index cadcf1204..055dcc594 100644 --- a/inference/core/workflows/execution_engine/constants.py +++ b/inference/core/workflows/execution_engine/constants.py @@ -1,6 +1,5 @@ NODE_COMPILATION_OUTPUT_PROPERTY = "node_compilation_output" PARSED_NODE_INPUT_SELECTORS_PROPERTY = 
"parsed_node_input_selectors" -SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY = "scalar_parameters_to_broadcast" STEP_DEFINITION_PROPERTY = "definition" WORKFLOW_INPUT_BATCH_LINEAGE_ID = "" IMAGE_TYPE_KEY = "type" diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index 75ad0a43a..bbc7d3079 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -1127,20 +1127,14 @@ def WorkflowParameterSelector(kind: Optional[List[Kind]] = None): def Selector( kind: Optional[List[Kind]] = None, - points_to_batch: Union[bool, Literal["dynamic"]] = "dynamic", ): if kind is None: kind = [WILDCARD_KIND] - selected_element_key = "any" - if points_to_batch is True: - selected_element_key = f"{selected_element_key}_batch" - elif points_to_batch is False: - selected_element_key = f"{selected_element_key}_scalar" json_schema_extra = { REFERENCE_KEY: True, - SELECTED_ELEMENT_KEY: selected_element_key, + SELECTED_ELEMENT_KEY: "any", KIND_KEY: [k.dict() for k in kind], - SELECTOR_POINTS_TO_BATCH_KEY: points_to_batch, + SELECTOR_POINTS_TO_BATCH_KEY: "dynamic", } return Annotated[ str, diff --git a/inference/core/workflows/execution_engine/introspection/entities.py b/inference/core/workflows/execution_engine/introspection/entities.py index 786f9a633..3a8938ad2 100644 --- a/inference/core/workflows/execution_engine/introspection/entities.py +++ b/inference/core/workflows/execution_engine/introspection/entities.py @@ -18,7 +18,7 @@ class ReferenceDefinition: selected_element: str kind: List[Kind] - points_to_batch: bool + points_to_batch: Set[bool] @dataclass(frozen=True) diff --git a/inference/core/workflows/execution_engine/introspection/schema_parser.py b/inference/core/workflows/execution_engine/introspection/schema_parser.py index eccef909e..2eff1bfa1 100644 --- a/inference/core/workflows/execution_engine/introspection/schema_parser.py +++ b/inference/core/workflows/execution_engine/introspection/schema_parser.py @@ -59,12 +59,14 @@ def parse_block_manifest( dimensionality_reference_property = ( manifest_type.get_dimensionality_reference_property() ) - named_batch_inputs = set(manifest_type.get_parameters_accepting_batches()) + inputs_accepting_batches = set(manifest_type.get_parameters_accepting_batches()) + inputs_accepting_batches_and_scalars = set(manifest_type.get_parameters_accepting_mixed_input()) return parse_block_manifest_schema( schema=schema, inputs_dimensionality_offsets=inputs_dimensionality_offsets, dimensionality_reference_property=dimensionality_reference_property, - named_batch_inputs=named_batch_inputs, + inputs_accepting_batches=inputs_accepting_batches, + inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars ) @@ -72,7 +74,8 @@ def parse_block_manifest_schema( schema: dict, inputs_dimensionality_offsets: Dict[str, int], dimensionality_reference_property: Optional[str], - named_batch_inputs: Set[str], + inputs_accepting_batches: Set[str], + inputs_accepting_batches_and_scalars: Set[str], ) -> BlockManifestMetadata: primitive_types = retrieve_primitives_from_schema( schema=schema, @@ -81,7 +84,8 @@ def parse_block_manifest_schema( schema=schema, inputs_dimensionality_offsets=inputs_dimensionality_offsets, dimensionality_reference_property=dimensionality_reference_property, - named_batch_inputs=named_batch_inputs, + inputs_accepting_batches=inputs_accepting_batches, + 
inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, ) return BlockManifestMetadata( primitive_types=primitive_types, @@ -230,7 +234,8 @@ def retrieve_selectors_from_schema( schema: dict, inputs_dimensionality_offsets: Dict[str, int], dimensionality_reference_property: Optional[str], - named_batch_inputs: Set[str], + inputs_accepting_batches: Set[str], + inputs_accepting_batches_and_scalars: Set[str], ) -> Dict[str, SelectorDefinition]: result = [] for property_name, property_definition in schema[PROPERTIES_KEY].items(): @@ -251,7 +256,8 @@ def retrieve_selectors_from_schema( property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, is_list_element=True, - named_batch_inputs=named_batch_inputs, + inputs_accepting_batches=inputs_accepting_batches, + inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, ) elif ( property_definition.get(TYPE_KEY) == OBJECT_TYPE @@ -264,7 +270,8 @@ def retrieve_selectors_from_schema( property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, is_dict_element=True, - named_batch_inputs=named_batch_inputs, + inputs_accepting_batches=inputs_accepting_batches, + inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, ) else: selector = retrieve_selectors_from_simple_property( @@ -273,7 +280,8 @@ def retrieve_selectors_from_schema( property_definition=property_definition, property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, - named_batch_inputs=named_batch_inputs, + inputs_accepting_batches=inputs_accepting_batches, + inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, ) if selector is not None: result.append(selector) @@ -286,14 +294,20 @@ def retrieve_selectors_from_simple_property( property_definition: dict, property_dimensionality_offset: int, is_dimensionality_reference_property: bool, - named_batch_inputs: Set[str], + inputs_accepting_batches: Set[str], + inputs_accepting_batches_and_scalars: Set[str], is_list_element: bool = False, is_dict_element: bool = False, ) -> Optional[SelectorDefinition]: if REFERENCE_KEY in property_definition: - points_to_batch = property_definition.get(SELECTOR_POINTS_TO_BATCH_KEY, False) - if points_to_batch == "dynamic": - points_to_batch = property_name in named_batch_inputs + declared_points_to_batch = property_definition.get(SELECTOR_POINTS_TO_BATCH_KEY, False) + if declared_points_to_batch == "dynamic": + if property_name in inputs_accepting_batches_and_scalars: + points_to_batch = {True, False} + else: + points_to_batch = {property_name in inputs_accepting_batches} + else: + points_to_batch = {declared_points_to_batch} allowed_references = [ ReferenceDefinition( selected_element=property_definition[SELECTED_ELEMENT_KEY], @@ -323,7 +337,8 @@ def retrieve_selectors_from_simple_property( property_definition=property_definition[ITEMS_KEY], property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, - named_batch_inputs=named_batch_inputs, + inputs_accepting_batches=inputs_accepting_batches, + inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, is_list_element=True, ) if property_defines_union(property_definition=property_definition): @@ -335,7 +350,8 @@ def retrieve_selectors_from_simple_property( 
is_dict_element=is_dict_element, property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, - named_batch_inputs=named_batch_inputs, + inputs_accepting_batches=inputs_accepting_batches, + inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, ) return None @@ -356,7 +372,8 @@ def retrieve_selectors_from_union_definition( is_dict_element: bool, property_dimensionality_offset: int, is_dimensionality_reference_property: bool, - named_batch_inputs: Set[str], + inputs_accepting_batches: Set[str], + inputs_accepting_batches_and_scalars: Set[str], ) -> Optional[SelectorDefinition]: union_types = ( union_definition.get(ANY_OF_KEY, []) @@ -371,7 +388,8 @@ def retrieve_selectors_from_union_definition( property_definition=type_definition, property_dimensionality_offset=property_dimensionality_offset, is_dimensionality_reference_property=is_dimensionality_reference_property, - named_batch_inputs=named_batch_inputs, + inputs_accepting_batches=inputs_accepting_batches, + inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, is_list_element=is_list_element, ) if result is None: @@ -381,19 +399,14 @@ def retrieve_selectors_from_union_definition( itertools.chain.from_iterable(r.allowed_references for r in results) ) results_references_kind_by_selected_element = defaultdict(set) - results_references_batch_pointing_by_selected_element = defaultdict(bool) + results_references_batch_pointing_by_selected_element = defaultdict(set) for reference in results_references: results_references_kind_by_selected_element[reference.selected_element].update( reference.kind ) results_references_batch_pointing_by_selected_element[ reference.selected_element - ] = ( - results_references_batch_pointing_by_selected_element[ - reference.selected_element - ] - or reference.points_to_batch - ) + ].update(reference.points_to_batch) merged_references = [] for ( reference_selected_element, diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 05b35e6d6..b86aab06a 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -21,7 +21,7 @@ from inference.core.workflows.execution_engine.constants import ( NODE_COMPILATION_OUTPUT_PROPERTY, PARSED_NODE_INPUT_SELECTORS_PROPERTY, - WORKFLOW_INPUT_BATCH_LINEAGE_ID, SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY, + WORKFLOW_INPUT_BATCH_LINEAGE_ID, ) from inference.core.workflows.execution_engine.entities.base import ( InputType, @@ -30,7 +30,6 @@ ) from inference.core.workflows.execution_engine.entities.types import ( STEP_AS_SELECTED_ELEMENT, - WILDCARD_KIND, Kind, ) from inference.core.workflows.execution_engine.introspection.entities import ( @@ -680,14 +679,8 @@ def denote_data_flow_for_step( ] input_property2batch_expected = defaultdict(set) for parsed_selector in parsed_step_input_selectors: - input_property2batch_expected[parsed_selector.definition.property_name].update( - { - ref.points_to_batch - for ref in parsed_selector.definition.allowed_references - } - ) - if SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY not in execution_graph.nodes[node]: - execution_graph.nodes[node][SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY] = set() + for reference in parsed_selector.definition.allowed_references: + 
input_property2batch_expected[parsed_selector.definition.property_name].update(reference.points_to_batch) for property_name, input_definition in input_data.items(): if property_name not in input_property2batch_expected: # only values plugged vi selectors are to be validated @@ -720,9 +713,16 @@ def denote_data_flow_for_step( and batch_input_expected == {True} and False in actual_input_is_batch ): - execution_graph.nodes[node][ - SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY - ].add(property_name) + raise ExecutionGraphStructureError( + public_message=f"Detected invalid reference plugged " + f"into property `{property_name}` of step `{node}` - the step " + f"property strictly requires batch-oriented inputs, yet the input selector " + f"holds non-batch oriented input - this indicates the " + f"problem with construction of your Workflow - usually the problem occurs when " + f"non-batch oriented step inputs are filled with outputs of non batch-oriented " + f"steps or non batch-oriented inputs.", + context="workflow_compilation | execution_graph_construction", + ) if not parameters_with_batch_inputs: data_lineage = [] else: diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py index 04f4920b4..b905712ea 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py @@ -360,7 +360,7 @@ def assembly_manifest_class_methods( setattr(manifest_class, "describe_outputs", classmethod(describe_outputs)) setattr(manifest_class, "get_actual_outputs", describe_outputs) accepts_batch_input = ( - lambda cls: len(manifest_description.batch_oriented_parameters) > 0 + lambda cls: len(manifest_description.batch_oriented_parameters) > 0 or len(manifest_description.mixed_parameters) or manifest_description.accepts_batch_input ) setattr(manifest_class, "accepts_batch_input", classmethod(accepts_batch_input)) @@ -372,6 +372,12 @@ def assembly_manifest_class_methods( "get_parameters_accepting_batches", classmethod(get_parameters_accepting_batches), ) + get_parameters_accepting_mixed_input = lambda cls: manifest_description.mixed_parameters + setattr( + manifest_class, + "get_parameters_accepting_mixed_input", + classmethod(get_parameters_accepting_mixed_input), + ) input_dimensionality_offsets = collect_input_dimensionality_offsets( inputs=manifest_description.inputs ) diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py index 7b0a90bc7..d230dbf59 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py @@ -108,7 +108,13 @@ class ManifestDescription(BaseModel): batch_oriented_parameters: List[str] = Field( default_factory=list, description="List of batch-oriented parameters. Value will override `accepts_batch_input` if non-empty " - "list is provided, `accepts_batch_input` kept not to break backward compatibility.", + "list is provided, `accepts_batch_input` is kept not to break backward compatibility.", + ) + mixed_parameters: List[str] = Field( + default_factory=list, + description="List of parameters accepting both batches and scalars at the same time. 
" + "Value will override `accepts_batch_input` if non-empty " + "list is provided, `accepts_batch_input` is kept not to break backward compatibility.", ) diff --git a/inference/core/workflows/execution_engine/v1/executor/core.py b/inference/core/workflows/execution_engine/v1/executor/core.py index 100cf89e0..494c7436c 100644 --- a/inference/core/workflows/execution_engine/v1/executor/core.py +++ b/inference/core/workflows/execution_engine/v1/executor/core.py @@ -8,7 +8,6 @@ StepExecutionError, WorkflowError, ) -from inference.core.workflows.execution_engine.constants import SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY from inference.core.workflows.execution_engine.profiling.core import ( NullWorkflowsProfiler, WorkflowsProfiler, @@ -173,14 +172,10 @@ def run_simd_step( step_instance = workflow.steps[step_name].step step_manifest = workflow.steps[step_name].manifest if step_manifest.accepts_batch_input(): - scalar_inputs_to_broadcast = workflow.execution_graph.nodes[step_selector].get( - SCALAR_PARAMETERS_TO_BROADCAST_PROPERTY, set() - ) return run_simd_step_in_batch_mode( step_selector=step_selector, step_instance=step_instance, execution_data_manager=execution_data_manager, - scalar_inputs_to_broadcast=scalar_inputs_to_broadcast, profiler=profiler, ) return run_simd_step_in_non_batch_mode( @@ -195,7 +190,6 @@ def run_simd_step_in_batch_mode( step_selector: str, step_instance: WorkflowBlock, execution_data_manager: ExecutionDataManager, - scalar_inputs_to_broadcast: Set[str], profiler: Optional[WorkflowsProfiler] = None, ) -> None: with profiler.profile_execution_phase( @@ -205,7 +199,6 @@ def run_simd_step_in_batch_mode( ): step_input = execution_data_manager.get_simd_step_input( step_selector=step_selector, - scalar_inputs_to_broadcast=scalar_inputs_to_broadcast, ) with profiler.profile_execution_phase( name="step_code_execution", diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index 5ac6da066..2cd0088b1 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -152,7 +152,6 @@ def register_non_simd_step_output( def get_simd_step_input( self, step_selector: str, - scalar_inputs_to_broadcast: Set[str], ) -> BatchModeSIMDStepInput: if not self.is_step_simd(step_selector=step_selector): raise ExecutionEngineRuntimeError( @@ -174,7 +173,6 @@ def get_simd_step_input( execution_cache=self._execution_cache, dynamic_batches_manager=self._dynamic_batches_manager, branching_manager=self._branching_manager, - scalar_inputs_to_broadcast=scalar_inputs_to_broadcast, ) def iterate_over_simd_step_input( diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index 6fd48f056..cc5b01064 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -217,10 +217,7 @@ def construct_simd_step_input( execution_cache: ExecutionCache, dynamic_batches_manager: DynamicBatchesManager, branching_manager: BranchingManager, - scalar_inputs_to_broadcast: Optional[Set[str]] = None ) -> BatchModeSIMDStepInput: - if 
scalar_inputs_to_broadcast is None: - scalar_inputs_to_broadcast = set() masks = construct_mask_for_all_inputs_dimensionalities( step_node=step_node, branching_manager=branching_manager, @@ -231,7 +228,6 @@ def construct_simd_step_input( masks=masks, runtime_parameters=runtime_parameters, execution_cache=execution_cache, - scalar_inputs_to_broadcast=scalar_inputs_to_broadcast, ) @@ -326,7 +322,6 @@ def prepare_parameters( masks: Dict[int, Optional[Set[DynamicBatchIndex]]], runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, - scalar_inputs_to_broadcast: Set[str], ) -> BatchModeSIMDStepInput: result = {} indices_for_parameter = {} @@ -376,12 +371,6 @@ def prepare_parameters( empty_indices = get_empty_batch_elements_indices(value=result) indices = [e for e in indices if e not in empty_indices] result = remove_indices(value=result, indices=empty_indices) - result = broadcast_scalar_inputs( - parameters=result, - indices=indices, - scalar_inputs_to_broadcast=scalar_inputs_to_broadcast, - compound_inputs=compound_inputs, - ) return BatchModeSIMDStepInput( indices=indices, parameters=result, @@ -642,49 +631,6 @@ def remove_indices(value: Any, indices: Set[DynamicBatchIndex]) -> Any: return value -def broadcast_scalar_inputs( - parameters: Dict[str, Any], - indices: List[DynamicBatchIndex], - scalar_inputs_to_broadcast: Set[str], - compound_inputs: Set[str], -) -> Dict[str, Any]: - print(f"scalar_inputs_to_broadcast: {scalar_inputs_to_broadcast}") - for input_name in scalar_inputs_to_broadcast: - parameters[input_name] = broadcast_scalar_input( - input_parameter=parameters[input_name], - indices=indices, - is_compound=input_name in compound_inputs, - ) - return parameters - - -def broadcast_scalar_input( - input_parameter: Any, - indices: List[DynamicBatchIndex], - is_compound: bool = False, -) -> Any: - if is_compound and isinstance(input_parameter, dict): - return { - k: broadcast_scalar_input( - input_parameter=v, - indices=indices, - ) for k, v in input_parameter.items() - } - if is_compound and isinstance(input_parameter, list): - return [ - broadcast_scalar_input( - input_parameter=v, - indices=indices, - ) for v in input_parameter - ] - if isinstance(input_parameter, Batch): - return input_parameter - return Batch( - content=[input_parameter] * len(indices), - indices=indices, - ) - - def unfold_parameters( parameters: Dict[str, Any] ) -> Generator[Dict[str, Any], None, None]: diff --git a/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py b/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py index 6d2f7b8ea..3dfb1d746 100644 --- a/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py +++ b/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py @@ -36,13 +36,6 @@ "workflow_video_metadata": {"WorkflowVideoMetadata"}, "workflow_image": {"WorkflowImage", "InferenceImage"}, "workflow_parameter": {"WorkflowParameter", "InferenceParameter"}, - "any_scalar": {"WorkflowParameter", "InferenceParameter"}, - "any_batch": { - "WorkflowVideoMetadata", - "WorkflowImage", - "InferenceImage", - "WorkflowBatchInput", - }, "any": { "WorkflowVideoMetadata", "WorkflowImage", @@ -53,13 +46,12 @@ }, } INPUT_TYPE_TO_SELECTED_ELEMENT = { - "WorkflowVideoMetadata": {"workflow_video_metadata", "any_batch", "any"}, - "WorkflowImage": {"workflow_image", "any_batch", "any"}, - "InferenceImage": {"workflow_image", "any_batch", "any"}, - "WorkflowParameter": {"workflow_parameter", "any_scalar", 
"any"}, - "InferenceParameter": {"workflow_parameter", "any_scalar", "any"}, + "WorkflowVideoMetadata": {"workflow_video_metadata", "any"}, + "WorkflowImage": {"workflow_image", "any"}, + "InferenceImage": {"workflow_image", "any"}, + "WorkflowParameter": {"workflow_parameter", "any"}, + "InferenceParameter": {"workflow_parameter", "any"}, "WorkflowBatchInput": { - "any_batch", "workflow_image", "workflow_video_metadata", "any", diff --git a/inference/core/workflows/prototypes/block.py b/inference/core/workflows/prototypes/block.py index 6010970a5..52ee672ce 100644 --- a/inference/core/workflows/prototypes/block.py +++ b/inference/core/workflows/prototypes/block.py @@ -55,12 +55,16 @@ def get_output_dimensionality_offset( @classmethod def accepts_batch_input(cls) -> Union[bool, List[str]]: - return len(cls.get_parameters_accepting_batches()) > 0 + return len(cls.get_parameters_accepting_batches()) > 0 or len(cls.get_parameters_accepting_mixed_input()) @classmethod def get_parameters_accepting_batches(cls) -> List[str]: return [] + @classmethod + def get_parameters_accepting_mixed_input(cls) -> List[str]: + return [] + @classmethod def accepts_empty_values(cls) -> bool: return False diff --git a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py index 21bab4d90..544cfac9d 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py @@ -96,8 +96,7 @@ class MixedInputWithBatchesBlockManifest(WorkflowBlockManifest): ) type: Literal["MixedInputWithBatchesBlock"] mixed_parameter: Union[ - Selector(points_to_batch=True), - Selector(points_to_batch=False), + Selector(), Any, ] @@ -240,7 +239,7 @@ class CompoundMixedInputBlockManifest(WorkflowBlockManifest): ) type: Literal["CompoundMixedInputBlockManifestBlock"] compound_parameter: Dict[ - str, Union[Selector(points_to_batch=True), Selector(points_to_batch=False), Any] + str, Union[Selector(), Any] ] @classmethod diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py index 446ccb79e..c7adcbe86 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -11,7 +11,7 @@ from inference.core.workflows.core_steps.common.entities import StepExecutionMode from inference.core.workflows.errors import ( ExecutionGraphStructureError, - RuntimeInputError, + RuntimeInputError, AssumptionError, ) from inference.core.workflows.execution_engine.core import ExecutionEngine from inference.core.workflows.execution_engine.introspection import blocks_loader @@ -792,21 +792,14 @@ def test_workflow_when_non_batch_oriented_step_feeds_batch_oriented_step_operati "workflows_core.api_key": None, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } - execution_engine = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_OPERATING_BATCH_WISE, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) # when - result = execution_engine.run( - runtime_parameters={ - 
"non_batch_parameter": "dummy" - } - ) - - # then - print(result) + with pytest.raises(AssumptionError): + _ = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_OPERATING_BATCH_WISE, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_MIXED_INPUT_STEP = { From 0d859a95e2f26361c452e45b15e68ca28177b382 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 8 Nov 2024 15:17:51 +0100 Subject: [PATCH 25/67] Add abstraction to mark mixed inputs --- .../introspection/schema_parser.py | 10 ++++++--- .../v1/compiler/graph_constructor.py | 16 ++++++++------ .../v1/dynamic_blocks/block_assembler.py | 7 ++++-- .../v1/dynamic_blocks/entities.py | 4 ++-- .../execution_data_manager/manager.py | 2 +- inference/core/workflows/prototypes/block.py | 4 +++- .../unit_tests/core/cache/test_serializers.py | 16 ++++++++------ .../usage_tracking/test_collector.py | 10 +++++++-- .../__init__.py | 8 +++---- ...st_workflow_with_arbitrary_batch_inputs.py | 5 +++-- .../introspection/test_schema_parser.py | 22 +++++++++---------- .../introspection/test_selectors_parser.py | 4 ++-- 12 files changed, 63 insertions(+), 45 deletions(-) diff --git a/inference/core/workflows/execution_engine/introspection/schema_parser.py b/inference/core/workflows/execution_engine/introspection/schema_parser.py index 2eff1bfa1..8e5327110 100644 --- a/inference/core/workflows/execution_engine/introspection/schema_parser.py +++ b/inference/core/workflows/execution_engine/introspection/schema_parser.py @@ -60,13 +60,15 @@ def parse_block_manifest( manifest_type.get_dimensionality_reference_property() ) inputs_accepting_batches = set(manifest_type.get_parameters_accepting_batches()) - inputs_accepting_batches_and_scalars = set(manifest_type.get_parameters_accepting_mixed_input()) + inputs_accepting_batches_and_scalars = set( + manifest_type.get_parameters_accepting_mixed_input() + ) return parse_block_manifest_schema( schema=schema, inputs_dimensionality_offsets=inputs_dimensionality_offsets, dimensionality_reference_property=dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, - inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars + inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, ) @@ -300,7 +302,9 @@ def retrieve_selectors_from_simple_property( is_dict_element: bool = False, ) -> Optional[SelectorDefinition]: if REFERENCE_KEY in property_definition: - declared_points_to_batch = property_definition.get(SELECTOR_POINTS_TO_BATCH_KEY, False) + declared_points_to_batch = property_definition.get( + SELECTOR_POINTS_TO_BATCH_KEY, False + ) if declared_points_to_batch == "dynamic": if property_name in inputs_accepting_batches_and_scalars: points_to_batch = {True, False} diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index b86aab06a..f1a253b41 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -680,7 +680,9 @@ def denote_data_flow_for_step( input_property2batch_expected = defaultdict(set) for parsed_selector in parsed_step_input_selectors: for reference in parsed_selector.definition.allowed_references: - 
input_property2batch_expected[parsed_selector.definition.property_name].update(reference.points_to_batch) + input_property2batch_expected[ + parsed_selector.definition.property_name + ].update(reference.points_to_batch) for property_name, input_definition in input_data.items(): if property_name not in input_property2batch_expected: # only values plugged vi selectors are to be validated @@ -715,12 +717,12 @@ def denote_data_flow_for_step( ): raise ExecutionGraphStructureError( public_message=f"Detected invalid reference plugged " - f"into property `{property_name}` of step `{node}` - the step " - f"property strictly requires batch-oriented inputs, yet the input selector " - f"holds non-batch oriented input - this indicates the " - f"problem with construction of your Workflow - usually the problem occurs when " - f"non-batch oriented step inputs are filled with outputs of non batch-oriented " - f"steps or non batch-oriented inputs.", + f"into property `{property_name}` of step `{node}` - the step " + f"property strictly requires batch-oriented inputs, yet the input selector " + f"holds non-batch oriented input - this indicates the " + f"problem with construction of your Workflow - usually the problem occurs when " + f"non-batch oriented step inputs are filled with outputs of non batch-oriented " + f"steps or non batch-oriented inputs.", context="workflow_compilation | execution_graph_construction", ) if not parameters_with_batch_inputs: diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py index b905712ea..d372407a3 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py @@ -360,7 +360,8 @@ def assembly_manifest_class_methods( setattr(manifest_class, "describe_outputs", classmethod(describe_outputs)) setattr(manifest_class, "get_actual_outputs", describe_outputs) accepts_batch_input = ( - lambda cls: len(manifest_description.batch_oriented_parameters) > 0 or len(manifest_description.mixed_parameters) + lambda cls: len(manifest_description.batch_oriented_parameters) > 0 + or len(manifest_description.mixed_parameters) or manifest_description.accepts_batch_input ) setattr(manifest_class, "accepts_batch_input", classmethod(accepts_batch_input)) @@ -372,7 +373,9 @@ def assembly_manifest_class_methods( "get_parameters_accepting_batches", classmethod(get_parameters_accepting_batches), ) - get_parameters_accepting_mixed_input = lambda cls: manifest_description.mixed_parameters + get_parameters_accepting_mixed_input = ( + lambda cls: manifest_description.mixed_parameters + ) setattr( manifest_class, "get_parameters_accepting_mixed_input", diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py index d230dbf59..eee8a906c 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py @@ -113,8 +113,8 @@ class ManifestDescription(BaseModel): mixed_parameters: List[str] = Field( default_factory=list, description="List of parameters accepting both batches and scalars at the same time. 
" - "Value will override `accepts_batch_input` if non-empty " - "list is provided, `accepts_batch_input` is kept not to break backward compatibility.", + "Value will override `accepts_batch_input` if non-empty " + "list is provided, `accepts_batch_input` is kept not to break backward compatibility.", ) diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index 2cd0088b1..e0c7178f1 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, Generator, List, Optional, Tuple, Union, Set +from typing import Any, Dict, Generator, List, Optional, Set, Tuple, Union from networkx import DiGraph diff --git a/inference/core/workflows/prototypes/block.py b/inference/core/workflows/prototypes/block.py index 52ee672ce..34d08807b 100644 --- a/inference/core/workflows/prototypes/block.py +++ b/inference/core/workflows/prototypes/block.py @@ -55,7 +55,9 @@ def get_output_dimensionality_offset( @classmethod def accepts_batch_input(cls) -> Union[bool, List[str]]: - return len(cls.get_parameters_accepting_batches()) > 0 or len(cls.get_parameters_accepting_mixed_input()) + return len(cls.get_parameters_accepting_batches()) > 0 or len( + cls.get_parameters_accepting_mixed_input() + ) @classmethod def get_parameters_accepting_batches(cls) -> List[str]: diff --git a/tests/inference/unit_tests/core/cache/test_serializers.py b/tests/inference/unit_tests/core/cache/test_serializers.py index 8c982f6de..0d294e31b 100644 --- a/tests/inference/unit_tests/core/cache/test_serializers.py +++ b/tests/inference/unit_tests/core/cache/test_serializers.py @@ -1,9 +1,11 @@ import os from unittest.mock import MagicMock + import pytest + from inference.core.cache.serializers import ( - to_cachable_inference_item, build_condensed_response, + to_cachable_inference_item, ) from inference.core.entities.requests.inference import ( ClassificationInferenceRequest, @@ -11,16 +13,16 @@ ) from inference.core.entities.responses.inference import ( ClassificationInferenceResponse, - MultiLabelClassificationInferenceResponse, + ClassificationPrediction, InstanceSegmentationInferenceResponse, + InstanceSegmentationPrediction, + Keypoint, KeypointsDetectionInferenceResponse, + KeypointsPrediction, + MultiLabelClassificationInferenceResponse, + MultiLabelClassificationPrediction, ObjectDetectionInferenceResponse, ObjectDetectionPrediction, - ClassificationPrediction, - MultiLabelClassificationPrediction, - InstanceSegmentationPrediction, - KeypointsPrediction, - Keypoint, Point, ) diff --git a/tests/inference/unit_tests/usage_tracking/test_collector.py b/tests/inference/unit_tests/usage_tracking/test_collector.py index 18f75ff7d..f701a0b73 100644 --- a/tests/inference/unit_tests/usage_tracking/test_collector.py +++ b/tests/inference/unit_tests/usage_tracking/test_collector.py @@ -809,7 +809,11 @@ def test_zip_usage_payloads_with_different_exec_session_ids(): def test_system_info_with_dedicated_deployment_id(): # given - system_info = UsageCollector.system_info(ip_address="w.x.y.z", hostname="hostname01", dedicated_deployment_id="deployment01") + system_info = UsageCollector.system_info( + ip_address="w.x.y.z", + hostname="hostname01", + dedicated_deployment_id="deployment01", + ) # then expected_system_info = { @@ -823,7 +827,9 @@ 
def test_system_info_with_dedicated_deployment_id(): def test_system_info_with_no_dedicated_deployment_id(): # given - system_info = UsageCollector.system_info(ip_address="w.x.y.z", hostname="hostname01") + system_info = UsageCollector.system_info( + ip_address="w.x.y.z", hostname="hostname01" + ) # then expected_system_info = { diff --git a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py index 544cfac9d..522d986f6 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py @@ -101,7 +101,7 @@ class MixedInputWithBatchesBlockManifest(WorkflowBlockManifest): ] @classmethod - def get_parameters_accepting_batches(cls) -> List[str]: + def get_parameters_accepting_mixed_input(cls) -> List[str]: return ["mixed_parameter"] @classmethod @@ -238,12 +238,10 @@ class CompoundMixedInputBlockManifest(WorkflowBlockManifest): } ) type: Literal["CompoundMixedInputBlockManifestBlock"] - compound_parameter: Dict[ - str, Union[Selector(), Any] - ] + compound_parameter: Dict[str, Union[Selector(), Any]] @classmethod - def get_parameters_accepting_batches(cls) -> List[str]: + def get_parameters_accepting_mixed_input(cls) -> List[str]: return ["compound_parameter"] @classmethod diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py index c7adcbe86..51802efef 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -10,8 +10,9 @@ from inference.core.utils.image_utils import load_image from inference.core.workflows.core_steps.common.entities import StepExecutionMode from inference.core.workflows.errors import ( + AssumptionError, ExecutionGraphStructureError, - RuntimeInputError, AssumptionError, + RuntimeInputError, ) from inference.core.workflows.execution_engine.core import ExecutionEngine from inference.core.workflows.execution_engine.introspection import blocks_loader @@ -794,7 +795,7 @@ def test_workflow_when_non_batch_oriented_step_feeds_batch_oriented_step_operati } # when - with pytest.raises(AssumptionError): + with pytest.raises(ExecutionGraphStructureError): _ = ExecutionEngine.init( workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_OPERATING_BATCH_WISE, init_parameters=workflow_init_parameters, diff --git a/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py b/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py index 8a965273e..6d9189581 100644 --- a/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py +++ b/tests/workflows/unit_tests/execution_engine/introspection/test_schema_parser.py @@ -284,7 +284,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="workflow_image", kind=[IMAGE_KIND], - points_to_batch=True, + points_to_batch={True}, ) ], is_list_element=False, @@ -299,7 +299,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="workflow_parameter", kind=[BOOLEAN_KIND, STRING_KIND], - 
points_to_batch=False, + points_to_batch={False}, ) ], is_list_element=False, @@ -314,7 +314,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="step_output", kind=[IMAGE_KIND], - points_to_batch=True, + points_to_batch={True}, ) ], is_list_element=False, @@ -332,7 +332,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: BOOLEAN_KIND, OBJECT_DETECTION_PREDICTION_KIND, ], - points_to_batch=True, + points_to_batch={True}, ) ], is_list_element=False, @@ -347,7 +347,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="step", kind=[], - points_to_batch=False, + points_to_batch={False}, ) ], is_list_element=False, @@ -397,12 +397,12 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="workflow_image", kind=[IMAGE_KIND], - points_to_batch=True, + points_to_batch={True}, ), ReferenceDefinition( selected_element="step_output", kind=[IMAGE_KIND], - points_to_batch=True, + points_to_batch={True}, ), # nested list is ignored ], @@ -456,12 +456,12 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="workflow_image", kind=[IMAGE_KIND], - points_to_batch=True, + points_to_batch={True}, ), ReferenceDefinition( selected_element="step_output", kind=[IMAGE_KIND], - points_to_batch=True, + points_to_batch={True}, ), # nested list is ignored ], @@ -515,12 +515,12 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="workflow_image", kind=[IMAGE_KIND], - points_to_batch=True, + points_to_batch={True}, ), ReferenceDefinition( selected_element="step_output", kind=[IMAGE_KIND], - points_to_batch=True, + points_to_batch={True}, ), # nested list is ignored ], diff --git a/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py b/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py index 0bf3a42fc..fe020eb78 100644 --- a/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py +++ b/tests/workflows/unit_tests/execution_engine/introspection/test_selectors_parser.py @@ -81,7 +81,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="workflow_image", kind=[IMAGE_KIND], - points_to_batch=True, + points_to_batch={True}, ) ], is_list_element=False, @@ -102,7 +102,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: ReferenceDefinition( selected_element="workflow_parameter", kind=[BOOLEAN_KIND, STRING_KIND], - points_to_batch=False, + points_to_batch={False}, ) ], is_list_element=False, From 9a3558546c3ec38738b036182858e3314b1d41e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Fri, 8 Nov 2024 16:25:51 +0100 Subject: [PATCH 26/67] Fix docs --- docs/workflows/definitions.md | 2 +- docs/workflows/execution_engine_changelog.md | 118 ++++++++++++++++--- docs/workflows/workflow_execution.md | 11 +- docs/workflows/workflows_compiler.md | 38 ++++-- inference/core/workflows/prototypes/block.py | 2 +- 5 files changed, 135 insertions(+), 36 deletions(-) diff --git a/docs/workflows/definitions.md b/docs/workflows/definitions.md index 70e97d2a5..ec2768aee 100644 --- a/docs/workflows/definitions.md +++ b/docs/workflows/definitions.md @@ -121,7 +121,7 @@ More details about the nature of batch-oriented data processing in workflows can ### Generic batch-oriented inputs -Since Execution Engine `v1.3.0` (inference release `v0.26.0`), Workflows support +Since Execution 
Engine `v1.3.0` (inference release `v0.26.0`), Workflows support
+Since Execution Engine `v1.3.0` (inference release `v0.27.0`), Workflows support
 batch oriented inputs of any *[kind](/workflows/kinds/)* and
 *[dimensionality](/workflows/workflow_execution/#steps-interactions-with-data)*.
 These inputs are **not enforced for now**, but we expect that as the ecosystem grows, they will
diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md
index f96a52eb9..f35be618c 100644
--- a/docs/workflows/execution_engine_changelog.md
+++ b/docs/workflows/execution_engine_changelog.md
@@ -40,7 +40,7 @@ introduced two new class methods: `WorkflowImageData.copy_and_replace(...)` and
 For more details, refer to the updated
 [`WorkflowImageData` usage guide](/workflows/internal_data_types/#workflowimagedata).

-## Execution Engine `v1.3.0` | inference `v0.26.0`
+## Execution Engine `v1.3.0` | inference `v0.27.0`

 * Introduced the change that lets each kind have serializer and deserializer defined. The change decouples
 Workflows plugins from Execution Engine and makes it possible to integrate the ecosystem with external systems that
@@ -65,15 +65,23 @@ format introduced **at the level of Execution Engine**). As a result of the chan
 properly. This may not be the case in the future, as in most cases batch-oriented data *kind* may be
 inferred by compiler (yet this feature is not implemented for now).

- * **new selector types annotation were introduced** - `BatchSelector` and `ScalarSelector`.
- `BatchSelector` is supposed to replace `StepOutputSelector`, `WorkflowImageSelector`, `StepOutputImageSelector`
- and `WorkflowVideoMetadataSelector` in block manifests, allowing batch-oriented data to be used as block input,
- regardless of whether it comes from user inputs or outputs of other blocks.
- `ScalarSelector` is meant to replace `WorkflowParameterSelector`, providing a way to input
- non-natch oriented data into the block both from **workflow inputs** (via `WorkflowParameter` input) or
- from **steps outputs** - such that steps can now directly feed parameters into other steps.
- Mentioned old annotation types **should be assumed deprecated**, we advise to migrate into `BatchSelector`,
- and `ScalarSelector` but that is not hard requirement.
+ * **a new selector type annotation was introduced** - named simply `Selector(...)`.
+ `Selector(...)` is supposed to replace `StepOutputSelector`, `WorkflowImageSelector`, `StepOutputImageSelector`,
+ `WorkflowVideoMetadataSelector` and `WorkflowParameterSelector` in block manifests,
+ letting developers express that a specific step manifest property can hold a selector of a specific *kind*.
+ The mentioned old annotation types **should be assumed deprecated** - we advise migrating to `Selector(...)`.
+
+ * as a result of simplifying the selector type annotations, the old selectors no
+ longer provide the information on which parameters of a block's `run(...)` method are
+ shipped by the Execution Engine wrapped into the [`Batch[X]` container](/workflows/internal_data_types/#batch).
+ Instead of the old selector type annotations and the `block_manifest.accepts_batch_input()` method,
+ we propose switching to two methods that explicitly define the parameters expected to
+ be fed with batch-oriented data (`block_manifest.get_parameters_accepting_batches()`) and
+ the parameters capable of taking both *batches* and *scalar* values
+ (`block_manifest.get_parameters_accepting_mixed_input()`). The return value of `block_manifest.accepts_batch_input()`
+ is built upon the results of the two new methods.
The change is **non-breaking**, as any existing block which + was capable of processing batches must have implemented `block_manifest.accepts_batch_input()` method returning + `True` and use appropriate selector type annotation which indicated batch-oriented data. * As a result of the changes, it is now possible to **split any arbitrary workflows into multiple ones executing subsets of steps**, enabling building such tools as debuggers. @@ -121,9 +129,9 @@ subsets of steps**, enabling building such tools as debuggers. } ``` -??? Hint "New type annotation for selectors" +??? Hint "New type annotation for selectors - blocks without `Batch[X]` inputs" - Blocks manifest may **optionally** be updated to use `BatchSelector` in the following way: + Blocks manifest may **optionally** be updated to use `Selector` in the following way: ```python from typing import Union @@ -153,29 +161,103 @@ subsets of steps**, enabling building such tools as debuggers. should just be changed into: - ```{ .py linenums="1" hl_lines="7 8 13 14 20"} + ```{ .py linenums="1" hl_lines="7 12 13 19"} from inference.core.workflows.prototypes.block import WorkflowBlockManifest from inference.core.workflows.execution_engine.entities.types import ( INSTANCE_SEGMENTATION_PREDICTION_KIND, OBJECT_DETECTION_PREDICTION_KIND, FLOAT_KIND, IMAGE_KIND, - BatchSelector, - ScalarSelector, + Selector, ) class BlockManifest(WorkflowBlockManifest): - reference_image: BatchSelector(kind=[IMAGE_KIND]) - predictions: BatchSelector( + reference_image: Selector(kind=[IMAGE_KIND]) + predictions: Selector( kind=[ OBJECT_DETECTION_PREDICTION_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, ] ) - confidence: ScalarSelector(kind=[FLOAT_KIND]) + confidence: Selector(kind=[FLOAT_KIND]) ``` +??? Hint "New type annotation for selectors - blocks with `Batch[X]` inputs" + + Blocks manifest may **optionally** be updated to use `Selector` in the following way: + + ```python + from typing import Union + from inference.core.workflows.prototypes.block import WorkflowBlockManifest + from inference.core.workflows.execution_engine.entities.types import ( + INSTANCE_SEGMENTATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + FLOAT_KIND, + WorkflowImageSelector, + StepOutputImageSelector, + StepOutputSelector, + WorkflowParameterSelector, + ) + + + class BlockManifest(WorkflowBlockManifest): + + reference_image: Union[WorkflowImageSelector, StepOutputImageSelector] + predictions: StepOutputSelector( + kind=[ + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + ] + ) + data: Dict[str, Union[StepOutputSelector(), WorkflowParameterSelector()]] + confidence: WorkflowParameterSelector(kind=[FLOAT_KIND]) + + @classmethod + def accepts_batch_input(cls) -> bool: + return True + ``` + + should be changed into: + + ```{ .py linenums="1" hl_lines="7 12 13 19 20 22-24 26-28"} + from inference.core.workflows.prototypes.block import WorkflowBlockManifest + from inference.core.workflows.execution_engine.entities.types import ( + INSTANCE_SEGMENTATION_PREDICTION_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + FLOAT_KIND, + IMAGE_KIND, + Selector, + ) + + + class BlockManifest(WorkflowBlockManifest): + reference_image: Selector(kind=[IMAGE_KIND]) + predictions: Selector( + kind=[ + OBJECT_DETECTION_PREDICTION_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + ] + ) + data: Dict[str, Selector()] + confidence: Selector(kind=[FLOAT_KIND]) + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["predictions"] + + @classmethod + def 
get_parameters_accepting_mixed_input(cls) -> List[str]:
+            return ["data"]
+    ```
+
+    Please point out that:
+
+    * the `data` property in the original example was able to accept both **batches** of data
+    and **scalar** values due to the union of a selector of batch-oriented data (`StepOutputSelector`) and
+    a selector of *scalar* data (`WorkflowParameterSelector`). Now the same is manifested by the `Selector(...)` type
+    annotation and the return value of the `get_parameters_accepting_mixed_input(...)` method.

 ??? Hint "New inputs in Workflows definitions"

diff --git a/docs/workflows/workflow_execution.md b/docs/workflows/workflow_execution.md
index 45358e529..141daefa3 100644
--- a/docs/workflows/workflow_execution.md
+++ b/docs/workflows/workflow_execution.md
@@ -95,11 +95,11 @@ for number in [1, 2, 3, 4]:
     results.append(is_even(number))
 ```

-In Workflows, usually you do not need to worry about broadcasting the operations into batches of data -
-Execution Engine is doing that for you behind the scenes, but once you understand the role of *batch-oriented*
+In Workflows, usually **you do not need to worry** about broadcasting the operations into batches of data -
+Execution Engine does that for you behind the scenes, but once you understand the role of *batch-oriented*
 data, let's think if all data can be represented as batches.

-Standard way of inferring predictions from classification model can be illustrated with the following
+The standard way of making predictions with a classification model can be illustrated with the following
 pseudo-code:
 ```python
 images = [PIL.Image(...), PIL.Image(...), PIL.Image(...), PIL.Image(...)]
@@ -154,8 +154,7 @@ You would likely say:
 - In options B and C, the output will be a batch. In option C, the non-batch-oriented parameters will be
 broadcast to match the batch size of the data.

-And you'd be correct. If you understand that, you probably only have two more concepts to understand before
-you can comfortably say you understand everything needed to successfully build and run complex Workflows.
+And you'd be correct. Knowing that, you only have two more concepts to understand to become a Workflows expert.

 Let's say you want to create a Workflow with these steps:

@@ -172,7 +171,7 @@ Here's what happens with the data in the cropping step:

 2. The object detection model finds a different number of objects in each image.

-3. The cropping step then creates new images for each detected object, resulting in a new batch of images
+3. The cropping step then creates a new image for each detected object, resulting in a new batch of images
 for each original image.

 So, you end up with a nested list of images, with sizes like `[(k[1], ), (k[2], ), ... (k[n])]`, where each `k[i]`
diff --git a/docs/workflows/workflows_compiler.md b/docs/workflows/workflows_compiler.md
index 593c33ace..420cfa7f8 100644
--- a/docs/workflows/workflows_compiler.md
+++ b/docs/workflows/workflows_compiler.md
@@ -234,16 +234,34 @@ can decide separately for each element in the batch which ones will proceed and

 #### Batch-orientation compatibility

-As it was outlined, Workflows define batch-oriented data and parameters.
-Some blocks may require batch-oriented inputs, but that is not always required. When
-block do not require batch-oriented input, it will be fed only with parameters and
-will produce a single result. Such outputs can be used as inputs to other steps,
-but only if block class returns `False` from `block.accepts_batch_input(...)` method. The
-constraint is introduced to ensure stability of blocks interface.
-If there is a need for such steps connection, this is usually an indicator that
-the input parameter should not be marked with `BatchSelector(...)` type annotation,
-but rather with `ScalarSelecector(...)` - **if this assumption is wrong, please let us
-know in GitHub issues**.
+As it was outlined, Workflows define **batch-oriented data** and **scalars**.
+From [the description of the nature of data in Workflows](/workflows/workflow_execution/#what-is-the-data),
+you can also conclude that operations which are executed against batch-oriented data
+have two almost equivalent ways of running:
+
+* **all-at-once:** taking whole batches of data and processing them
+
+* **one-by-one:** looping over batch elements and getting results sequentially
+
+Since the default way for Workflow blocks to deal with batches is to consume them element-by-element,
+**there is no real difference** between **batch-oriented data** and **scalars**
+in such a case. Execution Engine simply unpacks scalars from batches and passes them to each step.
+
+The process gets more complicated when a block accepts batch input. You will learn the
+details in the [blocks development guide](/workflows/create_workflow_block/), but
+a block is required to denote each input that must be provided *batch-wise* and all inputs
+which can be fed with both batch-oriented data and scalars at the same time (which is a much
+less common case). In such cases, *lineage* is used to deduce if the actual data fed into
+every step input is a *batch* or a *scalar*. When a violation is detected (for instance, a *scalar* is
+provided for an input that requires batches, or vice versa) - an error is raised.
+
+
+!!! Note "Potential future improvements"
+
+    At this moment, we are not sure if the behaviour described above limits the potential of
+    the Workflows ecosystem. If you see that your Workflows cannot run due to errors
+    resulting from the described mechanism - please let us know in
+    [GitHub issues](https://github.com/roboflow/inference/issues).
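To make the distinction above concrete, here is a minimal manifest sketch (the block type, property names and output are illustrative only, not an existing block) showing how `Selector(...)` annotations could be combined with the two methods described in this section:

```python
from typing import Any, Dict, List, Literal, Union

from inference.core.workflows.execution_engine.entities.types import Selector
from inference.core.workflows.prototypes.block import (
    OutputDefinition,
    WorkflowBlockManifest,
)


class ExampleManifest(WorkflowBlockManifest):
    # illustrative type identifier and property names - not part of this changeset
    type: Literal["my_plugin/batch_and_mixed_inputs@v1"]
    name: str
    # strictly batch-oriented input - the step receives it wrapped into Batch[...]
    predictions: Selector()
    # mixed input - may be fed with batch-oriented data, scalars, or a mixture of both
    operations_parameters: Dict[str, Union[Selector(), Any]]

    @classmethod
    def get_parameters_accepting_batches(cls) -> List[str]:
        return ["predictions"]

    @classmethod
    def get_parameters_accepting_mixed_input(cls) -> List[str]:
        return ["operations_parameters"]

    @classmethod
    def describe_outputs(cls) -> List[OutputDefinition]:
        return [OutputDefinition(name="results")]
```

With such a declaration, lineage analysis treats `predictions` as strictly batch-oriented, while `operations_parameters` may legally be fed from either batch-oriented or scalar sources.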
## Initializing Workflow steps from blocks diff --git a/inference/core/workflows/prototypes/block.py b/inference/core/workflows/prototypes/block.py index 34d08807b..7875c6bcb 100644 --- a/inference/core/workflows/prototypes/block.py +++ b/inference/core/workflows/prototypes/block.py @@ -54,7 +54,7 @@ def get_output_dimensionality_offset( return 0 @classmethod - def accepts_batch_input(cls) -> Union[bool, List[str]]: + def accepts_batch_input(cls) -> bool: return len(cls.get_parameters_accepting_batches()) > 0 or len( cls.get_parameters_accepting_mixed_input() ) From a17c656256938092f69c515a7bd522861f346ee8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 11 Nov 2024 07:50:39 +0100 Subject: [PATCH 27/67] Adjust docs to changes --- docs/workflows/create_workflow_block.md | 471 +++++++++++------- .../workflows/core_steps/formatters/csv/v1.py | 2 +- .../detections_transformation/v1.py | 2 +- .../introspection/schema_parser.py | 2 +- .../v1/dynamic_blocks/block_assembler.py | 10 +- .../v1/dynamic_blocks/entities.py | 2 +- inference/core/workflows/prototypes/block.py | 4 +- .../__init__.py | 4 +- 8 files changed, 292 insertions(+), 205 deletions(-) diff --git a/docs/workflows/create_workflow_block.md b/docs/workflows/create_workflow_block.md index c20223910..72c43776a 100644 --- a/docs/workflows/create_workflow_block.md +++ b/docs/workflows/create_workflow_block.md @@ -304,23 +304,22 @@ parsing specific steps in a Workflow definition * `name` - this property will be used to give the step a unique name and let other steps selects it via selectors -### Adding batch-oriented inputs +### Adding inputs -We want our step to take two batch-oriented inputs with images to be compared - so effectively -we will be creating SIMD block. +We want our step to take two inputs with images to be compared. -??? example "Adding batch-oriented inputs" +??? example "Adding inputs" Let's see how to add definitions of those inputs to manifest: - ```{ .py linenums="1" hl_lines="2 6-9 18-23"} + ```{ .py linenums="1" hl_lines="2 6-9 20-25"} from typing import Literal, Union from pydantic import Field from inference.core.workflows.prototypes.block import ( WorkflowBlockManifest, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, + Selector, IMAGE_KIND, ) @@ -329,30 +328,33 @@ we will be creating SIMD block. type: Literal["my_plugin/images_similarity@v1"] name: str # all properties apart from `type` and `name` are treated as either - # definitions of batch-oriented data to be processed by block or its - # parameters that influence execution of steps created based on block - image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( + # hardcoded parameters or data selectors. Data selectors are strings + # that start from `$steps.` or `$inputs.` marking references for data + # available in runtime - in this case we usually specify kinds of data + # to let compiler know what we expect the data to look like. 
+ image_1: Selector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_2: Selector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) ``` * in the lines `2-9`, we've added a couple of imports to ensure that we have everything needed - * line `18` defines `image_1` parameter - as manifest is prototype for Workflow Definition, + * line `20` defines `image_1` parameter - as manifest is prototype for Workflow Definition, the only way to tell about image to be used by step is to provide selector - we have - a specialised type in core library that can be used - `BatchSelector`. + a specialised type in core library that can be used - `Selector`. If you look deeper into codebase, you will discover this is type alias constructor function - telling `pydantic` to expect string matching `$inputs.{name}` and `$steps.{name}.*` patterns respectively, additionally providing extra schema field metadata that tells Workflows ecosystem components that the `kind` of data behind selector is - [image](/workflows/kinds/image/). + [image](/workflows/kinds/image/). **important note:** we denote *kind* as list - the list of specific kinds + is interpreted as *union of kinds* by Execution Engine. - * denoting `pydantic` `Field(...)` attribute in the last parts of line `17` is optional, yet appreciated, + * denoting `pydantic` `Field(...)` attribute in the last parts of line `20` is optional, yet appreciated, especially for blocks intended to cooperate with Workflows UI - * starting in line `21`, you can find definition of `image_2` parameter which is very similar to `image_1`. + * starting in line `23`, you can find definition of `image_2` parameter which is very similar to `image_1`. Such definition of manifest can handle the following step declaration in Workflow definition: @@ -368,35 +370,32 @@ Such definition of manifest can handle the following step declaration in Workflo This definition will make the Compiler and Execution Engine: -* select as a step prototype the block which declared manifest with type discriminator being -`my_plugin/images_similarity@v1` +* initialize the step from Workflow block declaring type `my_plugin/images_similarity@v1` * supply two parameters for the steps run method: - * `input_1` of type `WorkflowImageData` which will be filled with image submitted as Workflow execution input + * `input_1` of type `WorkflowImageData` which will be filled with image submitted as Workflow execution input + named `my_image`. * `imput_2` of type `WorkflowImageData` which will be generated at runtime, by another step called `image_transformation` -### Adding parameter to the manifest +### Adding parameters to the manifest -Let's now add the parameter that will influence step execution. The parameter is not assumed to be -batch-oriented and will affect all batch elements passed to the step. +Let's now add the parameter that will influence step execution. ??? 
example "Adding parameter to the manifest" - ```{ .py linenums="1" hl_lines="9-11 27-33"} + ```{ .py linenums="1" hl_lines="9 27-33"} from typing import Literal, Union from pydantic import Field from inference.core.workflows.prototypes.block import ( WorkflowBlockManifest, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, + Selector, IMAGE_KIND, - FloatZeroToOne, - ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, ) @@ -405,39 +404,31 @@ batch-oriented and will affect all batch elements passed to the step. type: Literal["my_plugin/images_similarity@v1"] name: str # all properties apart from `type` and `name` are treated as either - # definitions of batch-oriented data to be processed by block or its - # parameters that influence execution of steps created based on block - image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( + # hardcoded parameters or data selectors. Data selectors are strings + # that start from `$steps.` or `$inputs.` marking references for data + # available in runtime - in this case we usually specify kinds of data + # to let compiler know what we expect the data to look like. + image_1: Selector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_2: Selector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ - FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + float, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", ) ``` - - * line `9` imports `FloatZeroToOne` which is type alias providing validation - for float values in range 0.0-1.0 - this is based on native `pydantic` mechanism and - everyone could create this type annotation locally in module hosting block - - * line `10` imports function `ScalarSelector(...)` capable to dynamically create - `pydantic` type annotation for selector to workflow input parameter (matching format `$inputs.param_name`), - declaring union of kinds compatible with the field - * line `11` imports [`float_zero_to_one`](/workflows/kinds/float_zero_to_one) `kind` definition which will be used later + * line `9` imports [`float_zero_to_one`](/workflows/kinds/float_zero_to_one) `kind` + definition which will be used to define the parameter. * in line `27` we start defining parameter called `similarity_threshold`. Manifest will accept - either float values (in range `[0.0-1.0]`) or selector to workflow input of `kind` - [`float_zero_to_one`](/workflows/kinds/float_zero_to_one). Please point out on how - function creating type annotation (`ScalarSelector(...)`) is used - - in particular, expected `kind` of data is passed as list of `kinds` - representing union - of expected data `kinds`. + either float values or selector to workflow input of `kind` + [`float_zero_to_one`](/workflows/kinds/float_zero_to_one), imported in line `9`. Such definition of manifest can handle the following step declaration in Workflow definition: @@ -459,15 +450,14 @@ or alternatively: "name": "my_step", "image_1": "$inputs.my_image", "image_2": "$steps.image_transformation.image", - "similarity_threshold": "0.5" + "similarity_threshold": 0.5 } ``` ### Declaring block outputs -Our manifest is ready regarding properties that can be declared in Workflow definitions, -but we still need to provide additional information for the Execution Engine to successfully -run the block. 
+We have successfully defined inputs for our block, but we are still missing couple of elements required to +successfully run blocks. Let's define block outputs. ??? example "Declaring block outputs" @@ -475,18 +465,16 @@ run the block. to increase block stability, we advise to provide information about execution engine compatibility. - ```{ .py linenums="1" hl_lines="1 5 13 34-41 43-45"} - from typing import Literal, Union, List, Optional + ```{ .py linenums="1" hl_lines="5 11 32-39 41-43"} + from typing import Literal, Union from pydantic import Field from inference.core.workflows.prototypes.block import ( WorkflowBlockManifest, OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, + Selector, IMAGE_KIND, - FloatZeroToOne, - ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, ) @@ -495,15 +483,15 @@ run the block. class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_1: Selector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_2: Selector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ - FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + float, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", @@ -520,21 +508,19 @@ run the block. @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" ``` - - * line `1` contains additional imports from `typing` - + * line `5` imports class that is used to describe step outputs - * line `13` imports [`boolean`](/workflows/kinds/boolean) `kind` to be used + * line `11` imports [`boolean`](/workflows/kinds/boolean) `kind` to be used in outputs definitions - * lines `34-41` declare class method to specify outputs from the block - + * lines `32-39` declare class method to specify outputs from the block - each entry in list declare one return property for each batch element and its `kind`. Our block will return boolean flag `images_match` for each pair of images. 
- * lines `43-45` declare compatibility of the block with Execution Engine - + * lines `41-43` declare compatibility of the block with Execution Engine - see [versioning page](/workflows/versioning/) for more details As a result of those changes: @@ -557,7 +543,7 @@ in their inputs * additionally, block manifest should implement instance method `get_actual_outputs(...)` that provides list of actual outputs that can be generated based on filled manifest data - ```{ .py linenums="1" hl_lines="14 36-43 45-50"} + ```{ .py linenums="1" hl_lines="13 35-42 44-49"} from typing import Literal, Union, List, Optional from pydantic import Field from inference.core.workflows.prototypes.block import ( @@ -565,10 +551,9 @@ in their inputs OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, + Selector, IMAGE_KIND, FloatZeroToOne, - ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, WILDCARD_KIND, @@ -578,15 +563,15 @@ in their inputs class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_1: Selector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_2: Selector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ - FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + float, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", @@ -624,7 +609,7 @@ block. ??? example "Block scaffolding" - ```{ .py linenums="1" hl_lines="1 5 6 8-11 54-56 58-64"} + ```{ .py linenums="1" hl_lines="1 5 6 8-11 53-55 57-63"} from typing import Literal, Union, List, Optional, Type from pydantic import Field from inference.core.workflows.prototypes.block import ( @@ -637,10 +622,9 @@ block. WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, + Selector, IMAGE_KIND, FloatZeroToOne, - ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, ) @@ -648,15 +632,15 @@ block. class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_1: Selector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_2: Selector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", @@ -673,7 +657,7 @@ block. @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ImagesSimilarityBlock(WorkflowBlock): @@ -691,15 +675,18 @@ block. 
pass ``` - * lines `1`, `5-6` and `8-9` added changes into import surtucture to + * lines `1`, `5-6` and `8-11` added changes into import surtucture to provide additional symbols required to properly define block class and all of its methods signatures - * lines `54-56` defines class method `get_manifest(...)` to simply return + * lines `53-55` defines class method `get_manifest(...)` to simply return the manifest class we cretaed earlier - * lines `58-64` define `run(...)` function, which Execution Engine - will invoke with data to get desired results + * lines `57-63` define `run(...)` function, which Execution Engine + will invoke with data to get desired results. Please note that + manifest fields defining inputs of [image](/workflows/kinds/image/) kind + are marked as `WorkflowImageData` - which is compliant with intenal data + representation of `image` kind described in [kind documentation](/workflows/kinds/image/). ### Providing implementation for block logic @@ -714,7 +701,7 @@ it can produce meaningful results. ??? example "Implementation of `run(...)` method" - ```{ .py linenums="1" hl_lines="3 56-58 70-81"} + ```{ .py linenums="1" hl_lines="3 55-57 69-80"} from typing import Literal, Union, List, Optional, Type from pydantic import Field import cv2 @@ -729,10 +716,9 @@ it can produce meaningful results. WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, + Selector, IMAGE_KIND, FloatZeroToOne, - ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, ) @@ -740,15 +726,15 @@ it can produce meaningful results. class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_1: Selector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_2: Selector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", @@ -765,7 +751,7 @@ it can produce meaningful results. @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ImagesSimilarityBlock(WorkflowBlock): @@ -800,49 +786,30 @@ it can produce meaningful results. * in line `3` we import OpenCV - * lines `56-58` defines block constructor, thanks to this - state of block + * lines `55-57` defines block constructor, thanks to this - state of block is initialised once and live through consecutive invocation of `run(...)` method - for instance when Execution Engine runs on consecutive frames of video - * lines `70-81` provide implementation of block functionality - the details are trully not + * lines `69-80` provide implementation of block functionality - the details are trully not important regarding Workflows ecosystem, but there are few details you should focus: - * lines `70` and `71` make use of `WorkflowImageData` abstraction, showcasing how + * lines `69` and `70` make use of `WorkflowImageData` abstraction, showcasing how `numpy_image` property can be used to get `np.ndarray` from internal representation of images in Workflows. We advise to expole remaining properties of `WorkflowImageData` to discover more. 
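    As a quick illustration of that advice, here is a short, non-exhaustive sketch - `numpy_image`
    is the property used above, while the remaining attribute names are examples that should be
    verified against the `WorkflowImageData` definition shipped with your version of `inference`:

    ```{ .py }
    def run(
        self,
        image_1: WorkflowImageData,
        image_2: WorkflowImageData,
        similarity_threshold: float,
    ) -> BlockResult:
        pixels = image_1.numpy_image        # np.ndarray with pixel data (BGR)
        encoded = image_1.base64_image      # base64-encoded payload, computed on demand
        origin = image_1.parent_metadata    # metadata describing where the image came from
        ...
    ```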
- * result of workflow block execution, declared in lines `79-81` is in our case just a dictionary - **with the keys being the names of outputs declared in manifest**, in line `44`. Be sure to provide all + * result of workflow block execution, declared in lines `78-80` is in our case just a dictionary + **with the keys being the names of outputs declared in manifest**, in line `43`. Be sure to provide all declared outputs - otherwise Execution Engine will raise error. - -You may ask yourself how it is possible that implemented block accepts batch-oriented workflow input, but do not -operate on batches directly. This is due to the fact that the default block behaviour is to run one-by-one against -all elements of input batches. We will show how to change that in [advanced topics](#advanced-topics) section. - -!!! note - - One important note: blocks, like all other classes, have constructors that may initialize a state. This state can - persist across multiple Workflow runs when using the same instance of the Execution Engine. If the state management - needs to be aware of which batch element it processes (e.g., in object tracking scenarios), the block creator - should use dedicated batch-oriented inputs. These inputs, provide relevant metadatadata — like the - `WorkflowVideoMetadata` input, which is crucial for tracking use cases and can be used along with `WorkflowImage` - input in a block implementing tracker. - - The ecosystem is evolving, and new input types will be introduced over time. If a specific input type needed for - a use case is not available, an alternative is to design the block to process entire input batches. This way, - you can rely on the Batch container's indices property, which provides an index for each batch element, allowing - you to maintain the correct order of processing. ## Exposing block in `plugin` -Now, your block is ready to be used, but if you declared step using it in your Workflow definition you -would see an error. This is because no plugin exports the block you just created. Details of blocks bundling -will be covered in [separate page](/workflows/blocks_bundling/), but the remaining thing to do is to -add block class into list returned from your plugins' `load_blocks(...)` function: +Now, your block is ready to be used, but Execution Engine is not aware of its existence. This is because no registered +plugin exports the block you just created. Details of blocks bundling are be covered in [separate page](/workflows/blocks_bundling/), +but the remaining thing to do is to add block class into list returned from your plugins' `load_blocks(...)` function: ```python -# __init__.py of your plugin +# __init__.py of your plugin (or roboflow_core plugin if you contribute directly to `inference`) from my_plugin.images_similarity.v1 import ImagesSimilarityBlock # this is example import! requires adjustment @@ -862,7 +829,7 @@ on how to use it for your block. ??? example "Implementation of blocks accepting batches" - ```{ .py linenums="1" hl_lines="13 41-43 71-72 75-78 86-87"} + ```{ .py linenums="1" hl_lines="13 40-42 70-71 74-77 85-86"} from typing import Literal, Union, List, Optional, Type from pydantic import Field import cv2 @@ -878,10 +845,9 @@ on how to use it for your block. Batch, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, + Selector, IMAGE_KIND, FloatZeroToOne, - ScalarSelector, FLOAT_ZERO_TO_ONE_KIND, BOOLEAN_KIND, ) @@ -889,23 +855,23 @@ on how to use it for your block. 
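    # `Batch` imported above is a read-only, list-like container - the Execution Engine passes one
    # such container per batch-oriented parameter into the `run(...)` method defined further below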
class ImagesSimilarityManifest(WorkflowBlockManifest): type: Literal["my_plugin/images_similarity@v1"] name: str - image_1: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_1: Selector(kind=[IMAGE_KIND]) = Field( description="First image to calculate similarity", ) - image_2: BatchSelector(kind=[IMAGE_KIND]) = Field( + image_2: Selector(kind=[IMAGE_KIND]) = Field( description="Second image to calculate similarity", ) similarity_threshold: Union[ FloatZeroToOne, - ScalarSelector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), ] = Field( default=0.4, description="Threshold to assume that images are similar", ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> bool: + return ["image_1", "image_2"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -918,7 +884,7 @@ on how to use it for your block. @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ImagesSimilarityBlock(WorkflowBlock): @@ -955,19 +921,125 @@ on how to use it for your block. * line `13` imports `Batch` from core of workflows library - this class represent container which is veri similar to list (but read-only) to keep batch elements - * lines `41-43` define class method that changes default behaviour of the block and make it capable - to process batches + * lines `40-42` define class method that changes default behaviour of the block and make it capable + to process batches - we are marking each parameter that the `run(...)` method **recognizes as batch-oriented**. * changes introduced above made the signature of `run(...)` method to change, now `image_1` and `image_2` - are not instances of `WorkflowImageData`, but rather batches of elements of this type + are not instances of `WorkflowImageData`, but rather batches of elements of this type. **Important note:** + having multiple batch-oriented parameters we expect that those batches would have the elements related to + each other at corresponding positions - such that our block comparing `image_1[1]` into `image_2[1]` actually + performs logically meaningful operation. - * lines `75-78`, `86-87` present changes that needed to be introduced to run processing across all batch + * lines `74-77`, `85-86` present changes that needed to be introduced to run processing across all batch elements - showcasing how to iterate over batch elements if needed - * it is important to note how outputs are constructed in line `86` - each element of batch will be given + * it is important to note how outputs are constructed in line `85` - each element of batch will be given its entry in the list which is returned from `run(...)` method. Order must be aligned with order of batch elements. Each output dictionary must provide all keys declared in block outputs. + +??? Warning "Inputs that accept both batches and scalars" + + It is **relatively unlikely**, but may happen that your block would need to accept both batch-oriented data + and scalars within a single input parameter. Execution Engine recognises that using + `get_parameters_accepting_batches_and_scalars(...)` method of block manifest. 
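    Before diving into the full example, here is a minimal, incomplete sketch putting the two
    declarations side by side (the class name and parameter names are made up purely for
    illustration):

    ```{ .py }
    class SomeBlockManifest(WorkflowBlockManifest):

        @classmethod
        def get_parameters_accepting_batches(cls) -> List[str]:
            # these parameters always reach run(...) wrapped in Batch[...]
            return ["predictions"]

        @classmethod
        def get_parameters_accepting_batches_and_scalars(cls) -> List[str]:
            # these parameters may reach run(...) either as Batch[...] or as plain values
            return ["operations_parameters"]
    ```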
Take a look at the + example provided below: + + + ```{ .py linenums="1" hl_lines="20-22 24-26 45-47 49 50-54 65-70"} + from typing import Literal, Union, List, Optional, Type, Any, Dict + from pydantic import Field + + from inference.core.workflows.prototypes.block import ( + WorkflowBlockManifest, + WorkflowBlock, + BlockResult, + ) + from inference.core.workflows.execution_engine.entities.base import ( + OutputDefinition, + Batch, + ) + from inference.core.workflows.execution_engine.entities.types import ( + Selector, + ) + + class ExampleManifest(WorkflowBlockManifest): + type: Literal["my_plugin/example@v1"] + name: str + param_1: Selector() + param_2: List[Selector()] + param_3: Dict[str, Selector()] + + @classmethod + def get_parameters_accepting_batches_and_scalars(cls) -> bool: + return ["param_1", "param_2", "param_3"] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="dummy")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + + class ExampleBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return ExampleManifest + + def run( + self, + param_1: Any, + param_2: List[Any], + param_3: Dict[str, Any], + ) -> BlockResult: + batch_size = None + if isinstance(param_1, Batch): + param_1_result = ... # do something with batch-oriented param + batch_size = len(param_1) + else: + param_1_result = ... # do something with scalar param + for element in param_2: + if isinstance(element, Batch): + ... + else: + ... + for key, value in param_3.items(): + if isinstance(element, value): + ... + else: + ... + if batch_size is None: + return {"dummy": "some_result"} + result = [] + for _ in range(batch_size): + result.append({"dummy": "some_result"}) + return result + ``` + + * lines `20-22` specify manifest parameters that are expected to accept mixed (both scalar and batch-oriented) + input data - point out that at this stage there is no difference in definition compared to previous examples. + + * lines `24-26` specify `get_parameters_accepting_batches_and_scalars(...)` method to tell the Execution + Engine that block `run(...)` method can handle both scalar and batch-oriented inputs for the specified + parameters. + + * lines `45-47` depict the parameters of mixed nature in `run(...)` method signature. + + * line `49` reveals that we must keep track of the expected output size **within the block logic**. That's + why it is quite tricky to implement blocks with mixed inputs. Normally, when block `run(...)` method + operates on scalars - in majority of cases (exceptions will be described below) - the metod constructs + single output dictionary. Similairly, when batch-oriented inputs are accepted - those inputs + define expected output size. In this case, however, we must manually detect batches and catch their sizes. + + * lines `50-54` showcase how we usually deal with mixed parameters - applying different logic when + batch-oriented data is detected + + * as mentioned earlier, output construction must also be adjusted to the nature of mixed inputs - which + is illustrated in lines `65-70` + ### Implementation of flow-control block Flow-control blocks differs quite substantially from other blocks that just process the data. 
Here we will show @@ -981,11 +1053,11 @@ is defined as `$steps.{step_name}` - similar to step output selector, but withou * `FlowControl` object specify next steps (from selectors provided in step manifest) that for given batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-control) should pick up next -??? example "Implementation of flow-control - SIMD block" +??? example "Implementation of flow-control" Example provides and comments out implementation of random continue block - ```{ .py linenums="1" hl_lines="10 14 26 28-31 55-56"} + ```{ .py linenums="1" hl_lines="10 14 28-31 55-56"} from typing import List, Literal, Optional, Type, Union import random @@ -996,7 +1068,7 @@ batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-con ) from inference.core.workflows.execution_engine.entities.types import ( StepSelector, - BatchSelector, + Selector, IMAGE_KIND, ) from inference.core.workflows.execution_engine.v1.entities import FlowControl @@ -1011,7 +1083,7 @@ batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-con class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/random_continue@v1"] name: str - image: BatchSelector(kind=[IMAGE_KIND]) = ImageInputField + image: Selector(kind=[IMAGE_KIND]) = ImageInputField probability: float next_steps: List[StepSelector] = Field( description="Reference to step which shall be executed if expression evaluates to true", @@ -1024,7 +1096,7 @@ batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-con @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.2.0,<2.0.0" class RandomContinueBlockV1(WorkflowBlock): @@ -1050,30 +1122,30 @@ batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-con * line `14` imports `FlowControl` class which is the only viable response from flow-control block - * line `26` specifies `image` which is batch-oriented input making the block SIMD - - which means that for each element of images batch, block will make random choice on - flow-control - if not that input block would operate in non-SIMD mode - * line `28` defines list of step selectors **which effectively turns the block into flow-control one** * lines `55` and `56` show how to construct output - `FlowControl` object accept context being `None`, `string` or `list of strings` - `None` represent flow termination for the batch element, strings are expected to be selectors for next steps, passed in input. -??? example "Implementation of flow-control non-SIMD block" +??? 
example "Implementation of flow-control - batch variant" Example provides and comments out implementation of random continue block - ```{ .py linenums="1" hl_lines="9 11 24-27 50-51"} + ```{ .py linenums="1" hl_lines="8 11 15 29-32 38-40 55 59 60 61-63"} from typing import List, Literal, Optional, Type, Union import random from pydantic import Field from inference.core.workflows.execution_engine.entities.base import ( OutputDefinition, + WorkflowImageData, + Batch, ) from inference.core.workflows.execution_engine.entities.types import ( StepSelector, + Selector, + IMAGE_KIND, ) from inference.core.workflows.execution_engine.v1.entities import FlowControl from inference.core.workflows.prototypes.block import ( @@ -1087,6 +1159,7 @@ batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-con class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/random_continue@v1"] name: str + image: Selector(kind=[IMAGE_KIND]) = ImageInputField probability: float next_steps: List[StepSelector] = Field( description="Reference to step which shall be executed if expression evaluates to true", @@ -1096,10 +1169,14 @@ batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-con @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [] + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["image"] @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class RandomContinueBlockV1(WorkflowBlock): @@ -1110,23 +1187,34 @@ batch element (SIMD flow-control) or whole workflow execution (non-SIMD flow-con def run( self, + image: Batch[WorkflowImageData], probability: float, next_steps: List[str], ) -> BlockResult: - if not next_steps or random.random() > probability: - return FlowControl() - return FlowControl(context=next_steps) + result = [] + for _ in image: + if not next_steps or random.random() > probability: + result.append(FlowControl()) + result.append(FlowControl(context=next_steps)) + return result ``` - * line `9` imports type annotation for step selector which will be used to + * line `11` imports type annotation for step selector which will be used to notify Execution Engine that the block controls the flow - * line `11` imports `FlowControl` class which is the only viable response from + * line `15` imports `FlowControl` class which is the only viable response from flow-control block - * lines `24-27` defines list of step selectors **which effectively turns the block into flow-control one** + * lines `29-32` defines list of step selectors **which effectively turns the block into flow-control one** + + * lines `38-40` contain definition of `get_parameters_accepting_batches(...)` method telling Execution + Engine that block `run(...)` method expects batch-oriented `image` parameter. + + * line `59` revels that we need to return flow-control guide for each and every element of `image` batch. - * lines `50` and `51` show how to construct output - `FlowControl` object accept context being `None`, `string` or + * to achieve that end, in line `60` we iterate over the contntent of batch. + + * lines `61-63` show how to construct output - `FlowControl` object accept context being `None`, `string` or `list of strings` - `None` represent flow termination for the batch element, strings are expected to be selectors for next steps, passed in input. 
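    For reference, a step built from this block could be declared in a Workflow definition along
    the lines below (the step name and the `image` / `next_steps` selectors are placeholders):

    ```json
    {
        "type": "my_plugin/random_continue@v1",
        "name": "sampling_gate",
        "image": "$inputs.image",
        "probability": 0.5,
        "next_steps": ["$steps.on_true"]
    }
    ```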
@@ -1163,7 +1251,7 @@ def run(self, predictions: List[dict]) -> BlockResult: OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, + Selector, OBJECT_DETECTION_PREDICTION_KIND, ) from inference.core.workflows.prototypes.block import ( @@ -1177,7 +1265,7 @@ def run(self, predictions: List[dict]) -> BlockResult: class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/fusion_of_predictions@v1"] name: str - predictions: List[BatchSelector(kind=[OBJECT_DETECTION_PREDICTION_KIND])] = Field( + predictions: List[Selector(kind=[OBJECT_DETECTION_PREDICTION_KIND])] = Field( description="Selectors to step outputs", examples=[["$steps.model_1.predictions", "$steps.model_2.predictions"]], ) @@ -1193,7 +1281,7 @@ def run(self, predictions: List[dict]) -> BlockResult: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class FusionBlockV1(WorkflowBlock): @@ -1239,7 +1327,7 @@ keys serve as names for those selectors. ??? example "Nested selectors - named selectors" - ```{ .py linenums="1" hl_lines="23-26 47"} + ```{ .py linenums="1" hl_lines="22-25 46"} from typing import List, Literal, Optional, Type, Any from pydantic import Field @@ -1248,8 +1336,7 @@ keys serve as names for those selectors. OutputDefinition, ) from inference.core.workflows.execution_engine.entities.types import ( - BatchSelector, - ScalarSelector, + Selector ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1262,7 +1349,7 @@ keys serve as names for those selectors. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/named_selectors_example@v1"] name: str - data: Dict[str, BatchSelector(), ScalarSelector()] = Field( + data: Dict[str, Selector()] = Field( description="Selectors to step outputs", examples=[{"a": $steps.model_1.predictions", "b": "$Inputs.data"}], ) @@ -1275,7 +1362,7 @@ keys serve as names for those selectors. @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class BlockWithNamedSelectorsV1(WorkflowBlock): @@ -1292,10 +1379,10 @@ keys serve as names for those selectors. return {"my_output": ...} ``` - * lines `23-26` depict how to define manifest field capable of accepting + * lines `22-25` depict how to define manifest field capable of accepting dictionary of selectors - providing mapping between selector name and value - * line `47` shows what to expect as input to block's `run(...)` method - + * line `46` shows what to expect as input to block's `run(...)` method - dict of objects which are reffered with selectors. If the block accepted batches, the input type of `data` field would be `Dict[str, Union[Batch[Any], Any]]`. In non-batch cases, non-batch-oriented data referenced by selector is automatically @@ -1367,7 +1454,7 @@ the method signatures. from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1377,8 +1464,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_block/dynamic_crop@v1"] - image: BatchSelector(kind=[IMAGE_KIND]) - predictions: BatchSelector( + image: Selector(kind=[IMAGE_KIND]) + predictions: Selector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @@ -1394,7 +1481,7 @@ the method signatures. 
@classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DynamicCropBlockV1(WorkflowBlock): @@ -1454,7 +1541,7 @@ the method signatures. from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1465,8 +1552,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/tile_detections@v1"] - crops: BatchSelector(kind=[IMAGE_KIND]) - crops_predictions: BatchSelector( + crops: Selector(kind=[IMAGE_KIND]) + crops_predictions: Selector( kind=[OBJECT_DETECTION_PREDICTION_KIND] ) @@ -1538,7 +1625,7 @@ the method signatures. ) from inference.core.workflows.execution_engine.entities.types import ( OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, + Selector, IMAGE_KIND, ) from inference.core.workflows.prototypes.block import ( @@ -1550,8 +1637,8 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/stitch@v1"] - image: BatchSelector(kind=[IMAGE_KIND]) - image_predictions: BatchSelector( + image: Selector(kind=[IMAGE_KIND]) + image_predictions: Selector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @@ -1637,7 +1724,7 @@ the method signatures. from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1647,14 +1734,14 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_block/dynamic_crop@v1"] - image: BatchSelector(kind=[IMAGE_KIND]) - predictions: BatchSelector( + image: Selector(kind=[IMAGE_KIND]) + predictions: Selector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> bool: + return ["image", "predictions"] @classmethod def get_output_dimensionality_offset(cls) -> int: @@ -1668,7 +1755,7 @@ the method signatures. @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DynamicCropBlockV1(WorkflowBlock): @@ -1738,7 +1825,7 @@ the method signatures. from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, + Selector, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -1749,14 +1836,14 @@ the method signatures. class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/tile_detections@v1"] - images_crops: BatchSelector(kind=[IMAGE_KIND]) - crops_predictions: BatchSelector( + images_crops: Selector(kind=[IMAGE_KIND]) + crops_predictions: Selector( kind=[OBJECT_DETECTION_PREDICTION_KIND] ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> bool: + return ["images_crops", "crops_predictions"] @classmethod def get_output_dimensionality_offset(cls) -> int: @@ -1832,7 +1919,7 @@ the method signatures. ) from inference.core.workflows.execution_engine.entities.types import ( OBJECT_DETECTION_PREDICTION_KIND, - BatchSelector, + Selector, IMAGE_KIND, ) from inference.core.workflows.prototypes.block import ( @@ -1844,14 +1931,14 @@ the method signatures. 
class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/stitch@v1"] - images: BatchSelector(kind=[IMAGE_KIND]) - images_predictions: BatchSelector( + images: Selector(kind=[IMAGE_KIND]) + images_predictions: Selector( kind=[OBJECT_DETECTION_PREDICTION_KIND], ) @classmethod - def accepts_batch_input(cls) -> bool: - return True + def get_parameters_accepting_batches(cls) -> bool: + return ["images", "images_predictions"] @classmethod def get_input_dimensionality_offsets(cls) -> Dict[str, int]: @@ -1946,7 +2033,7 @@ that even if some elements are empty, the output lacks missing elements making i Batch, OutputDefinition, ) - from inference.core.workflows.execution_engine.entities.types import BatchSelector + from inference.core.workflows.execution_engine.entities.types import Selector from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -1956,7 +2043,7 @@ that even if some elements are empty, the output lacks missing elements making i class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/first_non_empty_or_default@v1"] - data: List[BatchSelector()] + data: List[Selector()] default: Any @classmethod @@ -1969,7 +2056,7 @@ that even if some elements are empty, the output lacks missing elements making i @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class FirstNonEmptyOrDefaultBlockV1(WorkflowBlock): @@ -2029,7 +2116,7 @@ Let's see how to request init parameters while defining block. Batch, OutputDefinition, ) - from inference.core.workflows.execution_engine.entities.types import BatchSelector + from inference.core.workflows.execution_engine.entities.types import Selector from inference.core.workflows.prototypes.block import ( BlockResult, WorkflowBlock, @@ -2039,7 +2126,7 @@ Let's see how to request init parameters while defining block. 
class BlockManifest(WorkflowBlockManifest): type: Literal["my_plugin/example@v1"] - data: List[BatchSelector()] + data: List[Selector()] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/formatters/csv/v1.py b/inference/core/workflows/core_steps/formatters/csv/v1.py index cb22eb5cf..693bd17e5 100644 --- a/inference/core/workflows/core_steps/formatters/csv/v1.py +++ b/inference/core/workflows/core_steps/formatters/csv/v1.py @@ -173,7 +173,7 @@ def protect_timestamp_column(cls, value: dict) -> dict: return value @classmethod - def get_parameters_accepting_mixed_input(cls) -> List[str]: + def get_parameters_accepting_batches_and_scalars(cls) -> List[str]: return ["columns_data"] @classmethod diff --git a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py index 6cf539937..ae1c5d0e8 100644 --- a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py @@ -115,7 +115,7 @@ def get_parameters_accepting_batches(cls) -> List[str]: return ["predictions"] @classmethod - def get_parameters_accepting_mixed_input(cls) -> List[str]: + def get_parameters_accepting_batches_and_scalars(cls) -> List[str]: return ["operations_parameters"] @classmethod diff --git a/inference/core/workflows/execution_engine/introspection/schema_parser.py b/inference/core/workflows/execution_engine/introspection/schema_parser.py index 8e5327110..72d386b36 100644 --- a/inference/core/workflows/execution_engine/introspection/schema_parser.py +++ b/inference/core/workflows/execution_engine/introspection/schema_parser.py @@ -61,7 +61,7 @@ def parse_block_manifest( ) inputs_accepting_batches = set(manifest_type.get_parameters_accepting_batches()) inputs_accepting_batches_and_scalars = set( - manifest_type.get_parameters_accepting_mixed_input() + manifest_type.get_parameters_accepting_batches_and_scalars() ) return parse_block_manifest_schema( schema=schema, diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py index d372407a3..46e06046a 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py @@ -361,7 +361,7 @@ def assembly_manifest_class_methods( setattr(manifest_class, "get_actual_outputs", describe_outputs) accepts_batch_input = ( lambda cls: len(manifest_description.batch_oriented_parameters) > 0 - or len(manifest_description.mixed_parameters) + or len(manifest_description.parameters_with_scalars_and_batches) or manifest_description.accepts_batch_input ) setattr(manifest_class, "accepts_batch_input", classmethod(accepts_batch_input)) @@ -373,13 +373,13 @@ def assembly_manifest_class_methods( "get_parameters_accepting_batches", classmethod(get_parameters_accepting_batches), ) - get_parameters_accepting_mixed_input = ( - lambda cls: manifest_description.mixed_parameters + get_parameters_accepting_batches_and_scalars = ( + lambda cls: manifest_description.parameters_with_scalars_and_batches ) setattr( manifest_class, - "get_parameters_accepting_mixed_input", - classmethod(get_parameters_accepting_mixed_input), + "get_parameters_accepting_batches_and_scalars", + 
classmethod(get_parameters_accepting_batches_and_scalars), ) input_dimensionality_offsets = collect_input_dimensionality_offsets( inputs=manifest_description.inputs diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py index eee8a906c..6e6e6a72f 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py @@ -110,7 +110,7 @@ class ManifestDescription(BaseModel): description="List of batch-oriented parameters. Value will override `accepts_batch_input` if non-empty " "list is provided, `accepts_batch_input` is kept not to break backward compatibility.", ) - mixed_parameters: List[str] = Field( + parameters_with_scalars_and_batches: List[str] = Field( default_factory=list, description="List of parameters accepting both batches and scalars at the same time. " "Value will override `accepts_batch_input` if non-empty " diff --git a/inference/core/workflows/prototypes/block.py b/inference/core/workflows/prototypes/block.py index 7875c6bcb..6ad5db2dc 100644 --- a/inference/core/workflows/prototypes/block.py +++ b/inference/core/workflows/prototypes/block.py @@ -56,7 +56,7 @@ def get_output_dimensionality_offset( @classmethod def accepts_batch_input(cls) -> bool: return len(cls.get_parameters_accepting_batches()) > 0 or len( - cls.get_parameters_accepting_mixed_input() + cls.get_parameters_accepting_batches_and_scalars() ) @classmethod @@ -64,7 +64,7 @@ def get_parameters_accepting_batches(cls) -> List[str]: return [] @classmethod - def get_parameters_accepting_mixed_input(cls) -> List[str]: + def get_parameters_accepting_batches_and_scalars(cls) -> List[str]: return [] @classmethod diff --git a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py index 522d986f6..0a23fa7eb 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/mixed_input_characteristic_plugin/__init__.py @@ -101,7 +101,7 @@ class MixedInputWithBatchesBlockManifest(WorkflowBlockManifest): ] @classmethod - def get_parameters_accepting_mixed_input(cls) -> List[str]: + def get_parameters_accepting_batches_and_scalars(cls) -> List[str]: return ["mixed_parameter"] @classmethod @@ -241,7 +241,7 @@ class CompoundMixedInputBlockManifest(WorkflowBlockManifest): compound_parameter: Dict[str, Union[Selector(), Any]] @classmethod - def get_parameters_accepting_mixed_input(cls) -> List[str]: + def get_parameters_accepting_batches_and_scalars(cls) -> List[str]: return ["compound_parameter"] @classmethod From 2dff5de785d84aebd3a71da66158595deadd228c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 11 Nov 2024 08:15:48 +0100 Subject: [PATCH 28/67] Make linters happy --- inference_cli/lib/cloud_adapter.py | 6 +++++- tests/inference/unit_tests/usage_tracking/test_collector.py | 2 +- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/inference_cli/lib/cloud_adapter.py b/inference_cli/lib/cloud_adapter.py index 82f47e034..8f6e7a607 100644 --- a/inference_cli/lib/cloud_adapter.py +++ b/inference_cli/lib/cloud_adapter.py @@ -83,14 +83,18 @@ """, } + def check_sky_installed(): try: global sky import sky except ImportError as e: - 
print("Please install cloud deploy dependencies with 'pip install inference[cloud-deploy]'") + print( + "Please install cloud deploy dependencies with 'pip install inference[cloud-deploy]'" + ) raise e + def _random_char(y): return "".join(random.choice(string.ascii_lowercase) for x in range(y)) diff --git a/tests/inference/unit_tests/usage_tracking/test_collector.py b/tests/inference/unit_tests/usage_tracking/test_collector.py index ca93fff7b..f4335ff79 100644 --- a/tests/inference/unit_tests/usage_tracking/test_collector.py +++ b/tests/inference/unit_tests/usage_tracking/test_collector.py @@ -764,7 +764,7 @@ def test_zip_usage_payloads_with_different_exec_session_ids(): "fps": 10, "exec_session_id": "session_2", }, - } + }, }, { "fake_api1_hash": { From 89a0aef3d4e466babbdf1617ec2341d0a1e36629 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 11 Nov 2024 09:04:29 +0100 Subject: [PATCH 29/67] Fix bug with Florence block and align blocks expected EE version --- .../workflows/core_steps/analytics/data_aggregator/v1.py | 2 +- .../core/workflows/core_steps/analytics/line_counter/v1.py | 2 +- .../core/workflows/core_steps/analytics/line_counter/v2.py | 2 +- .../workflows/core_steps/analytics/path_deviation/v1.py | 2 +- .../core/workflows/core_steps/analytics/time_in_zone/v1.py | 2 +- .../core/workflows/core_steps/analytics/time_in_zone/v2.py | 2 +- .../workflows/core_steps/classical_cv/camera_focus/v1.py | 2 +- .../core/workflows/core_steps/classical_cv/contours/v1.py | 2 +- .../core_steps/classical_cv/convert_grayscale/v1.py | 2 +- .../workflows/core_steps/classical_cv/dominant_color/v1.py | 2 +- .../core/workflows/core_steps/classical_cv/image_blur/v1.py | 2 +- .../core_steps/classical_cv/image_preprocessing/v1.py | 2 +- .../core_steps/classical_cv/pixel_color_count/v1.py | 2 +- inference/core/workflows/core_steps/classical_cv/sift/v1.py | 2 +- .../workflows/core_steps/classical_cv/sift_comparison/v1.py | 2 +- .../workflows/core_steps/classical_cv/sift_comparison/v2.py | 2 +- .../core_steps/classical_cv/size_measurement/v1.py | 2 +- .../core_steps/classical_cv/template_matching/v1.py | 2 +- .../core/workflows/core_steps/classical_cv/threshold/v1.py | 2 +- .../workflows/core_steps/flow_control/continue_if/v1.py | 2 +- .../workflows/core_steps/flow_control/rate_limiter/v1.py | 2 +- inference/core/workflows/core_steps/formatters/csv/v1.py | 2 +- .../core/workflows/core_steps/formatters/expression/v1.py | 2 +- .../core_steps/formatters/first_non_empty_or_default/v1.py | 2 +- .../core/workflows/core_steps/formatters/json_parser/v1.py | 2 +- .../core_steps/formatters/property_definition/v1.py | 2 +- .../workflows/core_steps/formatters/vlm_as_classifier/v1.py | 2 +- .../workflows/core_steps/formatters/vlm_as_detector/v1.py | 2 +- .../core_steps/fusion/detections_classes_replacement/v1.py | 2 +- .../workflows/core_steps/fusion/detections_consensus/v1.py | 2 +- .../workflows/core_steps/fusion/detections_stitch/v1.py | 2 +- .../workflows/core_steps/fusion/dimension_collapse/v1.py | 2 +- .../core_steps/models/foundation/anthropic_claude/v1.py | 4 ++-- .../core_steps/models/foundation/clip_comparison/v1.py | 2 +- .../core_steps/models/foundation/clip_comparison/v2.py | 2 +- .../workflows/core_steps/models/foundation/cog_vlm/v1.py | 2 +- .../workflows/core_steps/models/foundation/florence2/v1.py | 6 +++++- .../core_steps/models/foundation/google_gemini/v1.py | 4 ++-- .../core_steps/models/foundation/google_vision_ocr/v1.py | 2 +- 
.../core/workflows/core_steps/models/foundation/lmm/v1.py | 2 +- .../core_steps/models/foundation/lmm_classifier/v1.py | 2 +- .../core/workflows/core_steps/models/foundation/ocr/v1.py | 2 +- .../workflows/core_steps/models/foundation/openai/v1.py | 4 ++-- .../workflows/core_steps/models/foundation/openai/v2.py | 4 ++-- .../core_steps/models/foundation/segment_anything2/v1.py | 2 +- .../models/foundation/stability_ai/inpainting/v1.py | 2 +- .../workflows/core_steps/models/foundation/yolo_world/v1.py | 2 +- .../core_steps/models/roboflow/instance_segmentation/v1.py | 2 +- .../core_steps/models/roboflow/keypoint_detection/v1.py | 2 +- .../models/roboflow/multi_class_classification/v1.py | 2 +- .../models/roboflow/multi_label_classification/v1.py | 2 +- .../core_steps/models/roboflow/object_detection/v1.py | 2 +- .../core_steps/models/third_party/barcode_detection/v1.py | 2 +- .../core_steps/models/third_party/qr_code_detection/v1.py | 2 +- .../workflows/core_steps/sinks/email_notification/v1.py | 2 +- inference/core/workflows/core_steps/sinks/local_file/v1.py | 2 +- .../core_steps/sinks/roboflow/custom_metadata/v1.py | 2 +- .../core_steps/sinks/roboflow/dataset_upload/v1.py | 2 +- .../core_steps/sinks/roboflow/dataset_upload/v2.py | 2 +- inference/core/workflows/core_steps/sinks/webhook/v1.py | 2 +- .../core_steps/transformations/absolute_static_crop/v1.py | 2 +- .../core_steps/transformations/bounding_rect/v1.py | 2 +- .../workflows/core_steps/transformations/byte_tracker/v1.py | 2 +- .../workflows/core_steps/transformations/byte_tracker/v2.py | 2 +- .../workflows/core_steps/transformations/byte_tracker/v3.py | 2 +- .../core_steps/transformations/detection_offset/v1.py | 2 +- .../core_steps/transformations/detections_filter/v1.py | 2 +- .../transformations/detections_transformation/v1.py | 2 +- .../workflows/core_steps/transformations/dynamic_crop/v1.py | 2 +- .../core_steps/transformations/dynamic_zones/v1.py | 2 +- .../workflows/core_steps/transformations/image_slicer/v1.py | 2 +- .../core_steps/transformations/perspective_correction/v1.py | 2 +- .../core_steps/transformations/relative_static_crop/v1.py | 2 +- .../core_steps/transformations/stabilize_detections/v1.py | 2 +- .../core_steps/transformations/stitch_images/v1.py | 2 +- .../core_steps/visualizations/background_color/v1.py | 2 +- .../core/workflows/core_steps/visualizations/blur/v1.py | 2 +- .../workflows/core_steps/visualizations/bounding_box/v1.py | 2 +- .../core/workflows/core_steps/visualizations/circle/v1.py | 2 +- .../core/workflows/core_steps/visualizations/color/v1.py | 2 +- .../core_steps/visualizations/common/base_colorable.py | 2 +- .../core/workflows/core_steps/visualizations/corner/v1.py | 2 +- .../core/workflows/core_steps/visualizations/crop/v1.py | 2 +- .../core/workflows/core_steps/visualizations/dot/v1.py | 2 +- .../core/workflows/core_steps/visualizations/ellipse/v1.py | 2 +- .../core/workflows/core_steps/visualizations/halo/v1.py | 2 +- .../core/workflows/core_steps/visualizations/label/v1.py | 2 +- .../workflows/core_steps/visualizations/line_zone/v1.py | 2 +- .../core/workflows/core_steps/visualizations/mask/v1.py | 2 +- .../core_steps/visualizations/model_comparison/v1.py | 2 +- .../core/workflows/core_steps/visualizations/pixelate/v1.py | 2 +- .../core/workflows/core_steps/visualizations/polygon/v1.py | 2 +- .../workflows/core_steps/visualizations/polygon_zone/v1.py | 2 +- .../core_steps/visualizations/reference_path/v1.py | 2 +- .../core/workflows/core_steps/visualizations/trace/v1.py | 2 +- 
.../core/workflows/core_steps/visualizations/triangle/v1.py | 2 +- 96 files changed, 104 insertions(+), 100 deletions(-) diff --git a/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py b/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py index f4589759c..e4bb6adb4 100644 --- a/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py +++ b/inference/core/workflows/core_steps/analytics/data_aggregator/v1.py @@ -321,7 +321,7 @@ def get_actual_outputs(self) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" INTERVAL_UNIT_TO_SECONDS = { diff --git a/inference/core/workflows/core_steps/analytics/line_counter/v1.py b/inference/core/workflows/core_steps/analytics/line_counter/v1.py index 64c43be2f..ba739345f 100644 --- a/inference/core/workflows/core_steps/analytics/line_counter/v1.py +++ b/inference/core/workflows/core_steps/analytics/line_counter/v1.py @@ -84,7 +84,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class LineCounterBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/analytics/line_counter/v2.py b/inference/core/workflows/core_steps/analytics/line_counter/v2.py index 953122763..6035be38c 100644 --- a/inference/core/workflows/core_steps/analytics/line_counter/v2.py +++ b/inference/core/workflows/core_steps/analytics/line_counter/v2.py @@ -89,7 +89,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class LineCounterBlockV2(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/analytics/path_deviation/v1.py b/inference/core/workflows/core_steps/analytics/path_deviation/v1.py index 2cdca4202..e1eebdd60 100644 --- a/inference/core/workflows/core_steps/analytics/path_deviation/v1.py +++ b/inference/core/workflows/core_steps/analytics/path_deviation/v1.py @@ -85,7 +85,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class PathDeviationAnalyticsBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py index 67d31a2c9..63678a6d8 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py @@ -99,7 +99,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class TimeInZoneBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py index 050d28a81..acb4d0749 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py @@ -101,7 +101,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class TimeInZoneBlockV2(WorkflowBlock): diff --git 
a/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py b/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py index efa32b2cd..3f5ca594a 100644 --- a/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/camera_focus/v1.py @@ -68,7 +68,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class CameraFocusBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/classical_cv/contours/v1.py b/inference/core/workflows/core_steps/classical_cv/contours/v1.py index 53851c7d4..8396bc614 100644 --- a/inference/core/workflows/core_steps/classical_cv/contours/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/contours/v1.py @@ -87,7 +87,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ImageContoursDetectionBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py b/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py index 538a837ff..1e99f363d 100644 --- a/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/convert_grayscale/v1.py @@ -59,7 +59,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ConvertGrayscaleBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py b/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py index 8c66cbb4e..205670ab3 100644 --- a/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/dominant_color/v1.py @@ -85,7 +85,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DominantColorBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py b/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py index f1d79a8a1..1ab40828a 100644 --- a/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/image_blur/v1.py @@ -78,7 +78,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ImageBlurBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py b/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py index 3475f739f..d4b4caf59 100644 --- a/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/image_preprocessing/v1.py @@ -124,7 +124,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ImagePreprocessingBlockV1(WorkflowBlock): diff --git 
a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py index 99e706ce7..c7611a9a6 100644 --- a/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/pixel_color_count/v1.py @@ -63,7 +63,7 @@ class ColorPixelCountManifest(WorkflowBlockManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/classical_cv/sift/v1.py b/inference/core/workflows/core_steps/classical_cv/sift/v1.py index 8c5f95624..fdb63df31 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/sift/v1.py @@ -58,7 +58,7 @@ class SIFTDetectionManifest(WorkflowBlockManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py index 8341a03f1..497fa37f6 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v1.py @@ -61,7 +61,7 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py index afdf48b1b..bed195418 100644 --- a/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py +++ b/inference/core/workflows/core_steps/classical_cv/sift_comparison/v2.py @@ -82,7 +82,7 @@ class SIFTComparisonBlockManifest(WorkflowBlockManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" @classmethod def describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py b/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py index ec2c2bba7..522b79a59 100644 --- a/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/size_measurement/v1.py @@ -92,7 +92,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" def get_detection_dimensions( diff --git a/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py b/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py index a58b82f60..de110acdf 100644 --- a/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/template_matching/v1.py @@ -100,7 +100,7 @@ class TemplateMatchingManifest(WorkflowBlockManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" @classmethod def 
describe_outputs(cls) -> List[OutputDefinition]: diff --git a/inference/core/workflows/core_steps/classical_cv/threshold/v1.py b/inference/core/workflows/core_steps/classical_cv/threshold/v1.py index f091cd48d..f75371037 100644 --- a/inference/core/workflows/core_steps/classical_cv/threshold/v1.py +++ b/inference/core/workflows/core_steps/classical_cv/threshold/v1.py @@ -91,7 +91,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ImageThresholdBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/flow_control/continue_if/v1.py b/inference/core/workflows/core_steps/flow_control/continue_if/v1.py index 5f9411dd6..275cb2a54 100644 --- a/inference/core/workflows/core_steps/flow_control/continue_if/v1.py +++ b/inference/core/workflows/core_steps/flow_control/continue_if/v1.py @@ -78,7 +78,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ContinueIfBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py index cc13b81d2..df381e5dc 100644 --- a/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py +++ b/inference/core/workflows/core_steps/flow_control/rate_limiter/v1.py @@ -74,7 +74,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class RateLimiterBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/formatters/csv/v1.py b/inference/core/workflows/core_steps/formatters/csv/v1.py index 693bd17e5..aa6bff7de 100644 --- a/inference/core/workflows/core_steps/formatters/csv/v1.py +++ b/inference/core/workflows/core_steps/formatters/csv/v1.py @@ -184,7 +184,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class CSVFormatterBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/formatters/expression/v1.py b/inference/core/workflows/core_steps/formatters/expression/v1.py index 4934c02f0..f5658d46b 100644 --- a/inference/core/workflows/core_steps/formatters/expression/v1.py +++ b/inference/core/workflows/core_steps/formatters/expression/v1.py @@ -138,7 +138,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ExpressionBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py b/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py index fe409e283..558756f7a 100644 --- a/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py +++ b/inference/core/workflows/core_steps/formatters/first_non_empty_or_default/v1.py @@ -56,7 +56,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class FirstNonEmptyOrDefaultBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/formatters/json_parser/v1.py 
b/inference/core/workflows/core_steps/formatters/json_parser/v1.py index a3cad763b..2907d5d1c 100644 --- a/inference/core/workflows/core_steps/formatters/json_parser/v1.py +++ b/inference/core/workflows/core_steps/formatters/json_parser/v1.py @@ -91,7 +91,7 @@ def get_actual_outputs(self) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class JSONParserBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/formatters/property_definition/v1.py b/inference/core/workflows/core_steps/formatters/property_definition/v1.py index 80cc546aa..6570b3a39 100644 --- a/inference/core/workflows/core_steps/formatters/property_definition/v1.py +++ b/inference/core/workflows/core_steps/formatters/property_definition/v1.py @@ -71,7 +71,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class PropertyDefinitionBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py index e4b1fd582..ac6e10c46 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v1.py @@ -93,7 +93,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class VLMAsClassifierBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py index 334ad6c9e..48c50e822 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v1.py @@ -156,7 +156,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class VLMAsDetectorBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py b/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py index bad1ce85e..038cd6940 100644 --- a/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_classes_replacement/v1.py @@ -101,7 +101,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DetectionsClassesReplacementBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py index 6c4588604..a22965eb2 100644 --- a/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_consensus/v1.py @@ -170,7 +170,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DetectionsConsensusBlockV1(WorkflowBlock): diff --git 
a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py index 26e6fc92b..375c8ae39 100644 --- a/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py +++ b/inference/core/workflows/core_steps/fusion/detections_stitch/v1.py @@ -111,7 +111,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DetectionsStitchBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py index 890875781..c38b9c5df 100644 --- a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py +++ b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py @@ -64,7 +64,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DimensionCollapseBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py index 98655132e..69a229c6d 100644 --- a/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/anthropic_claude/v1.py @@ -220,7 +220,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class AntropicClaudeBlockV1(WorkflowBlock): @@ -243,7 +243,7 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" def run( self, diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py index df0a566bd..4118b969c 100644 --- a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v1.py @@ -91,7 +91,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ClipComparisonBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py index b325f41fa..28f4b83b8 100644 --- a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py @@ -115,7 +115,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ClipComparisonBlockV2(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py b/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py index bd9930a1e..1a6678b11 100644 --- a/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/cog_vlm/v1.py @@ -107,7 +107,7 @@ def 
get_actual_outputs(self) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class CogVLMBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py index b8784e2e4..a11160a83 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py @@ -256,6 +256,10 @@ class BlockManifest(WorkflowBlockManifest): def get_parameters_accepting_batches(cls) -> List[str]: return ["images"] + @classmethod + def get_parameters_accepting_batches_and_scalars(cls) -> List[str]: + return ["grounding_detection"] + @model_validator(mode="after") def validate(self) -> "BlockManifest": if self.task_type in TASKS_REQUIRING_PROMPT and self.prompt is None: @@ -287,7 +291,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class Florence2BlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py index 6c66ca710..99d4e4608 100644 --- a/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/google_gemini/v1.py @@ -223,7 +223,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class GoogleGeminiBlockV1(WorkflowBlock): @@ -246,7 +246,7 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" def run( self, diff --git a/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py b/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py index 92d6fb86f..5fa0158e1 100644 --- a/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/google_vision_ocr/v1.py @@ -97,7 +97,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class GoogleVisionOCRBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py index d7872d85b..0234c3b75 100644 --- a/inference/core/workflows/core_steps/models/foundation/lmm/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/lmm/v1.py @@ -155,7 +155,7 @@ def get_actual_outputs(self) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class LMMBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py index c5b9da19d..fe3d8ee33 100644 --- a/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/lmm_classifier/v1.py @@ -110,7 +110,7 
@@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class LMMForClassificationBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/ocr/v1.py b/inference/core/workflows/core_steps/models/foundation/ocr/v1.py index d6db83b45..0f540a9a5 100644 --- a/inference/core/workflows/core_steps/models/foundation/ocr/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/ocr/v1.py @@ -88,7 +88,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class OCRModelBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v1.py b/inference/core/workflows/core_steps/models/foundation/openai/v1.py index beac429bb..d9f10d170 100644 --- a/inference/core/workflows/core_steps/models/foundation/openai/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/openai/v1.py @@ -139,7 +139,7 @@ def get_actual_outputs(self) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class OpenAIBlockV1(WorkflowBlock): @@ -162,7 +162,7 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" def run( self, diff --git a/inference/core/workflows/core_steps/models/foundation/openai/v2.py b/inference/core/workflows/core_steps/models/foundation/openai/v2.py index 51bcae86e..1f9d03aca 100644 --- a/inference/core/workflows/core_steps/models/foundation/openai/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/openai/v2.py @@ -218,7 +218,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class OpenAIBlockV2(WorkflowBlock): @@ -241,7 +241,7 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" def run( self, diff --git a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py index 94d5c5ea4..5893248f6 100644 --- a/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/segment_anything2/v1.py @@ -129,7 +129,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class SegmentAnything2BlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py index 9563a1045..6f9d87796 100644 --- a/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/stability_ai/inpainting/v1.py @@ -103,7 +103,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return 
">=1.3.0,<2.0.0" class StabilityAIInpaintingBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py index 1050a02ce..b54474a4f 100644 --- a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py @@ -111,7 +111,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class YoloWorldModelBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py index e2ba1fd96..3abc2d9cc 100644 --- a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py @@ -163,7 +163,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class RoboflowInstanceSegmentationModelBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py index 44b44e5c4..aacbae6af 100644 --- a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py @@ -155,7 +155,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class RoboflowKeypointDetectionModelBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py index c4531c004..10c5c11d8 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py @@ -109,7 +109,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class RoboflowClassificationModelBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py index 946b8a917..290982e50 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py @@ -109,7 +109,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class RoboflowMultiLabelClassificationModelBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py index 128efb409..608a083df 100644 --- a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py +++ 
b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py @@ -144,7 +144,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class RoboflowObjectDetectionModelBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py b/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py index 4579f487f..e48c1f0a1 100644 --- a/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py +++ b/inference/core/workflows/core_steps/models/third_party/barcode_detection/v1.py @@ -68,7 +68,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class BarcodeDetectorBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py b/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py index 801c4d818..ff9ed2dd6 100644 --- a/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py +++ b/inference/core/workflows/core_steps/models/third_party/qr_code_detection/v1.py @@ -70,7 +70,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class QRCodeDetectorBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/sinks/email_notification/v1.py b/inference/core/workflows/core_steps/sinks/email_notification/v1.py index 22432df4c..7fa250823 100644 --- a/inference/core/workflows/core_steps/sinks/email_notification/v1.py +++ b/inference/core/workflows/core_steps/sinks/email_notification/v1.py @@ -298,7 +298,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class EmailNotificationBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/sinks/local_file/v1.py b/inference/core/workflows/core_steps/sinks/local_file/v1.py index ee067ba9d..f7fcfc11f 100644 --- a/inference/core/workflows/core_steps/sinks/local_file/v1.py +++ b/inference/core/workflows/core_steps/sinks/local_file/v1.py @@ -143,7 +143,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class LocalFileSinkBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py index 3e1aeb0eb..7fa2ccfcd 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py @@ -94,7 +94,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class RoboflowCustomMetadataBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py index 1ec9db3d7..2080a21f9 100644 --- 
a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v1.py @@ -206,7 +206,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class RoboflowDatasetUploadBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py index ba3293530..bbeec7b6a 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/dataset_upload/v2.py @@ -176,7 +176,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class RoboflowDatasetUploadBlockV2(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/sinks/webhook/v1.py b/inference/core/workflows/core_steps/sinks/webhook/v1.py index ed4d8b4eb..3652b25fc 100644 --- a/inference/core/workflows/core_steps/sinks/webhook/v1.py +++ b/inference/core/workflows/core_steps/sinks/webhook/v1.py @@ -327,7 +327,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class WebhookSinkBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py b/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py index 5da88e680..755f04d02 100644 --- a/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/absolute_static_crop/v1.py @@ -72,7 +72,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class AbsoluteStaticCropBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py b/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py index 8514f6cb4..ad61a3507 100644 --- a/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py +++ b/inference/core/workflows/core_steps/transformations/bounding_rect/v1.py @@ -66,7 +66,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" def calculate_minimum_bounding_rectangle( diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py index 4360c5324..7ab759eab 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v1.py @@ -97,7 +97,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.1.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ByteTrackerBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py index f7152ae32..0be472e0e 100644 --- 
a/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v2.py @@ -103,7 +103,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ByteTrackerBlockV2(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py b/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py index 6864a54f0..e901ce456 100644 --- a/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py +++ b/inference/core/workflows/core_steps/transformations/byte_tracker/v3.py @@ -128,7 +128,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ByteTrackerBlockV3(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py index 284cdece5..9bc42da67 100644 --- a/inference/core/workflows/core_steps/transformations/detection_offset/v1.py +++ b/inference/core/workflows/core_steps/transformations/detection_offset/v1.py @@ -90,7 +90,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DetectionOffsetBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py index 198bedc98..46a10065c 100644 --- a/inference/core/workflows/core_steps/transformations/detections_filter/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_filter/v1.py @@ -114,7 +114,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DetectionsFilterBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py index ae1c5d0e8..c947569cd 100644 --- a/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py +++ b/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py @@ -133,7 +133,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DetectionsTransformationBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py index 4b891e20e..393ed5758 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_crop/v1.py @@ -122,7 +122,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DynamicCropBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py 
b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py index befbab639..f687296a3 100644 --- a/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py +++ b/inference/core/workflows/core_steps/transformations/dynamic_zones/v1.py @@ -73,7 +73,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" def calculate_simplified_polygon( diff --git a/inference/core/workflows/core_steps/transformations/image_slicer/v1.py b/inference/core/workflows/core_steps/transformations/image_slicer/v1.py index 6529a0dc2..c502212fb 100644 --- a/inference/core/workflows/core_steps/transformations/image_slicer/v1.py +++ b/inference/core/workflows/core_steps/transformations/image_slicer/v1.py @@ -101,7 +101,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ImageSlicerBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py index 010394e23..c408f0615 100644 --- a/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py +++ b/inference/core/workflows/core_steps/transformations/perspective_correction/v1.py @@ -121,7 +121,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" def pick_largest_perspective_polygons( diff --git a/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py b/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py index b387f68fc..4a94eaee0 100644 --- a/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py +++ b/inference/core/workflows/core_steps/transformations/relative_static_crop/v1.py @@ -73,7 +73,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class RelativeStaticCropBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py b/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py index 7fd6758a9..4b75c1bbc 100644 --- a/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py +++ b/inference/core/workflows/core_steps/transformations/stabilize_detections/v1.py @@ -83,7 +83,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class StabilizeTrackedDetectionsBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/transformations/stitch_images/v1.py b/inference/core/workflows/core_steps/transformations/stitch_images/v1.py index 151d678c0..d8ff6275b 100644 --- a/inference/core/workflows/core_steps/transformations/stitch_images/v1.py +++ b/inference/core/workflows/core_steps/transformations/stitch_images/v1.py @@ -80,7 +80,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class StitchImagesBlockV1(WorkflowBlock): diff 
--git a/inference/core/workflows/core_steps/visualizations/background_color/v1.py b/inference/core/workflows/core_steps/visualizations/background_color/v1.py index 733693abf..d75fdf600 100644 --- a/inference/core/workflows/core_steps/visualizations/background_color/v1.py +++ b/inference/core/workflows/core_steps/visualizations/background_color/v1.py @@ -59,7 +59,7 @@ class BackgroundColorManifest(PredictionsVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class BackgroundColorVisualizationBlockV1(PredictionsVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/blur/v1.py b/inference/core/workflows/core_steps/visualizations/blur/v1.py index 807e12950..935e0ebe0 100644 --- a/inference/core/workflows/core_steps/visualizations/blur/v1.py +++ b/inference/core/workflows/core_steps/visualizations/blur/v1.py @@ -44,7 +44,7 @@ class BlurManifest(PredictionsVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class BlurVisualizationBlockV1(PredictionsVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py b/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py index 3e6e66bab..11373d882 100644 --- a/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py +++ b/inference/core/workflows/core_steps/visualizations/bounding_box/v1.py @@ -54,7 +54,7 @@ class BoundingBoxManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class BoundingBoxVisualizationBlockV1(ColorableVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/circle/v1.py b/inference/core/workflows/core_steps/visualizations/circle/v1.py index ad96a8f7f..862d627ae 100644 --- a/inference/core/workflows/core_steps/visualizations/circle/v1.py +++ b/inference/core/workflows/core_steps/visualizations/circle/v1.py @@ -46,7 +46,7 @@ class CircleManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class CircleVisualizationBlockV1(ColorableVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/color/v1.py b/inference/core/workflows/core_steps/visualizations/color/v1.py index fcffc9821..0dafe27d7 100644 --- a/inference/core/workflows/core_steps/visualizations/color/v1.py +++ b/inference/core/workflows/core_steps/visualizations/color/v1.py @@ -47,7 +47,7 @@ class ColorManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ColorVisualizationBlockV1(ColorableVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/common/base_colorable.py b/inference/core/workflows/core_steps/visualizations/common/base_colorable.py index 810f4f3db..bf15aefea 100644 --- a/inference/core/workflows/core_steps/visualizations/common/base_colorable.py +++ b/inference/core/workflows/core_steps/visualizations/common/base_colorable.py @@ -109,7 +109,7 @@ class ColorableVisualizationManifest(PredictionsVisualizationManifest, ABC): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return 
">=1.3.0,<2.0.0" class ColorableVisualizationBlock(PredictionsVisualizationBlock, ABC): diff --git a/inference/core/workflows/core_steps/visualizations/corner/v1.py b/inference/core/workflows/core_steps/visualizations/corner/v1.py index 866ecb26d..75ad8e2cb 100644 --- a/inference/core/workflows/core_steps/visualizations/corner/v1.py +++ b/inference/core/workflows/core_steps/visualizations/corner/v1.py @@ -52,7 +52,7 @@ class CornerManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class CornerVisualizationBlockV1(ColorableVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/crop/v1.py b/inference/core/workflows/core_steps/visualizations/crop/v1.py index bdf399ace..548df08a5 100644 --- a/inference/core/workflows/core_steps/visualizations/crop/v1.py +++ b/inference/core/workflows/core_steps/visualizations/crop/v1.py @@ -74,7 +74,7 @@ class CropManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class CropVisualizationBlockV1(ColorableVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/dot/v1.py b/inference/core/workflows/core_steps/visualizations/dot/v1.py index 1b63013e8..8504be912 100644 --- a/inference/core/workflows/core_steps/visualizations/dot/v1.py +++ b/inference/core/workflows/core_steps/visualizations/dot/v1.py @@ -75,7 +75,7 @@ class DotManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class DotVisualizationBlockV1(ColorableVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/ellipse/v1.py b/inference/core/workflows/core_steps/visualizations/ellipse/v1.py index 9eeeb9f9b..b9173f36a 100644 --- a/inference/core/workflows/core_steps/visualizations/ellipse/v1.py +++ b/inference/core/workflows/core_steps/visualizations/ellipse/v1.py @@ -58,7 +58,7 @@ class EllipseManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class EllipseVisualizationBlockV1(ColorableVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/halo/v1.py b/inference/core/workflows/core_steps/visualizations/halo/v1.py index c6e085fb4..1c922380a 100644 --- a/inference/core/workflows/core_steps/visualizations/halo/v1.py +++ b/inference/core/workflows/core_steps/visualizations/halo/v1.py @@ -68,7 +68,7 @@ class HaloManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class HaloVisualizationBlockV1(ColorableVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/label/v1.py b/inference/core/workflows/core_steps/visualizations/label/v1.py index e10342dba..9a8da25c1 100644 --- a/inference/core/workflows/core_steps/visualizations/label/v1.py +++ b/inference/core/workflows/core_steps/visualizations/label/v1.py @@ -113,7 +113,7 @@ class LabelManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class LabelVisualizationBlockV1(ColorableVisualizationBlock): diff --git 
a/inference/core/workflows/core_steps/visualizations/line_zone/v1.py b/inference/core/workflows/core_steps/visualizations/line_zone/v1.py index 818419a90..77f191f8c 100644 --- a/inference/core/workflows/core_steps/visualizations/line_zone/v1.py +++ b/inference/core/workflows/core_steps/visualizations/line_zone/v1.py @@ -90,7 +90,7 @@ class LineCounterZoneVisualizationManifest(VisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class LineCounterZoneVisualizationBlockV1(VisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/mask/v1.py b/inference/core/workflows/core_steps/visualizations/mask/v1.py index d762ac77e..8717cb977 100644 --- a/inference/core/workflows/core_steps/visualizations/mask/v1.py +++ b/inference/core/workflows/core_steps/visualizations/mask/v1.py @@ -58,7 +58,7 @@ class MaskManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class MaskVisualizationBlockV1(ColorableVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py b/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py index 15d75938e..5187a8a34 100644 --- a/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py +++ b/inference/core/workflows/core_steps/visualizations/model_comparison/v1.py @@ -99,7 +99,7 @@ class ModelComparisonManifest(VisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.0.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ModelComparisonVisualizationBlockV1(PredictionsVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/pixelate/v1.py b/inference/core/workflows/core_steps/visualizations/pixelate/v1.py index b788ec424..40f7e7212 100644 --- a/inference/core/workflows/core_steps/visualizations/pixelate/v1.py +++ b/inference/core/workflows/core_steps/visualizations/pixelate/v1.py @@ -44,7 +44,7 @@ class PixelateManifest(PredictionsVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class PixelateVisualizationBlockV1(PredictionsVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/polygon/v1.py b/inference/core/workflows/core_steps/visualizations/polygon/v1.py index 2858b7c3a..b8193c247 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon/v1.py +++ b/inference/core/workflows/core_steps/visualizations/polygon/v1.py @@ -60,7 +60,7 @@ class PolygonManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class PolygonVisualizationBlockV1(ColorableVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py b/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py index 406155150..1badaba98 100644 --- a/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py +++ b/inference/core/workflows/core_steps/visualizations/polygon_zone/v1.py @@ -62,7 +62,7 @@ class PolygonZoneVisualizationManifest(VisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class 
PolygonZoneVisualizationBlockV1(VisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/reference_path/v1.py b/inference/core/workflows/core_steps/visualizations/reference_path/v1.py index 2c921b856..6b85d6808 100644 --- a/inference/core/workflows/core_steps/visualizations/reference_path/v1.py +++ b/inference/core/workflows/core_steps/visualizations/reference_path/v1.py @@ -72,7 +72,7 @@ def validate_thickness_greater_than_zero( @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class ReferencePathVisualizationBlockV1(WorkflowBlock): diff --git a/inference/core/workflows/core_steps/visualizations/trace/v1.py b/inference/core/workflows/core_steps/visualizations/trace/v1.py index 46309f5d5..67332ff4a 100644 --- a/inference/core/workflows/core_steps/visualizations/trace/v1.py +++ b/inference/core/workflows/core_steps/visualizations/trace/v1.py @@ -77,7 +77,7 @@ def ensure_max_entries_per_file_is_correct(cls, value: Any) -> Any: @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class TraceVisualizationBlockV1(ColorableVisualizationBlock): diff --git a/inference/core/workflows/core_steps/visualizations/triangle/v1.py b/inference/core/workflows/core_steps/visualizations/triangle/v1.py index 6b8637617..ce0ecd562 100644 --- a/inference/core/workflows/core_steps/visualizations/triangle/v1.py +++ b/inference/core/workflows/core_steps/visualizations/triangle/v1.py @@ -79,7 +79,7 @@ class TriangleManifest(ColorableVisualizationManifest): @classmethod def get_execution_engine_compatibility(cls) -> Optional[str]: - return ">=1.2.0,<2.0.0" + return ">=1.3.0,<2.0.0" class TriangleVisualizationBlockV1(ColorableVisualizationBlock): From 6f3ca50dbf7853903207dcb7a426b6c903c754c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 11 Nov 2024 09:10:02 +0100 Subject: [PATCH 30/67] Fix typo in docs --- docs/workflows/execution_engine_changelog.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index f35be618c..562bdaec3 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -78,7 +78,7 @@ format introduced **at the level of Execution Engine**). As a result of the chan we propose the switch into two methods explicitly defining the parameters that are expected to be fed with batch-oriented data (`block_manifest.get_parameters_accepting_batches()`) and parameters capable of taking both *batches* and *scalar* values - (`block_manifest.get_parameters_accepting_mixed_input()`). Return value of `block_manifest.accepts_batch_input()` + (`block_manifest.get_parameters_accepting_batches_and_scalars()`). Return value of `block_manifest.accepts_batch_input()` is built upon the results of two new methods. The change is **non-breaking**, as any existing block which was capable of processing batches must have implemented `block_manifest.accepts_batch_input()` method returning `True` and use appropriate selector type annotation which indicated batch-oriented data. @@ -247,7 +247,7 @@ subsets of steps**, enabling building such tools as debuggers. 
return ["predictions"] @classmethod - def get_parameters_accepting_mixed_input(cls) -> List[str]: + def get_parameters_accepting_batches_and_scalars(cls) -> List[str]: return ["data"] ``` @@ -256,7 +256,7 @@ subsets of steps**, enabling building such tools as debuggers. * the `data` property in the original example was able to accept both **batches** of data and **scalar** values due to selector of batch-orienetd data (`StepOutputSelector`) and *scalar* data (`WorkflowParameterSelector`). Now the same is manifested by `Selector(...)` type - annotation and return value from `get_parameters_accepting_mixed_input(...)` method. + annotation and return value from `get_parameters_accepting_batches_and_scalars(...)` method. ??? Hint "New inputs in Workflows definitions" From a42fb71df916cf020f3962e03c69419d01d9276f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Mon, 11 Nov 2024 09:52:23 +0100 Subject: [PATCH 31/67] Fix issue with docs generation --- docs/workflows/blocks.md | 2 + docs/workflows/execution_engine_changelog.md | 2 +- docs/workflows/kinds.md | 50 +++++++++---------- .../execution_engine/entities/types.py | 3 +- .../introspection/connections_discovery.py | 2 + .../v1/introspection/inputs_discovery.py | 14 +++--- 6 files changed, 39 insertions(+), 34 deletions(-) diff --git a/docs/workflows/blocks.md b/docs/workflows/blocks.md index 0f620e4f7..e69e3ee5e 100644 --- a/docs/workflows/blocks.md +++ b/docs/workflows/blocks.md @@ -88,12 +88,14 @@ hide:
+
+
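The execution-engine changelog excerpts quoted in the patches above describe the manifest interface that the `>=1.3.0,<2.0.0` compatibility bumps in this series target: `Selector(...)` annotations replace the older `StepOutputSelector` / `WorkflowParameterSelector` types, and two class methods declare which parameters receive batches and which may receive either batches or scalar values. The sketch below only illustrates that shape; the class name, field set, and import paths are assumptions pieced together from the files touched in these patches, not code taken from the patch series itself.

```python
from typing import List, Literal, Optional

# Import paths are assumptions based on the modules edited in this patch series.
from inference.core.workflows.execution_engine.entities.base import OutputDefinition
from inference.core.workflows.execution_engine.entities.types import (
    FLOAT_KIND,
    OBJECT_DETECTION_PREDICTION_KIND,
    Selector,
)
from inference.core.workflows.prototypes.block import WorkflowBlockManifest


class ExampleBlockManifest(WorkflowBlockManifest):
    # Hypothetical block identifier - real blocks declare their own type literal.
    type: Literal["my_plugin/example@v1"]
    # Batch-oriented input: always delivered as a batch at runtime.
    predictions: Selector(kind=[OBJECT_DETECTION_PREDICTION_KIND])
    # Mixed input: may be wired to a step output (batch) or a workflow parameter (scalar).
    data: Selector()
    # Scalar configuration value.
    confidence: Selector(kind=[FLOAT_KIND])

    @classmethod
    def get_parameters_accepting_batches(cls) -> List[str]:
        return ["predictions"]

    @classmethod
    def get_parameters_accepting_batches_and_scalars(cls) -> List[str]:
        return ["data"]

    @classmethod
    def describe_outputs(cls) -> List[OutputDefinition]:
        return [OutputDefinition(name="result")]

    @classmethod
    def get_execution_engine_compatibility(cls) -> Optional[str]:
        # The compatibility range the patches above move every core block to.
        return ">=1.3.0,<2.0.0"
```

Declared this way, the engine can derive `accepts_batch_input()` from the two lists, which is what keeps the change non-breaking for blocks that predate the split.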
diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index 562bdaec3..b8decc38b 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -243,7 +243,7 @@ subsets of steps**, enabling building such tools as debuggers. confidence: Selector(kind=[FLOAT_KIND]) @classmethod - def get_parameters_accepting_batches(cls) -> List[str]: + def get_parameters_accepting_batches(cls)W -> List[str]: return ["predictions"] @classmethod diff --git a/docs/workflows/kinds.md b/docs/workflows/kinds.md index c81c92905..ed14bd8ab 100644 --- a/docs/workflows/kinds.md +++ b/docs/workflows/kinds.md @@ -68,37 +68,37 @@ providing compile-time verification of Workflows definitions. ## Kinds declared in Roboflow plugins -* [`roboflow_project`](/workflows/kinds/roboflow_project): Roboflow project name -* [`point`](/workflows/kinds/point): Single point in 2D -* [`bytes`](/workflows/kinds/bytes): This kind represent bytes +* [`image`](/workflows/kinds/image): Image in workflows +* [`float`](/workflows/kinds/float): Float value +* [`numpy_array`](/workflows/kinds/numpy_array): Numpy array +* [`prediction_type`](/workflows/kinds/prediction_type): String value with type of prediction * [`language_model_output`](/workflows/kinds/language_model_output): LLM / VLM output -* [`dictionary`](/workflows/kinds/dictionary): Dictionary -* [`video_metadata`](/workflows/kinds/video_metadata): Video image metadata * [`image_metadata`](/workflows/kinds/image_metadata): Dictionary with image metadata required by supervision -* [`zone`](/workflows/kinds/zone): Definition of polygon zone -* [`rgb_color`](/workflows/kinds/rgb_color): RGB color -* [`string`](/workflows/kinds/string): String value -* [`serialised_payloads`](/workflows/kinds/serialised_payloads): Serialised element that is usually accepted by sink -* [`detection`](/workflows/kinds/detection): Single element of detections-based prediction (like `object_detection_prediction`) -* [`list_of_values`](/workflows/kinds/list_of_values): List of values of any type -* [`numpy_array`](/workflows/kinds/numpy_array): Numpy array +* [`keypoint_detection_prediction`](/workflows/kinds/keypoint_detection_prediction): Prediction with detected bounding boxes and detected keypoints in form of sv.Detections(...) object +* [`top_class`](/workflows/kinds/top_class): String value representing top class predicted by classification model +* [`video_metadata`](/workflows/kinds/video_metadata): Video image metadata * [`qr_code_detection`](/workflows/kinds/qr_code_detection): Prediction with QR code detection -* [`float`](/workflows/kinds/float): Float value -* [`*`](/workflows/kinds/*): Equivalent of any element -* [`bar_code_detection`](/workflows/kinds/bar_code_detection): Prediction with barcode detection +* [`contours`](/workflows/kinds/contours): List of numpy arrays where each array represents contour points +* [`roboflow_model_id`](/workflows/kinds/roboflow_model_id): Roboflow model id * [`object_detection_prediction`](/workflows/kinds/object_detection_prediction): Prediction with detected bounding boxes in form of sv.Detections(...) 
object +* [`roboflow_project`](/workflows/kinds/roboflow_project): Roboflow project name * [`image_keypoints`](/workflows/kinds/image_keypoints): Image keypoints detected by classical Computer Vision method -* [`keypoint_detection_prediction`](/workflows/kinds/keypoint_detection_prediction): Prediction with detected bounding boxes and detected keypoints in form of sv.Detections(...) object +* [`list_of_values`](/workflows/kinds/list_of_values): List of values of any type * [`float_zero_to_one`](/workflows/kinds/float_zero_to_one): `float` value in range `[0.0, 1.0]` -* [`image`](/workflows/kinds/image): Image in workflows -* [`roboflow_model_id`](/workflows/kinds/roboflow_model_id): Roboflow model id -* [`integer`](/workflows/kinds/integer): Integer value -* [`top_class`](/workflows/kinds/top_class): String value representing top class predicted by classification model -* [`boolean`](/workflows/kinds/boolean): Boolean flag -* [`roboflow_api_key`](/workflows/kinds/roboflow_api_key): Roboflow API key * [`instance_segmentation_prediction`](/workflows/kinds/instance_segmentation_prediction): Prediction with detected bounding boxes and segmentation masks in form of sv.Detections(...) object -* [`contours`](/workflows/kinds/contours): List of numpy arrays where each array represents contour points -* [`prediction_type`](/workflows/kinds/prediction_type): String value with type of prediction -* [`parent_id`](/workflows/kinds/parent_id): Identifier of parent for step output +* [`rgb_color`](/workflows/kinds/rgb_color): RGB color +* [`boolean`](/workflows/kinds/boolean): Boolean flag +* [`bar_code_detection`](/workflows/kinds/bar_code_detection): Prediction with barcode detection * [`classification_prediction`](/workflows/kinds/classification_prediction): Predictions from classifier +* [`string`](/workflows/kinds/string): String value +* [`parent_id`](/workflows/kinds/parent_id): Identifier of parent for step output +* [`point`](/workflows/kinds/point): Single point in 2D +* [`bytes`](/workflows/kinds/bytes): This kind represent bytes +* [`serialised_payloads`](/workflows/kinds/serialised_payloads): Serialised element that is usually accepted by sink +* [`dictionary`](/workflows/kinds/dictionary): Dictionary +* [`*`](/workflows/kinds/*): Equivalent of any element +* [`detection`](/workflows/kinds/detection): Single element of detections-based prediction (like `object_detection_prediction`) +* [`integer`](/workflows/kinds/integer): Integer value +* [`zone`](/workflows/kinds/zone): Definition of polygon zone +* [`roboflow_api_key`](/workflows/kinds/roboflow_api_key): Roboflow API key diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index bbc7d3079..be94ec362 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -1025,6 +1025,7 @@ def __hash__(self) -> int: STEP_OUTPUT_AS_SELECTED_ELEMENT = "step_output" BATCH_AS_SELECTED_ELEMENT = "batch" SCALAR_AS_SELECTED_ELEMENT = "scalar" +ANY_DATA_AS_SELECTED_ELEMENT = "any_data" StepSelector = Annotated[ str, @@ -1132,7 +1133,7 @@ def Selector( kind = [WILDCARD_KIND] json_schema_extra = { REFERENCE_KEY: True, - SELECTED_ELEMENT_KEY: "any", + SELECTED_ELEMENT_KEY: ANY_DATA_AS_SELECTED_ELEMENT, KIND_KEY: [k.dict() for k in kind], SELECTOR_POINTS_TO_BATCH_KEY: "dynamic", } diff --git a/inference/core/workflows/execution_engine/introspection/connections_discovery.py 
b/inference/core/workflows/execution_engine/introspection/connections_discovery.py index 8ad1b5831..a8cd19377 100644 --- a/inference/core/workflows/execution_engine/introspection/connections_discovery.py +++ b/inference/core/workflows/execution_engine/introspection/connections_discovery.py @@ -2,6 +2,7 @@ from typing import Dict, Generator, List, Set, Tuple, Type from inference.core.workflows.execution_engine.entities.types import ( + ANY_DATA_AS_SELECTED_ELEMENT, BATCH_AS_SELECTED_ELEMENT, STEP_AS_SELECTED_ELEMENT, STEP_OUTPUT_AS_SELECTED_ELEMENT, @@ -44,6 +45,7 @@ def discover_blocks_connections( compatible_elements = { STEP_OUTPUT_AS_SELECTED_ELEMENT, BATCH_AS_SELECTED_ELEMENT, + ANY_DATA_AS_SELECTED_ELEMENT, } coarse_input_kind2schemas = convert_kinds_mapping_to_block_wise_format( detailed_input_kind2schemas=detailed_input_kind2schemas, diff --git a/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py b/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py index 3dfb1d746..b660870b4 100644 --- a/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py +++ b/inference/core/workflows/execution_engine/v1/introspection/inputs_discovery.py @@ -36,7 +36,7 @@ "workflow_video_metadata": {"WorkflowVideoMetadata"}, "workflow_image": {"WorkflowImage", "InferenceImage"}, "workflow_parameter": {"WorkflowParameter", "InferenceParameter"}, - "any": { + "any_data": { "WorkflowVideoMetadata", "WorkflowImage", "InferenceImage", @@ -46,15 +46,15 @@ }, } INPUT_TYPE_TO_SELECTED_ELEMENT = { - "WorkflowVideoMetadata": {"workflow_video_metadata", "any"}, - "WorkflowImage": {"workflow_image", "any"}, - "InferenceImage": {"workflow_image", "any"}, - "WorkflowParameter": {"workflow_parameter", "any"}, - "InferenceParameter": {"workflow_parameter", "any"}, + "WorkflowVideoMetadata": {"workflow_video_metadata", "any_data"}, + "WorkflowImage": {"workflow_image", "any_data"}, + "InferenceImage": {"workflow_image", "any_data"}, + "WorkflowParameter": {"workflow_parameter", "any_data"}, + "InferenceParameter": {"workflow_parameter", "any_data"}, "WorkflowBatchInput": { "workflow_image", "workflow_video_metadata", - "any", + "any_data", }, } From 8f5dd02472d24a7c84f730d8122fa3ae8b726954 Mon Sep 17 00:00:00 2001 From: Peter Robicheaux Date: Tue, 12 Nov 2024 01:04:40 +0000 Subject: [PATCH 32/67] Florence fts working --- .../core/workflows/core_steps/common/vlms.py | 9 ++++++ .../models/foundation/florence2/v1.py | 30 +++++++++++-------- inference/models/transformers/transformers.py | 4 +-- 3 files changed, 28 insertions(+), 15 deletions(-) diff --git a/inference/core/workflows/core_steps/common/vlms.py b/inference/core/workflows/core_steps/common/vlms.py index c85727337..4252100b9 100644 --- a/inference/core/workflows/core_steps/common/vlms.py +++ b/inference/core/workflows/core_steps/common/vlms.py @@ -80,3 +80,12 @@ "description": "Model returns a JSON response with the specified fields", }, } + + +FLORENCE_TASKS_METADATA = { + "unstructured": { + "name": "Unstructured Prompt", + "description": "Use free-form prompt to generate a response. 
Useful with finetuned models.", + }, + **VLM_TASKS_METADATA, +} diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py index 930977a6a..72959a894 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py @@ -8,7 +8,7 @@ from inference.core.entities.requests.inference import LMMInferenceRequest from inference.core.managers.base import ModelManager from inference.core.workflows.core_steps.common.entities import StepExecutionMode -from inference.core.workflows.core_steps.common.vlms import VLM_TASKS_METADATA +from inference.core.workflows.core_steps.common.vlms import FLORENCE_TASKS_METADATA from inference.core.workflows.execution_engine.entities.base import ( Batch, OutputDefinition, @@ -21,6 +21,7 @@ LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, + ROBOFLOW_MODEL_ID_KIND, STRING_KIND, ImageInputField, StepOutputImageSelector, @@ -77,12 +78,13 @@ }, {"task_type": "detection-grounded-ocr", "florence_task": ""}, {"task_type": "region-proposal", "florence_task": ""}, + {"task_type": "unstructured", "florence_task": ""} ] TASK_TYPE_TO_FLORENCE_TASK = { task["task_type"]: task["florence_task"] for task in SUPPORTED_TASK_TYPES_LIST } RELEVANT_TASKS_METADATA = { - k: v for k, v in VLM_TASKS_METADATA.items() if k in TASK_TYPE_TO_FLORENCE_TASK + k: v for k, v in FLORENCE_TASKS_METADATA.items() if k in TASK_TYPE_TO_FLORENCE_TASK } RELEVANT_TASKS_DOCS_DESCRIPTION = "\n\n".join( f"* **{v['name']}** (`{k}`) - {v['description']}" @@ -127,6 +129,7 @@ TASKS_REQUIRING_PROMPT = { "phrase-grounded-object-detection", "phrase-grounded-instance-segmentation", + "unstructured", } TASKS_REQUIRING_CLASSES = { "open-vocabulary-object-detection", @@ -164,13 +167,11 @@ class BlockManifest(WorkflowBlockManifest): ) type: Literal["roboflow_core/florence_2@v1"] images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - model_version: Union[ - WorkflowParameterSelector(kind=[STRING_KIND]), - Literal["florence-2-base", "florence-2-large"], - ] = Field( + model_id: Union[WorkflowParameterSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = Field( default="florence-2-base", description="Model to be used", examples=["florence-2-base"], + json_schema_extra={"always_visible": True}, ) task_type: TaskType = Field( default="open-vocabulary-object-detection", @@ -317,7 +318,7 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: def run( self, images: Batch[WorkflowImageData], - model_version: str, + model_id: str, task_type: TaskType, prompt: Optional[str], classes: Optional[List[str]], @@ -330,7 +331,7 @@ def run( return self.run_locally( images=images, task_type=task_type, - model_version=model_version, + model_id=model_id, prompt=prompt, classes=classes, grounding_detection=grounding_detection, @@ -348,7 +349,7 @@ def run( def run_locally( self, images: Batch[WorkflowImageData], - model_version: str, + model_id: str, task_type: TaskType, prompt: Optional[str], classes: Optional[List[str]], @@ -374,7 +375,7 @@ def run_locally( grounding_selection_mode=grounding_selection_mode, ) self._model_manager.add_model( - model_id=model_version, + model_id=model_id, api_key=self._api_key, ) predictions = [] @@ -387,15 +388,18 @@ def run_locally( continue request = LMMInferenceRequest( api_key=self._api_key, - model_id=model_version, + model_id=model_id, image=image, 
source="workflow-execution", prompt=task_type + (single_prompt or ""), ) prediction = self._model_manager.infer_from_request_sync( - model_id=model_version, request=request + model_id=model_id, request=request ) - prediction_data = prediction.response[task_type] + if task_type == "": + prediction_data = prediction.response[list(prediction.response.keys())[0]] + else: + prediction_data = prediction.response[task_type] if task_type in TASKS_TO_EXTRACT_LABELS_AS_CLASSES: classes = prediction_data.get("labels", []) predictions.append( diff --git a/inference/models/transformers/transformers.py b/inference/models/transformers/transformers.py index 8a1ed382b..3d19ecb36 100644 --- a/inference/models/transformers/transformers.py +++ b/inference/models/transformers/transformers.py @@ -126,10 +126,12 @@ def predict(self, image_in: Image.Image, prompt="", history=None, **kwargs): max_new_tokens=1000, do_sample=False, early_stopping=False, + no_repeat_ngram_size=0, ) generation = generation[0] if self.generation_includes_input: generation = generation[input_len:] + decoded = self.processor.decode( generation, skip_special_tokens=self.skip_special_tokens ) @@ -151,7 +153,6 @@ def get_infer_bucket_file_list(self) -> list: "config.json", "special_tokens_map.json", "generation_config.json", - "model.safetensors.index.json", "tokenizer.json", re.compile(r"model-\d{5}-of-\d{5}\.safetensors"), "preprocessor_config.json", @@ -286,7 +287,6 @@ def get_infer_bucket_file_list(self) -> list: "adapter_config.json", "special_tokens_map.json", "tokenizer.json", - "tokenizer.model", "adapter_model.safetensors", "preprocessor_config.json", "tokenizer_config.json", From 69887c6be9cfbfa883cbb5c65cdc222218323db3 Mon Sep 17 00:00:00 2001 From: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> Date: Tue, 12 Nov 2024 18:58:45 +0100 Subject: [PATCH 33/67] Add turn server configuration to webrtc connection --- inference/core/env.py | 4 ++++ .../stream_manager/manager_app/webrtc.py | 21 +++++++++++++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/inference/core/env.py b/inference/core/env.py index 6eb060216..9a1ca0a07 100644 --- a/inference/core/env.py +++ b/inference/core/env.py @@ -452,3 +452,7 @@ WORKFLOW_BLOCKS_WRITE_DIRECTORY = os.getenv("WORKFLOW_BLOCKS_WRITE_DIRECTORY") DEDICATED_DEPLOYMENT_ID = os.getenv("DEDICATED_DEPLOYMENT_ID") + +WEBRTC_TURN_IP = os.getenv("WEBRTC_TURN_IP") +WEBRTC_TURN_USERNAME = os.getenv("WEBRTC_TURN_USERNAME") +WEBRTC_TURN_SHARED_SECRET = os.getenv("WEBRTC_TURN_SHARED_SECRET") diff --git a/inference/core/interfaces/stream_manager/manager_app/webrtc.py b/inference/core/interfaces/stream_manager/manager_app/webrtc.py index 24431955b..3c01edf87 100644 --- a/inference/core/interfaces/stream_manager/manager_app/webrtc.py +++ b/inference/core/interfaces/stream_manager/manager_app/webrtc.py @@ -5,13 +5,24 @@ from typing import Dict, Optional, Tuple import numpy as np -from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack +from aiortc import ( + RTCConfiguration, + RTCIceServer, + RTCPeerConnection, + RTCSessionDescription, + VideoStreamTrack, +) from aiortc.contrib.media import MediaRelay from aiortc.mediastreams import MediaStreamError from aiortc.rtcrtpreceiver import RemoteStreamTrack from av import VideoFrame from inference.core import logger +from inference.core.env import ( + WEBRTC_TURN_IP, + WEBRTC_TURN_SHARED_SECRET, + WEBRTC_TURN_USERNAME, +) from inference.core.interfaces.camera.entities import ( SourceProperties, 
VideoFrameProducer, @@ -218,8 +229,14 @@ async def init_rtc_peer_connection( webcam_fps=webcam_fps, ) + turn_server = RTCIceServer( + urls=[f"turn:{WEBRTC_TURN_IP}:3478"], + username=WEBRTC_TURN_USERNAME, + credential=WEBRTC_TURN_SHARED_SECRET, + ) peer_connection = RTCPeerConnectionWithFPS( - video_transform_track=video_transform_track + video_transform_track=video_transform_track, + configuration=RTCConfiguration(iceServers=[turn_server]), ) relay = MediaRelay() From c3a3dd8b43ea56f6d6f7e0f755d1bc1bf0ba7097 Mon Sep 17 00:00:00 2001 From: Peter Robicheaux Date: Tue, 12 Nov 2024 19:09:23 +0000 Subject: [PATCH 34/67] Inference tweaks --- inference/core/workflows/core_steps/loader.py | 4 ++ .../models/foundation/florence2/v1.py | 63 ++++++++++--------- 2 files changed, 37 insertions(+), 30 deletions(-) diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index efc67723f..476f2d53b 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -121,6 +121,9 @@ from inference.core.workflows.core_steps.models.foundation.florence2.v1 import ( Florence2BlockV1, ) +from inference.core.workflows.core_steps.models.foundation.florence2.v2 import ( + Florence2BlockV2, +) from inference.core.workflows.core_steps.models.foundation.google_gemini.v1 import ( GoogleGeminiBlockV1, ) @@ -387,6 +390,7 @@ def load_blocks() -> List[Type[WorkflowBlock]]: DotVisualizationBlockV1, EllipseVisualizationBlockV1, Florence2BlockV1, + Florence2BlockV2, GoogleGeminiBlockV1, GoogleVisionOCRBlockV1, HaloVisualizationBlockV1, diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py index 72959a894..ae2b9b18d 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py @@ -21,7 +21,6 @@ LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, - ROBOFLOW_MODEL_ID_KIND, STRING_KIND, ImageInputField, StepOutputImageSelector, @@ -78,7 +77,7 @@ }, {"task_type": "detection-grounded-ocr", "florence_task": ""}, {"task_type": "region-proposal", "florence_task": ""}, - {"task_type": "unstructured", "florence_task": ""} + {"task_type": "unstructured", "florence_task": ""}, ] TASK_TYPE_TO_FLORENCE_TASK = { task["task_type"]: task["florence_task"] for task in SUPPORTED_TASK_TYPES_LIST @@ -150,29 +149,8 @@ } -class BlockManifest(WorkflowBlockManifest): - model_config = ConfigDict( - json_schema_extra={ - "name": "Florence-2 Model", - "version": "v1", - "short_description": "Run Florence-2 on an image", - "long_description": LONG_DESCRIPTION, - "license": "Apache-2.0", - "block_type": "model", - "search_keywords": ["Florence", "Florence-2", "Microsoft"], - "is_vlm_block": True, - "task_type_property": "task_type", - }, - protected_namespaces=(), - ) - type: Literal["roboflow_core/florence_2@v1"] +class BaseManifest(WorkflowBlockManifest): images: Union[WorkflowImageSelector, StepOutputImageSelector] = ImageInputField - model_id: Union[WorkflowParameterSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = Field( - default="florence-2-base", - description="Model to be used", - examples=["florence-2-base"], - json_schema_extra={"always_visible": True}, - ) task_type: TaskType = Field( default="open-vocabulary-object-detection", description="Task type to be performed by model. 
" @@ -294,6 +272,31 @@ def describe_outputs(cls) -> List[OutputDefinition]: def get_execution_engine_compatibility(cls) -> Optional[str]: return ">=1.0.0,<2.0.0" +class BlockManifest(BaseManifest): + type: Literal["roboflow_core/florence_2@v1"] + model_version: Union[ + WorkflowParameterSelector(kind=[STRING_KIND]), + Literal["florence-2-base", "florence-2-large"], + ] = Field( + default="florence-2-base", + description="Model to be used", + examples=["florence-2-base"], + ) + + model_config = ConfigDict( + json_schema_extra={ + "name": "Florence-2 Model", + "version": "v1", + "short_description": "Run Florence-2 on an image", + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "model", + "search_keywords": ["Florence", "Florence-2", "Microsoft"], + "is_vlm_block": True, + "task_type_property": "task_type", + }, + protected_namespaces=(), + ) class Florence2BlockV1(WorkflowBlock): @@ -318,7 +321,7 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: def run( self, images: Batch[WorkflowImageData], - model_id: str, + model_version: str, task_type: TaskType, prompt: Optional[str], classes: Optional[List[str]], @@ -331,7 +334,7 @@ def run( return self.run_locally( images=images, task_type=task_type, - model_id=model_id, + model_version=model_version, prompt=prompt, classes=classes, grounding_detection=grounding_detection, @@ -349,7 +352,7 @@ def run( def run_locally( self, images: Batch[WorkflowImageData], - model_id: str, + model_version: str, task_type: TaskType, prompt: Optional[str], classes: Optional[List[str]], @@ -375,7 +378,7 @@ def run_locally( grounding_selection_mode=grounding_selection_mode, ) self._model_manager.add_model( - model_id=model_id, + model_id=model_version, api_key=self._api_key, ) predictions = [] @@ -388,13 +391,13 @@ def run_locally( continue request = LMMInferenceRequest( api_key=self._api_key, - model_id=model_id, + model_id=model_version, image=image, source="workflow-execution", prompt=task_type + (single_prompt or ""), ) prediction = self._model_manager.infer_from_request_sync( - model_id=model_id, request=request + model_id=model_version, request=request ) if task_type == "": prediction_data = prediction.response[list(prediction.response.keys())[0]] From ab24790c8c173e4dc056dc1c3ead03d582571d52 Mon Sep 17 00:00:00 2001 From: Peter Robicheaux Date: Tue, 12 Nov 2024 19:09:56 +0000 Subject: [PATCH 35/67] Add in replacement block --- .../models/foundation/florence2/v2.py | 78 +++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 inference/core/workflows/core_steps/models/foundation/florence2/v2.py diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v2.py b/inference/core/workflows/core_steps/models/foundation/florence2/v2.py new file mode 100644 index 000000000..e1a9dc946 --- /dev/null +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v2.py @@ -0,0 +1,78 @@ +from inference.core.workflows.core_steps.models.foundation.florence2.v1 import ( + BaseManifest, + Florence2BlockV1, + TaskType, + GroundingSelectionMode, + LONG_DESCRIPTION, +) +from typing import Type, Union, Optional, List, Literal + +from pydantic import ConfigDict, Field +import supervision as sv + +from inference.core.workflows.execution_engine.entities.types import ( + WorkflowParameterSelector, +) + +from inference.core.workflows.execution_engine.entities.types import ( + ROBOFLOW_MODEL_ID_KIND, +) +from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest +from 
inference.core.workflows.execution_engine.entities.base import ( + Batch, + WorkflowImageData, +) + + +class V2BlockManifest(BaseManifest): + type: Literal["roboflow_core/florence_2@v2"] + model_id: Union[WorkflowParameterSelector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = ( + Field( + default="florence-2-base", + description="Model to be used", + examples=["florence-2-base"], + json_schema_extra={"always_visible": True}, + ) + ) + model_config = ConfigDict( + json_schema_extra={ + "name": "Florence-2 Model", + "version": "v2", + "short_description": "Run Florence-2 on an image", + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "model", + "search_keywords": ["Florence", "Florence-2", "Microsoft"], + "is_vlm_block": True, + "task_type_property": "task_type", + }, + protected_namespaces=(), + ) + + +class Florence2BlockV2(Florence2BlockV1): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return V2BlockManifest + + def run( + self, + images: Batch[WorkflowImageData], + model_id: str, + task_type: TaskType, + prompt: Optional[str], + classes: Optional[List[str]], + grounding_detection: Optional[ + Union[Batch[sv.Detections], List[int], List[float]] + ], + grounding_selection_mode: GroundingSelectionMode, + ) -> BlockResult: + return super().run( + images, + model_id, + task_type, + prompt, + classes, + grounding_detection, + grounding_selection_mode, + ) From fcbf977c82a3ae7ef306db352cc5c3b1bcd67505 Mon Sep 17 00:00:00 2001 From: Peter Robicheaux Date: Tue, 12 Nov 2024 19:10:10 +0000 Subject: [PATCH 36/67] Style --- .../models/foundation/florence2/v1.py | 6 ++++- .../models/foundation/florence2/v2.py | 25 ++++++++----------- 2 files changed, 16 insertions(+), 15 deletions(-) diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py index ae2b9b18d..71fbd15ad 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py @@ -272,6 +272,7 @@ def describe_outputs(cls) -> List[OutputDefinition]: def get_execution_engine_compatibility(cls) -> Optional[str]: return ">=1.0.0,<2.0.0" + class BlockManifest(BaseManifest): type: Literal["roboflow_core/florence_2@v1"] model_version: Union[ @@ -298,6 +299,7 @@ class BlockManifest(BaseManifest): protected_namespaces=(), ) + class Florence2BlockV1(WorkflowBlock): def __init__( @@ -400,7 +402,9 @@ def run_locally( model_id=model_version, request=request ) if task_type == "": - prediction_data = prediction.response[list(prediction.response.keys())[0]] + prediction_data = prediction.response[ + list(prediction.response.keys())[0] + ] else: prediction_data = prediction.response[task_type] if task_type in TASKS_TO_EXTRACT_LABELS_AS_CLASSES: diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v2.py b/inference/core/workflows/core_steps/models/foundation/florence2/v2.py index e1a9dc946..0ff2a0867 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v2.py @@ -1,27 +1,24 @@ +from typing import List, Literal, Optional, Type, Union + +import supervision as sv +from pydantic import ConfigDict, Field + from inference.core.workflows.core_steps.models.foundation.florence2.v1 import ( + LONG_DESCRIPTION, BaseManifest, Florence2BlockV1, - TaskType, GroundingSelectionMode, - LONG_DESCRIPTION, + TaskType, ) 
-from typing import Type, Union, Optional, List, Literal - -from pydantic import ConfigDict, Field -import supervision as sv - -from inference.core.workflows.execution_engine.entities.types import ( - WorkflowParameterSelector, +from inference.core.workflows.execution_engine.entities.base import ( + Batch, + WorkflowImageData, ) - from inference.core.workflows.execution_engine.entities.types import ( ROBOFLOW_MODEL_ID_KIND, + WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import BlockResult, WorkflowBlockManifest -from inference.core.workflows.execution_engine.entities.base import ( - Batch, - WorkflowImageData, -) class V2BlockManifest(BaseManifest): From 99f970e1ea729b55b4a59edc26ce6af25e29817a Mon Sep 17 00:00:00 2001 From: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> Date: Wed, 13 Nov 2024 16:23:11 +0100 Subject: [PATCH 37/67] Disable telemetry when running YOLO world --- inference/models/yolo_world/yolo_world.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/inference/models/yolo_world/yolo_world.py b/inference/models/yolo_world/yolo_world.py index a5d790440..f300b7515 100644 --- a/inference/models/yolo_world/yolo_world.py +++ b/inference/models/yolo_world/yolo_world.py @@ -5,7 +5,7 @@ import clip import numpy as np import torch -from ultralytics import YOLO +from ultralytics import YOLO, settings from inference.core import logger from inference.core.cache import cache @@ -30,6 +30,10 @@ EMBEDDINGS_EXPIRE_TIMEOUT = 1800 # 30 min +settings.update({"sync": False}) +settings.reset() + + class YOLOWorld(RoboflowCoreModel): """YOLO-World class for zero-shot object detection. From 1c2e4945c4ba67fa5a59b94b4e04db64c275641f Mon Sep 17 00:00:00 2001 From: Thomas Hansen Date: Wed, 13 Nov 2024 09:35:44 -0600 Subject: [PATCH 38/67] Update docs/workflows/execution_engine_changelog.md Co-authored-by: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> --- docs/workflows/execution_engine_changelog.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index b8decc38b..9302e97d3 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -54,7 +54,7 @@ any *kind***, contrary to versions prior `v1.3.0`, which could only take `image` as batch-oriented inputs (as a result of unfortunate and not-needed coupling of kind to internal data format introduced **at the level of Execution Engine**). As a result of the change: - * **new input type was introduced:** `WorkflowBatchInput` should be used from now one to denote + * **new input type was introduced:** `WorkflowBatchInput` should be used from now on to denote batch-oriented inputs (and clearly separate them from `WorkflowParameters`). `WorkflowBatchInput` let users define both *[kind](/workflows/kinds/)* of the data and it's *[dimensionality](/workflows/workflow_execution/#steps-interactions-with-data)*. 
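To make the `WorkflowBatchInput` description above concrete, here is a minimal, illustrative sketch of how such an input could be declared in a Workflow definition. The exact `kind` and `dimensionality` field names are assumptions inferred from the changelog wording, not taken verbatim from these patches.

```python
# Hedged sketch of a Workflow definition using the new batch-oriented input type.
# Field names ("kind", "dimensionality") are assumed from the changelog description above.
example_definition = {
    "version": "1.0",
    "inputs": [
        # classic scalar parameter, unchanged by this release
        {"type": "WorkflowParameter", "name": "confidence"},
        # new batch-oriented input declaring both kind and dimensionality explicitly
        {"type": "WorkflowBatchInput", "name": "image", "kind": ["image"], "dimensionality": 1},
    ],
    "steps": [],
    "outputs": [],
}
```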
From 9b9ff3c4815bc5afa36dd108c8d1ffb22cc6ec1a Mon Sep 17 00:00:00 2001 From: Thomas Hansen Date: Wed, 13 Nov 2024 09:36:32 -0600 Subject: [PATCH 39/67] Update docs/workflows/execution_engine_changelog.md Co-authored-by: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> --- docs/workflows/execution_engine_changelog.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index 9302e97d3..719ab865a 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -65,7 +65,7 @@ format introduced **at the level of Execution Engine**). As a result of the chan properly. This may not be the case in the future, as in most cases batch-oriented data *kind* may be inferred by compiler (yet this feature is not implemented for now). - * **new selector type annotation were introduced** - named simply `Selector(...)`. + * **new selector type annotation was introduced** - named simply `Selector(...)`. `Selector(...)` is supposed to replace `StepOutputSelector`, `WorkflowImageSelector`, `StepOutputImageSelector`, `WorkflowVideoMetadataSelector` and `WorkflowParameterSelector` in block manifests, letting developers express that specific step manifest property is able to hold either selector of specific *kind*. From e8971035e3344422a2d167fcfcae4d1fb7163cad Mon Sep 17 00:00:00 2001 From: Thomas Hansen Date: Wed, 13 Nov 2024 09:36:46 -0600 Subject: [PATCH 40/67] Update docs/workflows/workflows_compiler.md Co-authored-by: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> --- docs/workflows/workflows_compiler.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/workflows/workflows_compiler.md b/docs/workflows/workflows_compiler.md index 420cfa7f8..ee2049e6e 100644 --- a/docs/workflows/workflows_compiler.md +++ b/docs/workflows/workflows_compiler.md @@ -245,7 +245,7 @@ have two almost equivalent ways of running: Since the default way for Workflow blocks to deal with the batches is to consume them element-by-element, **there is no real difference** between **batch-oriented data** and **scalars** -in such case. Execution Engine simply unpack scalars from batches and pass them to each step. +in such case. Execution Engine simply unpacks scalars from batches and pass them to each step. The process may complicate when block accepts batch input. You will learn the details in [blocks development guide](/workflows/create_workflow_block/), but From 21cd0a65243908635793a78b5e187fac210f19d8 Mon Sep 17 00:00:00 2001 From: Thomas Hansen Date: Wed, 13 Nov 2024 09:37:55 -0600 Subject: [PATCH 41/67] Update docs/workflows/workflows_compiler.md Co-authored-by: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> --- docs/workflows/workflows_compiler.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/workflows/workflows_compiler.md b/docs/workflows/workflows_compiler.md index ee2049e6e..9bb90d671 100644 --- a/docs/workflows/workflows_compiler.md +++ b/docs/workflows/workflows_compiler.md @@ -249,7 +249,7 @@ in such case. Execution Engine simply unpacks scalars from batches and pass them The process may complicate when block accepts batch input. 
You will learn the details in [blocks development guide](/workflows/create_workflow_block/), but -block is required to denote each inputs that must be provided *batch-wise* and all inputs +block is required to denote each input that must be provided *batch-wise* and all inputs which can be feed with both batch-oriented data and scalars at the same time (which is much less common case). In such cases, *lineage* is used to deduce if the actual data feed into every step input is *batch* or *scalar*. When violation is detected (for instance *scalar* is provided for input From 053157b2af716dab72a079ef616f49ad3afcf6ae Mon Sep 17 00:00:00 2001 From: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> Date: Wed, 13 Nov 2024 18:51:50 +0100 Subject: [PATCH 42/67] Pass webrtc TURN config as request parameter when calling POST /inference_pipelines/initialise_webrtc --- inference/core/env.py | 4 ---- .../stream_manager/manager_app/entities.py | 7 +++++++ .../manager_app/inference_pipeline_manager.py | 2 ++ .../stream_manager/manager_app/webrtc.py | 17 ++++++++--------- 4 files changed, 17 insertions(+), 13 deletions(-) diff --git a/inference/core/env.py b/inference/core/env.py index 9a1ca0a07..6eb060216 100644 --- a/inference/core/env.py +++ b/inference/core/env.py @@ -452,7 +452,3 @@ WORKFLOW_BLOCKS_WRITE_DIRECTORY = os.getenv("WORKFLOW_BLOCKS_WRITE_DIRECTORY") DEDICATED_DEPLOYMENT_ID = os.getenv("DEDICATED_DEPLOYMENT_ID") - -WEBRTC_TURN_IP = os.getenv("WEBRTC_TURN_IP") -WEBRTC_TURN_USERNAME = os.getenv("WEBRTC_TURN_USERNAME") -WEBRTC_TURN_SHARED_SECRET = os.getenv("WEBRTC_TURN_SHARED_SECRET") diff --git a/inference/core/interfaces/stream_manager/manager_app/entities.py b/inference/core/interfaces/stream_manager/manager_app/entities.py index bbf4f7270..8b017c0d9 100644 --- a/inference/core/interfaces/stream_manager/manager_app/entities.py +++ b/inference/core/interfaces/stream_manager/manager_app/entities.py @@ -91,8 +91,15 @@ class WebRTCOffer(BaseModel): sdp: str +class WebRTCTURNConfig(BaseModel): + urls: str + username: str + credential: str + + class InitialiseWebRTCPipelinePayload(InitialisePipelinePayload): webrtc_offer: WebRTCOffer + webrtc_turn_config: WebRTCTURNConfig stream_output: Optional[List[str]] = Field(default_factory=list) data_output: Optional[List[str]] = Field(default_factory=list) webrtc_peer_timeout: float = 1 diff --git a/inference/core/interfaces/stream_manager/manager_app/inference_pipeline_manager.py b/inference/core/interfaces/stream_manager/manager_app/inference_pipeline_manager.py index cfb7cb552..e037a0718 100644 --- a/inference/core/interfaces/stream_manager/manager_app/inference_pipeline_manager.py +++ b/inference/core/interfaces/stream_manager/manager_app/inference_pipeline_manager.py @@ -242,6 +242,7 @@ def start_loop(loop: asyncio.AbstractEventLoop): t.start() webrtc_offer = parsed_payload.webrtc_offer + webrtc_turn_config = parsed_payload.webrtc_turn_config webcam_fps = parsed_payload.webcam_fps to_inference_queue = SyncAsyncQueue(loop=loop) from_inference_queue = SyncAsyncQueue(loop=loop) @@ -251,6 +252,7 @@ def start_loop(loop: asyncio.AbstractEventLoop): future = asyncio.run_coroutine_threadsafe( init_rtc_peer_connection( webrtc_offer=webrtc_offer, + webrtc_turn_config=webrtc_turn_config, to_inference_queue=to_inference_queue, from_inference_queue=from_inference_queue, webrtc_peer_timeout=parsed_payload.webrtc_peer_timeout, diff --git a/inference/core/interfaces/stream_manager/manager_app/webrtc.py 
b/inference/core/interfaces/stream_manager/manager_app/webrtc.py index 3c01edf87..dd5288980 100644 --- a/inference/core/interfaces/stream_manager/manager_app/webrtc.py +++ b/inference/core/interfaces/stream_manager/manager_app/webrtc.py @@ -18,16 +18,14 @@ from av import VideoFrame from inference.core import logger -from inference.core.env import ( - WEBRTC_TURN_IP, - WEBRTC_TURN_SHARED_SECRET, - WEBRTC_TURN_USERNAME, -) from inference.core.interfaces.camera.entities import ( SourceProperties, VideoFrameProducer, ) -from inference.core.interfaces.stream_manager.manager_app.entities import WebRTCOffer +from inference.core.interfaces.stream_manager.manager_app.entities import ( + WebRTCOffer, + WebRTCTURNConfig, +) from inference.core.utils.async_utils import Queue as SyncAsyncQueue from inference.core.utils.function import experimental @@ -214,6 +212,7 @@ def __init__(self, video_transform_track: VideoTransformTrack, *args, **kwargs): async def init_rtc_peer_connection( webrtc_offer: WebRTCOffer, + webrtc_turn_config: WebRTCTURNConfig, to_inference_queue: "SyncAsyncQueue[VideoFrame]", from_inference_queue: "SyncAsyncQueue[np.ndarray]", webrtc_peer_timeout: float, @@ -230,9 +229,9 @@ async def init_rtc_peer_connection( ) turn_server = RTCIceServer( - urls=[f"turn:{WEBRTC_TURN_IP}:3478"], - username=WEBRTC_TURN_USERNAME, - credential=WEBRTC_TURN_SHARED_SECRET, + urls=[webrtc_turn_config.urls], + username=webrtc_turn_config.username, + credential=webrtc_turn_config.credential, ) peer_connection = RTCPeerConnectionWithFPS( video_transform_track=video_transform_track, From 5beeb0468fd4feaaf92412ae45895fed9dae5295 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Wed, 13 Nov 2024 19:08:17 +0100 Subject: [PATCH 43/67] Apply suggestions from PR CR --- docs/workflows/workflows_compiler.md | 4 ++-- .../workflows/core_steps/common/serializers.py | 14 +++++++------- .../v1/executor/output_constructor.py | 5 ----- .../core_steps/analytics/test_line_counter_v2.py | 14 ++++++++++++-- 4 files changed, 21 insertions(+), 16 deletions(-) diff --git a/docs/workflows/workflows_compiler.md b/docs/workflows/workflows_compiler.md index 9bb90d671..4d2bbf06d 100644 --- a/docs/workflows/workflows_compiler.md +++ b/docs/workflows/workflows_compiler.md @@ -250,8 +250,8 @@ in such case. Execution Engine simply unpacks scalars from batches and pass them The process may complicate when block accepts batch input. You will learn the details in [blocks development guide](/workflows/create_workflow_block/), but block is required to denote each input that must be provided *batch-wise* and all inputs -which can be feed with both batch-oriented data and scalars at the same time (which is much -less common case). In such cases, *lineage* is used to deduce if the actual data feed into +which can be fed with both batch-oriented data and scalars at the same time (which is much +less common case). In such cases, *lineage* is used to deduce if the actual data fed into every step input is *batch* or *scalar*. When violation is detected (for instance *scalar* is provided for input that requires batches or vice versa) - the error is raised. 
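The compiler documentation patched above relies on blocks declaring which inputs must be fed batch-wise and which may take either batches or scalars. Below is a minimal sketch of such a manifest, assuming the `Selector(...)` annotation and the `get_parameters_accepting_batches*` classmethods introduced earlier in this changelog; the block type name and the output definition are made up purely for illustration.

```python
from typing import List, Literal

from inference.core.workflows.execution_engine.entities.base import OutputDefinition
from inference.core.workflows.execution_engine.entities.types import (
    FLOAT_KIND,
    OBJECT_DETECTION_PREDICTION_KIND,
    Selector,
)
from inference.core.workflows.prototypes.block import WorkflowBlockManifest


class ExampleManifest(WorkflowBlockManifest):
    # hypothetical block identifier, for illustration only
    type: Literal["my_plugin/example_block@v1"]
    # must always be fed batch-wise (declared below)
    predictions: Selector(kind=[OBJECT_DETECTION_PREDICTION_KIND])
    # may be fed with either a batch or a scalar - lineage decides at compile time
    data: Selector()
    # plain scalar parameter
    confidence: Selector(kind=[FLOAT_KIND])

    @classmethod
    def get_parameters_accepting_batches(cls) -> List[str]:
        return ["predictions"]

    @classmethod
    def get_parameters_accepting_batches_and_scalars(cls) -> List[str]:
        return ["data"]

    @classmethod
    def describe_outputs(cls) -> List[OutputDefinition]:
        return [OutputDefinition(name="results", kind=[OBJECT_DETECTION_PREDICTION_KIND])]
```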
diff --git a/inference/core/workflows/core_steps/common/serializers.py b/inference/core/workflows/core_steps/common/serializers.py index 1a014cdc3..aa0cfea6f 100644 --- a/inference/core/workflows/core_steps/common/serializers.py +++ b/inference/core/workflows/core_steps/common/serializers.py @@ -158,15 +158,15 @@ def serialize_wildcard_kind(value: Any) -> Any: if isinstance(value, WorkflowImageData): value = serialise_image(image=value) elif isinstance(value, dict): - value = serialise_dict(elements=value) + value = serialize_dict(elements=value) elif isinstance(value, list): - value = serialise_list(elements=value) + value = serialize_list(elements=value) elif isinstance(value, sv.Detections): value = serialise_sv_detections(detections=value) return value -def serialise_list(elements: List[Any]) -> List[Any]: +def serialize_list(elements: List[Any]) -> List[Any]: result = [] for element in elements: element = serialize_wildcard_kind(value=element) @@ -174,9 +174,9 @@ def serialise_list(elements: List[Any]) -> List[Any]: return result -def serialise_dict(elements: Dict[str, Any]) -> Dict[str, Any]: - serialised_result = {} +def serialize_dict(elements: Dict[str, Any]) -> Dict[str, Any]: + serialized_result = {} for key, value in elements.items(): value = serialize_wildcard_kind(value=value) - serialised_result[key] = value - return serialised_result + serialized_result[key] = value + return serialized_result diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index 883a3f541..594bda09d 100644 --- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -4,10 +4,6 @@ import supervision as sv from networkx import DiGraph -from inference.core.workflows.core_steps.common.serializers import ( - serialise_image, - serialise_sv_detections, -) from inference.core.workflows.core_steps.common.utils import ( sv_detections_to_root_coordinates, ) @@ -18,7 +14,6 @@ from inference.core.workflows.execution_engine.entities.base import ( CoordinatesSystem, JsonField, - WorkflowImageData, ) from inference.core.workflows.execution_engine.entities.types import WILDCARD_KIND, Kind from inference.core.workflows.execution_engine.v1.compiler.entities import OutputNode diff --git a/tests/workflows/unit_tests/core_steps/analytics/test_line_counter_v2.py b/tests/workflows/unit_tests/core_steps/analytics/test_line_counter_v2.py index 0ff8ddfb6..eb6e8797b 100644 --- a/tests/workflows/unit_tests/core_steps/analytics/test_line_counter_v2.py +++ b/tests/workflows/unit_tests/core_steps/analytics/test_line_counter_v2.py @@ -63,8 +63,18 @@ def test_line_counter() -> None: ) # then - assert frame1_result == {"count_in": 0, "count_out": 0, "detections_in": frame1_detections[[False, False, False, False]], "detections_out": frame1_detections[[False, False, False, False]]} - assert frame2_result == {"count_in": 1, "count_out": 1, "detections_in": frame2_detections[[True, False, False, False]], "detections_out": frame2_detections[[False, True, False, False]]} + assert frame1_result == { + "count_in": 0, + "count_out": 0, + "detections_in": frame1_detections[[False, False, False, False]], + "detections_out": frame1_detections[[False, False, False, False]], + } + assert frame2_result == { + "count_in": 1, + "count_out": 1, + "detections_in": frame2_detections[[True, False, False, False]], + "detections_out": 
frame2_detections[[False, True, False, False]], + } def test_line_counter_no_trackers() -> None: From 05097d7e07349a41ca631b4a3bb44a448a5ceea4 Mon Sep 17 00:00:00 2001 From: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> Date: Wed, 13 Nov 2024 19:23:43 +0100 Subject: [PATCH 44/67] fix broken build --- requirements/_requirements.txt | 2 +- requirements/requirements.sdk.http.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/_requirements.txt b/requirements/_requirements.txt index 1d65cb8ad..5f5b64f25 100644 --- a/requirements/_requirements.txt +++ b/requirements/_requirements.txt @@ -16,7 +16,7 @@ pybase64<2.0.0 scikit-image>=0.19.0 requests-toolbelt>=1.0.0 wheel>=0.38.1 -setuptools>=70.0.0,<=72.1.0 +setuptools>=70.0.0 pytest-asyncio<=0.21.1 networkx>=3.1 pydantic~=2.6 diff --git a/requirements/requirements.sdk.http.txt b/requirements/requirements.sdk.http.txt index 6d86bb690..ba244d8a1 100644 --- a/requirements/requirements.sdk.http.txt +++ b/requirements/requirements.sdk.http.txt @@ -5,7 +5,7 @@ pillow>=9.0.0 requests>=2.27.0 supervision>=0.20.0,<1.0.0 numpy<=1.26.4 -aiohttp>=3.9.0 +aiohttp>=3.9.0,<=3.10.11 backoff>=2.2.0 aioresponses>=0.7.6 py-cpuinfo>=9.0.0 From aa0a209c2c15e755b60eeffa53543fcf22833a4d Mon Sep 17 00:00:00 2001 From: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> Date: Wed, 13 Nov 2024 20:06:10 +0100 Subject: [PATCH 45/67] Remove reset from YOLO settings --- inference/models/yolo_world/yolo_world.py | 1 - 1 file changed, 1 deletion(-) diff --git a/inference/models/yolo_world/yolo_world.py b/inference/models/yolo_world/yolo_world.py index f300b7515..3209b41e8 100644 --- a/inference/models/yolo_world/yolo_world.py +++ b/inference/models/yolo_world/yolo_world.py @@ -31,7 +31,6 @@ settings.update({"sync": False}) -settings.reset() class YOLOWorld(RoboflowCoreModel): From 917d78443e1dcba8dff7ef38fd36025999ae2d2a Mon Sep 17 00:00:00 2001 From: Peter Robicheaux Date: Wed, 13 Nov 2024 22:36:06 +0000 Subject: [PATCH 46/67] Address pr comments --- .../core/workflows/core_steps/common/vlms.py | 9 -------- .../models/foundation/florence2/v1.py | 23 +++++++++++++++---- .../models/foundation/florence2/v2.py | 14 +++++------ 3 files changed, 26 insertions(+), 20 deletions(-) diff --git a/inference/core/workflows/core_steps/common/vlms.py b/inference/core/workflows/core_steps/common/vlms.py index 4252100b9..c85727337 100644 --- a/inference/core/workflows/core_steps/common/vlms.py +++ b/inference/core/workflows/core_steps/common/vlms.py @@ -80,12 +80,3 @@ "description": "Model returns a JSON response with the specified fields", }, } - - -FLORENCE_TASKS_METADATA = { - "unstructured": { - "name": "Unstructured Prompt", - "description": "Use free-form prompt to generate a response. 
Useful with finetuned models.", - }, - **VLM_TASKS_METADATA, -} diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py index 71fbd15ad..ff64e866b 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py @@ -8,7 +8,7 @@ from inference.core.entities.requests.inference import LMMInferenceRequest from inference.core.managers.base import ModelManager from inference.core.workflows.core_steps.common.entities import StepExecutionMode -from inference.core.workflows.core_steps.common.vlms import FLORENCE_TASKS_METADATA +from inference.core.workflows.core_steps.common.vlms import VLM_TASKS_METADATA from inference.core.workflows.execution_engine.entities.base import ( Batch, OutputDefinition, @@ -37,6 +37,14 @@ T = TypeVar("T") K = TypeVar("K") +FLORENCE_TASKS_METADATA = { + "custom": { + "name": "Custom Prompt", + "description": "Use free-form prompt to generate a response. Useful with finetuned models.", + }, + **VLM_TASKS_METADATA, +} + DETECTIONS_CLASS_NAME_FIELD = "class_name" DETECTION_ID_FIELD = "detection_id" @@ -77,7 +85,7 @@ }, {"task_type": "detection-grounded-ocr", "florence_task": ""}, {"task_type": "region-proposal", "florence_task": ""}, - {"task_type": "unstructured", "florence_task": ""}, + {"task_type": "custom", "florence_task": None}, ] TASK_TYPE_TO_FLORENCE_TASK = { task["task_type"]: task["florence_task"] for task in SUPPORTED_TASK_TYPES_LIST @@ -364,6 +372,8 @@ def run_locally( grounding_selection_mode: GroundingSelectionMode, ) -> BlockResult: requires_detection_grounding = task_type in TASKS_REQUIRING_DETECTION_GROUNDING + + is_not_florence_task = task_type == "custom" task_type = TASK_TYPE_TO_FLORENCE_TASK[task_type] inference_images = [ i.to_inference_format(numpy_preferred=False) for i in images @@ -391,17 +401,22 @@ def run_locally( {"raw_output": None, "parsed_output": None, "classes": None} ) continue + if is_not_florence_task: + prompt = single_prompt or "" + else: + prompt = task_type + (single_prompt or "") + request = LMMInferenceRequest( api_key=self._api_key, model_id=model_version, image=image, source="workflow-execution", - prompt=task_type + (single_prompt or ""), + prompt=prompt, ) prediction = self._model_manager.infer_from_request_sync( model_id=model_version, request=request ) - if task_type == "": + if is_not_florence_task: prediction_data = prediction.response[ list(prediction.response.keys())[0] ] diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v2.py b/inference/core/workflows/core_steps/models/foundation/florence2/v2.py index 0ff2a0867..28a11e248 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v2.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v2.py @@ -65,11 +65,11 @@ def run( grounding_selection_mode: GroundingSelectionMode, ) -> BlockResult: return super().run( - images, - model_id, - task_type, - prompt, - classes, - grounding_detection, - grounding_selection_mode, + images=images, + model_version=model_id, + task_type=task_type, + prompt=prompt, + classes=classes, + grounding_detection=grounding_detection, + grounding_selection_mode=grounding_selection_mode, ) From d6bfbe0a460eebe795620e5a54e0a03aed1e2a12 Mon Sep 17 00:00:00 2001 From: Peter Robicheaux Date: Wed, 13 Nov 2024 23:38:26 +0000 Subject: [PATCH 47/67] Bugfix --- 
.../core/workflows/core_steps/models/foundation/florence2/v1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py index f461eb2d0..b42f50456 100644 --- a/inference/core/workflows/core_steps/models/foundation/florence2/v1.py +++ b/inference/core/workflows/core_steps/models/foundation/florence2/v1.py @@ -134,7 +134,7 @@ TASKS_REQUIRING_PROMPT = { "phrase-grounded-object-detection", "phrase-grounded-instance-segmentation", - "unstructured", + "custom", } TASKS_REQUIRING_CLASSES = { "open-vocabulary-object-detection", From c5dcbe88bce843961fda3f5d9e4cf7357833aed6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 11:35:29 +0100 Subject: [PATCH 48/67] Pin all dependencies and update to new versions of libs --- .../stream_interface/workflows_demo.py | 2 +- .../aggregating_objects_passing_data.py | 2 +- .../video_analysis/using_webhook_sink.py | 2 +- docker/dockerfiles/Dockerfile.onnx.cpu | 4 +- docker/dockerfiles/Dockerfile.onnx.cpu.dev | 4 +- .../dockerfiles/Dockerfile.onnx.cpu.parallel | 2 +- docker/dockerfiles/Dockerfile.onnx.cpu.slim | 2 +- .../Dockerfile.onnx.cpu.stream_manager | 1 + docker/dockerfiles/Dockerfile.onnx.gpu | 3 +- docker/dockerfiles/Dockerfile.onnx.gpu.dev | 1 + .../dockerfiles/Dockerfile.onnx.gpu.parallel | 3 +- docker/dockerfiles/Dockerfile.onnx.gpu.slim | 2 +- .../Dockerfile.onnx.gpu.stream_manager | 1 + .../dockerfiles/Dockerfile.onnx.jetson.4.5.0 | 1 + .../dockerfiles/Dockerfile.onnx.jetson.4.6.1 | 1 + .../dockerfiles/Dockerfile.onnx.jetson.5.1.1 | 1 + ...ockerfile.onnx.jetson.5.1.1.stream_manager | 1 + .../dockerfiles/Dockerfile.onnx.jetson.6.0.0 | 5 ++- docker/dockerfiles/Dockerfile.onnx.lambda | 1 + .../dockerfiles/Dockerfile.onnx.lambda.slim | 1 + docker/dockerfiles/Dockerfile.onnx.trt | 1 + docker/dockerfiles/Dockerfile.onnx.udp.gpu | 1 + .../Dockerfile.stream_management_api | 1 + docs/foundation/paligemma.md | 2 +- docs/foundation/yolo_world.md | 4 +- docs/inference_helpers/inference_cli.md | 13 +++--- docs/notebooks/inference_pipeline_rtsp.ipynb | 1 - docs/quickstart/explore_models.md | 2 +- docs/quickstart/load_from_universe.md | 2 +- docs/quickstart/run_model_on_rtsp_webcam.md | 2 +- docs/using_inference/http_api.md | 6 ++- docs/using_inference/native_python_api.md | 2 +- docs/workflows/create_workflow_block.md | 4 +- examples/notebooks/inference_sdk.ipynb | 2 +- examples/notebooks/quickstart.ipynb | 2 +- examples/notebooks/workflows.ipynb | 2 +- inference/core/interfaces/stream/sinks.py | 10 ++--- .../core_steps/analytics/time_in_zone/v1.py | 1 - .../core_steps/analytics/time_in_zone/v2.py | 1 - .../configs/bounding_boxes_tracing.yml | 6 +-- inference_cli/lib/infer_adapter.py | 7 +-- requirements/_requirements.txt | 45 +++++++++---------- requirements/requirements.cli.txt | 18 ++++---- requirements/requirements.doctr.txt | 4 +- requirements/requirements.hosted.txt | 6 +-- requirements/requirements.http.txt | 4 +- requirements/requirements.jetson.txt | 8 ++-- requirements/requirements.parallel.txt | 4 +- requirements/requirements.sdk.http.txt | 14 +++--- requirements/requirements.waf.txt | 2 +- .../tile_detections_batch.py | 2 +- .../tile_detections_non_batch.py | 2 +- 52 files changed, 118 insertions(+), 103 deletions(-) diff --git a/development/stream_interface/workflows_demo.py b/development/stream_interface/workflows_demo.py index 5bfa7501c..b7fc41a34 100644 --- 
a/development/stream_interface/workflows_demo.py +++ b/development/stream_interface/workflows_demo.py @@ -11,7 +11,7 @@ from inference.core.utils.drawing import create_tiles STOP = False -ANNOTATOR = sv.BoundingBoxAnnotator() +ANNOTATOR = sv.BoxAnnotator() fps_monitor = sv.FPSMonitor() diff --git a/development/workflows_examples/video_analysis/aggregating_objects_passing_data.py b/development/workflows_examples/video_analysis/aggregating_objects_passing_data.py index db7702bda..73d1a5138 100644 --- a/development/workflows_examples/video_analysis/aggregating_objects_passing_data.py +++ b/development/workflows_examples/video_analysis/aggregating_objects_passing_data.py @@ -122,7 +122,7 @@ } STOP = False -ANNOTATOR = sv.BoundingBoxAnnotator() +ANNOTATOR = sv.BoxAnnotator() fps_monitor = sv.FPSMonitor() diff --git a/development/workflows_examples/video_analysis/using_webhook_sink.py b/development/workflows_examples/video_analysis/using_webhook_sink.py index 6201c7124..836de0670 100644 --- a/development/workflows_examples/video_analysis/using_webhook_sink.py +++ b/development/workflows_examples/video_analysis/using_webhook_sink.py @@ -208,7 +208,7 @@ } STOP = False -ANNOTATOR = sv.BoundingBoxAnnotator() +ANNOTATOR = sv.BoxAnnotator() fps_monitor = sv.FPSMonitor() diff --git a/docker/dockerfiles/Dockerfile.onnx.cpu b/docker/dockerfiles/Dockerfile.onnx.cpu index 5cb200532..c04e3ba6f 100644 --- a/docker/dockerfiles/Dockerfile.onnx.cpu +++ b/docker/dockerfiles/Dockerfile.onnx.cpu @@ -39,7 +39,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.yolo_world.txt \ -r requirements.transformers.txt \ jupyterlab \ - wheel>=0.38.0 \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip @@ -51,7 +51,7 @@ COPY --from=base / / WORKDIR /build COPY . . RUN make create_wheels -RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_cpu*.whl dist/inference_sdk*.whl +RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_cpu*.whl dist/inference_sdk*.whl setuptools<=75.5.0 RUN if [ "${TARGETPLATFORM}" = "linux/amd64" ]; then pip3 install -r requirements/requirements.vino.txt; rm -rf ~/.cache/pip; fi diff --git a/docker/dockerfiles/Dockerfile.onnx.cpu.dev b/docker/dockerfiles/Dockerfile.onnx.cpu.dev index 32c19370c..a41f84d06 100644 --- a/docker/dockerfiles/Dockerfile.onnx.cpu.dev +++ b/docker/dockerfiles/Dockerfile.onnx.cpu.dev @@ -39,7 +39,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.yolo_world.txt \ -r requirements.transformers.txt \ jupyterlab \ - wheel>=0.38.0 \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip @@ -51,7 +51,7 @@ COPY --from=base / / WORKDIR /build COPY . . 
RUN make create_wheels -RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_cpu*.whl dist/inference_sdk*.whl +RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_cpu*.whl dist/inference_sdk*.whl setuptools<=75.5.0 RUN pip3 install watchdog[watchmedo] RUN if [ "${TARGETPLATFORM}" = "linux/amd64" ]; then pip3 install -r requirements/requirements.vino.txt; rm -rf ~/.cache/pip; fi diff --git a/docker/dockerfiles/Dockerfile.onnx.cpu.parallel b/docker/dockerfiles/Dockerfile.onnx.cpu.parallel index 64f3d770b..61bcb3ea5 100644 --- a/docker/dockerfiles/Dockerfile.onnx.cpu.parallel +++ b/docker/dockerfiles/Dockerfile.onnx.cpu.parallel @@ -42,7 +42,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.parallel.txt \ -r requirements.cli.txt \ -r requirements.sdk.http.txt \ - wheel>=0.38.0 \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip RUN apt-get update && apt-get install -y lsb-release curl gpg diff --git a/docker/dockerfiles/Dockerfile.onnx.cpu.slim b/docker/dockerfiles/Dockerfile.onnx.cpu.slim index 855186bb4..b3119a42f 100644 --- a/docker/dockerfiles/Dockerfile.onnx.cpu.slim +++ b/docker/dockerfiles/Dockerfile.onnx.cpu.slim @@ -32,7 +32,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.waf.txt \ -r requirements.cli.txt \ -r requirements.sdk.http.txt \ - wheel>=0.38.0 \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.cpu.stream_manager b/docker/dockerfiles/Dockerfile.onnx.cpu.stream_manager index d1003dedf..957bf046b 100644 --- a/docker/dockerfiles/Dockerfile.onnx.cpu.stream_manager +++ b/docker/dockerfiles/Dockerfile.onnx.cpu.stream_manager @@ -23,6 +23,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r _requirements.txt \ -r requirements.cpu.txt \ -r requirements.http.txt \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.gpu b/docker/dockerfiles/Dockerfile.onnx.gpu index 6f4162f41..d15a33c1c 100644 --- a/docker/dockerfiles/Dockerfile.onnx.gpu +++ b/docker/dockerfiles/Dockerfile.onnx.gpu @@ -44,6 +44,7 @@ RUN python3 -m pip install \ -r requirements.yolo_world.txt \ -r requirements.transformers.txt \ jupyterlab \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip @@ -60,7 +61,7 @@ WORKDIR /build COPY . . RUN ln -s /usr/bin/python3 /usr/bin/python RUN /bin/make create_wheels_for_gpu_notebook -RUN pip3 install --no-cache-dir dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl +RUN pip3 install --no-cache-dir dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl setuptools<=75.5.0 WORKDIR /notebooks COPY examples/notebooks . 
diff --git a/docker/dockerfiles/Dockerfile.onnx.gpu.dev b/docker/dockerfiles/Dockerfile.onnx.gpu.dev index aaf43e640..d2fc5ff2b 100644 --- a/docker/dockerfiles/Dockerfile.onnx.gpu.dev +++ b/docker/dockerfiles/Dockerfile.onnx.gpu.dev @@ -49,6 +49,7 @@ RUN python3 -m pip install \ -r requirements.sdk.http.txt \ -r requirements.cli.txt \ jupyterlab \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.gpu.parallel b/docker/dockerfiles/Dockerfile.onnx.gpu.parallel index 71da06f63..5e2c123b7 100644 --- a/docker/dockerfiles/Dockerfile.onnx.gpu.parallel +++ b/docker/dockerfiles/Dockerfile.onnx.gpu.parallel @@ -31,6 +31,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.waf.txt \ -r requirements.gaze.txt \ -r requirements.parallel.txt \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip @@ -47,7 +48,7 @@ WORKDIR /build COPY . . RUN ln -s /usr/bin/python3 /usr/bin/python RUN /bin/make create_wheels_for_gpu_notebook -RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl +RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl setuptools<=75.5.0 WORKDIR /notebooks COPY examples/notebooks . diff --git a/docker/dockerfiles/Dockerfile.onnx.gpu.slim b/docker/dockerfiles/Dockerfile.onnx.gpu.slim index c12c3ba14..8ced44539 100644 --- a/docker/dockerfiles/Dockerfile.onnx.gpu.slim +++ b/docker/dockerfiles/Dockerfile.onnx.gpu.slim @@ -27,7 +27,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.waf.txt \ -r requirements.cli.txt \ -r requirements.sdk.http.txt \ - wheel>=0.38.0 \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.gpu.stream_manager b/docker/dockerfiles/Dockerfile.onnx.gpu.stream_manager index 597ea91bc..bdefe1f20 100644 --- a/docker/dockerfiles/Dockerfile.onnx.gpu.stream_manager +++ b/docker/dockerfiles/Dockerfile.onnx.gpu.stream_manager @@ -21,6 +21,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r _requirements.txt \ -r requirements.http.txt \ -r requirements.gpu.txt \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.4.5.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.4.5.0 index cd8940d25..31f8c5f2a 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.4.5.0 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.4.5.0 @@ -42,6 +42,7 @@ RUN python3.8 -m pip install --upgrade pip wheel Cython && python3.8 -m pip inst -r requirements.sdk.http.txt \ -r requirements.yolo_world.txt \ jupyterlab \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.4.6.1 b/docker/dockerfiles/Dockerfile.onnx.jetson.4.6.1 index e8a7cae05..1b6ae8d90 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.4.6.1 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.4.6.1 @@ -57,6 +57,7 @@ RUN python3.9 -m pip install --upgrade pip "h5py<=3.10.0" && python3.9 -m pip in -r requirements.sdk.http.txt \ -r requirements.yolo_world.txt \ jupyterlab \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 index 308a89af7..efb0114e9 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 @@ -52,6 +52,7 @@ RUN python3.9 -m pip install --upgrade pip && python3.9 -m pip install \ 
-r requirements.sdk.http.txt \ -r requirements.yolo_world.txt \ jupyterlab \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager index 5a11b799f..e98a6fec4 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager @@ -40,6 +40,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r _requirements.txt \ -r requirements.clip.txt \ -r requirements.http.txt \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 index 0cdbae7b7..9302b2879 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 @@ -34,7 +34,8 @@ RUN python3 -m pip install --upgrade pip && \ -r requirements/requirements.groundingdino.txt \ -r requirements/requirements.sdk.http.txt \ -r requirements/requirements.yolo_world.txt \ - -r requirements/requirements.jetson.txt + -r requirements/requirements.jetson.txt \ + setuptools<=75.5.0 # Build the application WORKDIR /build @@ -45,7 +46,7 @@ RUN rm -f dist/* && \ python3 .release/pypi/inference.gpu.setup.py bdist_wheel && \ python3 .release/pypi/inference.sdk.setup.py bdist_wheel && \ python3 .release/pypi/inference.cli.setup.py bdist_wheel && \ - python3 -m pip install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl + python3 -m pip install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl setuptools<=75.5.0 # Set up the application runtime WORKDIR /app diff --git a/docker/dockerfiles/Dockerfile.onnx.lambda b/docker/dockerfiles/Dockerfile.onnx.lambda index 8c8a33a3b..9d4adc67c 100644 --- a/docker/dockerfiles/Dockerfile.onnx.lambda +++ b/docker/dockerfiles/Dockerfile.onnx.lambda @@ -47,6 +47,7 @@ RUN pip3 install \ -r requirements.sdk.http.txt \ -r requirements.yolo_world.txt \ mangum \ + setuptools<=75.5.0 \ --upgrade \ --target "${LAMBDA_TASK_ROOT}" \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.lambda.slim b/docker/dockerfiles/Dockerfile.onnx.lambda.slim index 3409bd0f5..85b44f422 100644 --- a/docker/dockerfiles/Dockerfile.onnx.lambda.slim +++ b/docker/dockerfiles/Dockerfile.onnx.lambda.slim @@ -35,6 +35,7 @@ RUN pip3 install \ -r requirements.hosted.txt \ -r requirements.sdk.http.txt \ mangum \ + setuptools<=75.5.0 \ --upgrade \ --target "${LAMBDA_TASK_ROOT}" \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.trt b/docker/dockerfiles/Dockerfile.onnx.trt index adf04ae3a..cb6328ed1 100644 --- a/docker/dockerfiles/Dockerfile.onnx.trt +++ b/docker/dockerfiles/Dockerfile.onnx.trt @@ -33,6 +33,7 @@ RUN pip install --upgrade pip && pip install \ -r requirements.doctr.txt \ -r requirements.groundingdino.txt \ -r requirements.sdk.http.txt \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.udp.gpu b/docker/dockerfiles/Dockerfile.onnx.udp.gpu index f86f39f48..33a89b9ed 100644 --- a/docker/dockerfiles/Dockerfile.onnx.udp.gpu +++ b/docker/dockerfiles/Dockerfile.onnx.udp.gpu @@ -25,6 +25,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.clip.txt \ -r requirements.http.txt \ -r requirements.gpu.txt \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git 
a/docker/dockerfiles/Dockerfile.stream_management_api b/docker/dockerfiles/Dockerfile.stream_management_api index 8ff63f0c1..0e9dd8184 100644 --- a/docker/dockerfiles/Dockerfile.stream_management_api +++ b/docker/dockerfiles/Dockerfile.stream_management_api @@ -23,6 +23,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r _requirements.txt \ -r requirements.cpu.txt \ -r requirements.http.txt \ + setuptools<=75.5.0 \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docs/foundation/paligemma.md b/docs/foundation/paligemma.md index 7b312b102..eb274f721 100644 --- a/docs/foundation/paligemma.md +++ b/docs/foundation/paligemma.md @@ -122,7 +122,7 @@ print(response) detections = from_pali_gemma(response=response, resolution_wh=image.size, class_list=['person', 'car', 'backpack']) -bounding_box_annotator = sv.BoundingBoxAnnotator() +bounding_box_annotator = sv.BoxAnnotator() label_annotator = sv.LabelAnnotator() annotatrd_image = bounding_box_annotator.annotate(image, detections) diff --git a/docs/foundation/yolo_world.md b/docs/foundation/yolo_world.md index 9e5921411..54c90cec1 100644 --- a/docs/foundation/yolo_world.md +++ b/docs/foundation/yolo_world.md @@ -43,7 +43,7 @@ YOLO World is faster than many other zero-shot object detection models like YOLO detections = sv.Detections.from_inference(results) - bounding_box_annotator = sv.BoundingBoxAnnotator() + bounding_box_annotator = sv.BoxAnnotator() label_annotator = sv.LabelAnnotator() labels = [classes[class_id] for class_id in detections.class_id] @@ -98,7 +98,7 @@ YOLO World is faster than many other zero-shot object detection models like YOLO detections = sv.Detections.from_inference(results) - bounding_box_annotator = sv.BoundingBoxAnnotator() + bounding_box_annotator = sv.BoxAnnotator() label_annotator = sv.LabelAnnotator() labels = [classes[class_id] for class_id in detections.class_id] diff --git a/docs/inference_helpers/inference_cli.md b/docs/inference_helpers/inference_cli.md index 55be649aa..8def634c1 100644 --- a/docs/inference_helpers/inference_cli.md +++ b/docs/inference_helpers/inference_cli.md @@ -234,8 +234,8 @@ inference infer -i {path_to_your_video_file} -m {your_project}/{version} -o {pat #### Configuration of visualisation Option `-c` can be provided with a path to `*.yml` file configuring `supervision` visualisation. There are few pre-defined configs: -- `bounding_boxes` - with `BoundingBoxAnnotator` and `LabelAnnotator` annotators -- `bounding_boxes_tracing` - with `ByteTracker` and annotators (`BoundingBoxAnnotator`, `LabelAnnotator`) +- `bounding_boxes` - with `BoxAnnotator` and `LabelAnnotator` annotators +- `bounding_boxes_tracing` - with `ByteTracker` and annotators (`BoxAnnotator`, `LabelAnnotator`) - `masks` - with `MaskAnnotator` and `LabelAnnotator` annotators - `polygons` - with `PolygonAnnotator` and `LabelAnnotator` annotators @@ -255,9 +255,9 @@ annotators: trace_length: 60 thickness: 2 tracking: - track_thresh: 0.25 - track_buffer: 30 - match_thresh: 0.8 + track_activation_threshold: 0.25 + lost_track_buffer: 30 + minimum_matching_threshold: 0.8 frame_rate: 30 ``` `annotators` field is a list of dictionaries with two keys: `type` and `param`. 
`type` points to @@ -265,7 +265,8 @@ name of annotator class: ```python from supervision import * ANNOTATOR_TYPE2CLASS = { - "bounding_box": BoundingBoxAnnotator, + "bounding_box": BoxAnnotator, + "box": BoxAnnotator, "mask": MaskAnnotator, "polygon": PolygonAnnotator, "color": ColorAnnotator, diff --git a/docs/notebooks/inference_pipeline_rtsp.ipynb b/docs/notebooks/inference_pipeline_rtsp.ipynb index 0b0eb5b36..9812426f4 100644 --- a/docs/notebooks/inference_pipeline_rtsp.ipynb +++ b/docs/notebooks/inference_pipeline_rtsp.ipynb @@ -151,7 +151,6 @@ "zones = [\n", " sv.PolygonZone(\n", " polygon=polygon,\n", - " frame_resolution_wh=[1440,2560],\n", " )\n", " for polygon\n", " in polygons\n", diff --git a/docs/quickstart/explore_models.md b/docs/quickstart/explore_models.md index b1c549eac..a331ca840 100644 --- a/docs/quickstart/explore_models.md +++ b/docs/quickstart/explore_models.md @@ -35,7 +35,7 @@ results = model.infer(image)[0] detections = sv.Detections.from_inference(results) # create supervision annotators -bounding_box_annotator = sv.BoundingBoxAnnotator() +bounding_box_annotator = sv.BoxAnnotator() label_annotator = sv.LabelAnnotator() # annotate the image with our inference results diff --git a/docs/quickstart/load_from_universe.md b/docs/quickstart/load_from_universe.md index 28c8c981f..483cf3694 100644 --- a/docs/quickstart/load_from_universe.md +++ b/docs/quickstart/load_from_universe.md @@ -50,7 +50,7 @@ results = model.infer(image) detections = sv.Detections.from_inference(results[0].dict(by_alias=True, exclude_none=True)) # create supervision annotators -bounding_box_annotator = sv.BoundingBoxAnnotator() +bounding_box_annotator = sv.BoxAnnotator() label_annotator = sv.LabelAnnotator() # annotate the image with our inference results diff --git a/docs/quickstart/run_model_on_rtsp_webcam.md b/docs/quickstart/run_model_on_rtsp_webcam.md index 6bc4d9cce..06d2244b7 100644 --- a/docs/quickstart/run_model_on_rtsp_webcam.md +++ b/docs/quickstart/run_model_on_rtsp_webcam.md @@ -126,7 +126,7 @@ import supervision as sv # create a bounding box annotator and label annotator to use in our custom sink label_annotator = sv.LabelAnnotator() -box_annotator = sv.BoundingBoxAnnotator() +box_annotator = sv.BoxAnnotator() def my_custom_sink(predictions: dict, video_frame: VideoFrame): # get the text labels for each prediction diff --git a/docs/using_inference/http_api.md b/docs/using_inference/http_api.md index 4cda003e4..c60cdd666 100644 --- a/docs/using_inference/http_api.md +++ b/docs/using_inference/http_api.md @@ -46,6 +46,10 @@ results = client.infer(image_url, model_id=model_id) ### Visualize Results ```python +import os + +import cv2 +import supervision as sv from inference_sdk import InferenceHTTPClient, InferenceConfiguration model_id = "soccer-players-5fuqs/1" @@ -66,7 +70,7 @@ result = client.infer(image, model_id=model_id) detections = sv.Detections.from_inference(result) #Create Supervision annotators -bounding_box_annotator = sv.BoundingBoxAnnotator() +bounding_box_annotator = sv.BoxAnnotator() label_annotator = sv.LabelAnnotator() #Extract labels array from inference results diff --git a/docs/using_inference/native_python_api.md b/docs/using_inference/native_python_api.md index 96fe087f9..b00f519a6 100644 --- a/docs/using_inference/native_python_api.md +++ b/docs/using_inference/native_python_api.md @@ -54,7 +54,7 @@ results = model.infer(image)[0] detections = sv.Detections.from_inference(results) # Create Supervision annotators -bounding_box_annotator = 
sv.BoundingBoxAnnotator() +bounding_box_annotator = sv.BoxAnnotator() label_annotator = sv.LabelAnnotator() # Extract labels array from inference results diff --git a/docs/workflows/create_workflow_block.md b/docs/workflows/create_workflow_block.md index 72c43776a..bb01f7139 100644 --- a/docs/workflows/create_workflow_block.md +++ b/docs/workflows/create_workflow_block.md @@ -1579,7 +1579,7 @@ the method signatures. crops: Batch[WorkflowImageData], crops_predictions: Batch[sv.Detections], ) -> BlockResult: - annotator = sv.BoundingBoxAnnotator() + annotator = sv.BoxAnnotator() visualisations = [] for image, prediction in zip(crops, crops_predictions): annotated_image = annotator.annotate( @@ -1867,7 +1867,7 @@ the method signatures. images_crops: Batch[Batch[WorkflowImageData]], crops_predictions: Batch[Batch[sv.Detections]], ) -> BlockResult: - annotator = sv.BoundingBoxAnnotator() + annotator = sv.BoxAnnotator() visualisations = [] for image_crops, crop_predictions in zip(images_crops, crops_predictions): visualisations_batch_element = [] diff --git a/examples/notebooks/inference_sdk.ipynb b/examples/notebooks/inference_sdk.ipynb index f54cba595..d02baecb7 100644 --- a/examples/notebooks/inference_sdk.ipynb +++ b/examples/notebooks/inference_sdk.ipynb @@ -151,7 +151,7 @@ "detections = sv.Detections.from_inference(results)\n", "\n", "#Initialize annotators\n", - "bounding_box_annotator = sv.BoundingBoxAnnotator()\n", + "bounding_box_annotator = sv.BoxAnnotator()\n", "label_annotator = sv.LabelAnnotator()\n", "\n", "#Get class labels from inference results\n", diff --git a/examples/notebooks/quickstart.ipynb b/examples/notebooks/quickstart.ipynb index f78ea6154..d2b6d77b1 100644 --- a/examples/notebooks/quickstart.ipynb +++ b/examples/notebooks/quickstart.ipynb @@ -170,7 +170,7 @@ "detections = sv.Detections.from_inference(result.dict(by_alias=True, exclude_none=True))\n", "\n", "#Initialize annotators\n", - "bounding_box_annotator = sv.BoundingBoxAnnotator()\n", + "bounding_box_annotator = sv.BoxAnnotator()\n", "label_annotator = sv.LabelAnnotator()\n", "\n", "#Get class labels from inference results\n", diff --git a/examples/notebooks/workflows.ipynb b/examples/notebooks/workflows.ipynb index 11003df64..61bc0c62e 100644 --- a/examples/notebooks/workflows.ipynb +++ b/examples/notebooks/workflows.ipynb @@ -791,7 +791,7 @@ } ], "source": [ - "annotator = sv.BoundingBoxAnnotator(thickness=20)\n", + "annotator = sv.BoxAnnotator(thickness=20)\n", "detections = sv.Detections.from_inference(detection_coco_and_plates[\"predictions\"])\n", "plt.imshow(annotator.annotate(multiple_cars_image_2.copy(), detections)[:, :, ::-1])\n", "plt.show()" diff --git a/inference/core/interfaces/stream/sinks.py b/inference/core/interfaces/stream/sinks.py index cd6d2a4bb..ca449fde8 100644 --- a/inference/core/interfaces/stream/sinks.py +++ b/inference/core/interfaces/stream/sinks.py @@ -18,7 +18,7 @@ from inference.core.utils.drawing import create_tiles from inference.core.utils.preprocess import letterbox_image -DEFAULT_BBOX_ANNOTATOR = sv.BoundingBoxAnnotator() +DEFAULT_BBOX_ANNOTATOR = sv.BoxAnnotator() DEFAULT_LABEL_ANNOTATOR = sv.LabelAnnotator() DEFAULT_FPS_MONITOR = sv.FPSMonitor() @@ -50,8 +50,8 @@ def render_boxes( ) -> None: """ Helper tool to render object detection predictions on top of video frame. It is designed - to be used with `InferencePipeline`, as sink for predictions. 
By default it uses - standard `sv.BoundingBoxAnnotator()` chained with `sv.LabelAnnotator()` + to be used with `InferencePipeline`, as sink for predictions. By default, it uses + standard `sv.BoxAnnotator()` chained with `sv.LabelAnnotator()` to draw bounding boxes and resizes prediction to 1280x720 (keeping aspect ratio and adding black padding). One may configure default behaviour, for instance to display latency and throughput statistics. In batch mode it will display tiles of frames and overlay predictions. @@ -70,7 +70,7 @@ def render_boxes( by `VideoSource` or list of frames from (it is possible for empty batch frames at corresponding positions to `predictions` list). Order is expected to match with `predictions` annotator (Union[BaseAnnotator, List[BaseAnnotator]]): instance of class inheriting from supervision BaseAnnotator - or list of such instances. If nothing is passed chain of `sv.BoundingBoxAnnotator()` and `sv.LabelAnnotator()` is used. + or list of such instances. If nothing is passed chain of `sv.BoxAnnotator()` and `sv.LabelAnnotator()` is used. display_size (Tuple[int, int]): tuple in format (width, height) to resize visualisation output fps_monitor (Optional[sv.FPSMonitor]): FPS monitor used to monitor throughput display_statistics (bool): Flag to decide if throughput and latency can be displayed in the result image, @@ -424,7 +424,7 @@ def init( Args: video_file_name (str): name of the video file to save predictions annotator (Union[BaseAnnotator, List[BaseAnnotator]]): instance of class inheriting from supervision BaseAnnotator - or list of such instances. If nothing is passed chain of `sv.BoundingBoxAnnotator()` and `sv.LabelAnnotator()` is used. + or list of such instances. If nothing is passed chain of `sv.BoxAnnotator()` and `sv.LabelAnnotator()` is used. display_size (Tuple[int, int]): tuple in format (width, height) to resize visualisation output. 
Should be set to the same value as `display_size` for InferencePipeline with single video source, otherwise it represents the size of single visualisation tile (whole tiles mosaic will be scaled to diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py index 63678a6d8..295c3d0aa 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v1.py @@ -146,7 +146,6 @@ def run( ) self._batch_of_polygon_zones[metadata.video_identifier] = sv.PolygonZone( polygon=np.array(zone), - frame_resolution_wh=image.numpy_image.shape[:-1], triggering_anchors=(sv.Position(triggering_anchor),), ) polygon_zone = self._batch_of_polygon_zones[metadata.video_identifier] diff --git a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py index acb4d0749..11650c4fa 100644 --- a/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py +++ b/inference/core/workflows/core_steps/analytics/time_in_zone/v2.py @@ -148,7 +148,6 @@ def run( ) self._batch_of_polygon_zones[metadata.video_identifier] = sv.PolygonZone( polygon=np.array(zone), - frame_resolution_wh=image.numpy_image.shape[:-1], triggering_anchors=(sv.Position(triggering_anchor),), ) polygon_zone = self._batch_of_polygon_zones[metadata.video_identifier] diff --git a/inference_cli/configs/bounding_boxes_tracing.yml b/inference_cli/configs/bounding_boxes_tracing.yml index 019db48ef..3b8671e2b 100644 --- a/inference_cli/configs/bounding_boxes_tracing.yml +++ b/inference_cli/configs/bounding_boxes_tracing.yml @@ -12,7 +12,7 @@ annotators: trace_length: 60 thickness: 2 tracking: - track_thresh: 0.25 - track_buffer: 30 - match_thresh: 0.8 + track_activation_threshold: 0.25 + lost_track_buffer: 30 + minimum_matching_threshold: 0.8 frame_rate: 30 diff --git a/inference_cli/lib/infer_adapter.py b/inference_cli/lib/infer_adapter.py index 15f1940d6..1fa5af632 100644 --- a/inference_cli/lib/infer_adapter.py +++ b/inference_cli/lib/infer_adapter.py @@ -8,7 +8,7 @@ import numpy as np from supervision import ( BlurAnnotator, - BoundingBoxAnnotator, + BoxAnnotator, BoxCornerAnnotator, ByteTrack, CircleAnnotator, @@ -46,7 +46,8 @@ ) ANNOTATOR_TYPE2CLASS = { - "bounding_box": BoundingBoxAnnotator, + "bounding_box": BoxAnnotator, + "box": BoxAnnotator, "mask": MaskAnnotator, "polygon": PolygonAnnotator, "color": ColorAnnotator, @@ -313,7 +314,7 @@ def is_something_to_do( def build_visualisation_callback( visualisation_config: Optional[str], ) -> Callable[[np.ndarray, dict], Optional[np.ndarray]]: - annotators = [BoundingBoxAnnotator()] + annotators = [BoxAnnotator()] byte_tracker = None if visualisation_config is not None: raw_configuration = retrieve_visualisation_config( diff --git a/requirements/_requirements.txt b/requirements/_requirements.txt index 5f5b64f25..25bd25ee8 100644 --- a/requirements/_requirements.txt +++ b/requirements/_requirements.txt @@ -1,32 +1,31 @@ -aiortc>=1.9.0 -APScheduler<=3.10.1 -cython<=3.0.0 -python-dotenv<=2.0.0 +aiortc>=1.9.0,<2.0.0 +APScheduler>=3.10.1,<4.0.0 +cython~=3.0.0 +python-dotenv~=1.0.0 fastapi>=0.100,<0.111 numpy<=1.26.4 opencv-python>=4.8.1.78,<=4.10.0.84 -piexif<=1.1.3 +piexif~=1.1.3 pillow<11.0 -prometheus-fastapi-instrumentator<=6.0.0 -redis<6.0.0 -requests>=2.26.0 -rich<=13.5.2 -supervision>=0.21.0,<=0.22.0 -pybase64<2.0.0 -scikit-image>=0.19.0 -requests-toolbelt>=1.0.0 
-wheel>=0.38.1 -setuptools>=70.0.0 -pytest-asyncio<=0.21.1 -networkx>=3.1 +prometheus-fastapi-instrumentator~=7.0.0 +redis~=5.0.0 +requests>=2.26.0,<2.32.3 +rich~=13.0.0 +supervision>=0.21.0,<=0.25.0 +pybase64~=1.0.0 +scikit-image>=0.19.0,<=0.24.0 +requests-toolbelt~=1.0.0 +wheel>=0.38.1,<=0.45.0 +setuptools>=70.0.0 # lack of upper-bound to ensure compatibility with Google Colab (builds to define one if needed) +networkx~=3.1 pydantic~=2.6 pydantic-settings~=2.2 -openai>=1.12.0 -structlog>=24.1.0 -zxing-cpp>=2.2.0 -boto3<=1.34.123 -typing_extensions>=4.8.0 -pydot>=2.0.0 +openai>=1.12.0,<2.0.0 +structlog>=24.1.0,<25.0.0 +zxing-cpp~=2.2.0 +boto3<=1.35.60 +typing_extensions>=4.8.0,<=4.12.2 +pydot~=2.0.0 shapely>=2.0.0,<2.1.0 tldextract~=5.1.2 packaging~=24.0 diff --git a/requirements/requirements.cli.txt b/requirements/requirements.cli.txt index a703e8e50..ea79fb0d1 100644 --- a/requirements/requirements.cli.txt +++ b/requirements/requirements.cli.txt @@ -1,12 +1,12 @@ -requests<=2.31.0 +requests>=2.26.0,<2.32.3 docker==6.1.3 typer>=0.9.0,<=0.12.5 -rich<=13.5.2 -PyYAML>=6.0.0 -supervision>=0.20.0,<1.0.0 +rich~=13.0.0 +PyYAML~=6.0.0 +supervision>=0.21.0,<=0.25.0 opencv-python>=4.8.1.78,<=4.10.0.84 -tqdm>=4.0.0 -GPUtil>=1.4.0 -py-cpuinfo>=9.0.0 -aiohttp>=3.9.0 -backoff>=2.2.0 +tqdm~=4.0.0 +GPUtil~=1.4.0 +py-cpuinfo~=9.0.0 +aiohttp>=3.9.0,<=3.10.11 +backoff~=2.2.0 diff --git a/requirements/requirements.doctr.txt b/requirements/requirements.doctr.txt index c361bf091..9494098fd 100644 --- a/requirements/requirements.doctr.txt +++ b/requirements/requirements.doctr.txt @@ -1,2 +1,2 @@ -python-doctr[torch] -tf2onnx \ No newline at end of file +python-doctr[torch]==0.10.0 +tf2onnx~=1.16.0 \ No newline at end of file diff --git a/requirements/requirements.hosted.txt b/requirements/requirements.hosted.txt index 821cbac7b..c031776b4 100644 --- a/requirements/requirements.hosted.txt +++ b/requirements/requirements.hosted.txt @@ -1,3 +1,3 @@ -pymemcache<=4.0.0 -elasticache_auto_discovery<=1.0.0 -prometheus-fastapi-instrumentator<=6.0.0 \ No newline at end of file +pymemcache~=4.0.0 +elasticache_auto_discovery~=1.0.0 +prometheus-fastapi-instrumentator~=7.0.0 \ No newline at end of file diff --git a/requirements/requirements.http.txt b/requirements/requirements.http.txt index 04e6b7132..d6249c883 100644 --- a/requirements/requirements.http.txt +++ b/requirements/requirements.http.txt @@ -1,5 +1,5 @@ uvicorn[standard]<=0.22.0 python-multipart>=0.0.7,<=0.0.9 fastapi-cprofile<=0.0.2 -orjson>=3.9.10 -asgi_correlation_id>=4.3.1 +orjson>=3.9.10,<=3.10.11 +asgi_correlation_id~=4.3.1 diff --git a/requirements/requirements.jetson.txt b/requirements/requirements.jetson.txt index dbcbeed61..703e9d485 100644 --- a/requirements/requirements.jetson.txt +++ b/requirements/requirements.jetson.txt @@ -1,4 +1,4 @@ -pypdfium2 -jupyterlab -PyYAML -onnxruntime-gpu +pypdfium2~=4.0.0 +jupyterlab~=4.0.0 +PyYAML~=6.0.0 +onnxruntime-gpu>=1.15.1,<1.20.0 diff --git a/requirements/requirements.parallel.txt b/requirements/requirements.parallel.txt index d330682d7..0ad196d66 100644 --- a/requirements/requirements.parallel.txt +++ b/requirements/requirements.parallel.txt @@ -1,2 +1,2 @@ -celery -gunicorn \ No newline at end of file +celery~=5.0.0 +gunicorn~=23.0.0 \ No newline at end of file diff --git a/requirements/requirements.sdk.http.txt b/requirements/requirements.sdk.http.txt index ba244d8a1..226f49a4d 100644 --- a/requirements/requirements.sdk.http.txt +++ b/requirements/requirements.sdk.http.txt @@ -1,11 +1,9 @@ -requests>=2.0.0 
-dataclasses-json>=0.6.0 +requests>=2.26.0,<2.32.3 +dataclasses-json~=0.6.0 opencv-python>=4.8.1.78,<=4.10.0.84 -pillow>=9.0.0 -requests>=2.27.0 -supervision>=0.20.0,<1.0.0 +pillow>=9.0.0,<11.0 +supervision>=0.21.0,<=0.25.0 numpy<=1.26.4 aiohttp>=3.9.0,<=3.10.11 -backoff>=2.2.0 -aioresponses>=0.7.6 -py-cpuinfo>=9.0.0 +backoff~=2.2.0 +py-cpuinfo~=9.0.0 diff --git a/requirements/requirements.waf.txt b/requirements/requirements.waf.txt index e4f807a3d..64b858d86 100644 --- a/requirements/requirements.waf.txt +++ b/requirements/requirements.waf.txt @@ -1 +1 @@ -metlo \ No newline at end of file +metlo~=0.1.5 \ No newline at end of file diff --git a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_batch.py b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_batch.py index eeb4d5da6..d00cf2168 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_batch.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_batch.py @@ -82,7 +82,7 @@ def run( images_crops: Batch[Batch[WorkflowImageData]], crops_predictions: Batch[Batch[sv.Detections]], ) -> BlockResult: - annotator = sv.BoundingBoxAnnotator() + annotator = sv.BoxAnnotator() visualisations = [] for image_crops, crop_predictions in zip(images_crops, crops_predictions): visualisations_batch_element = [] diff --git a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_non_batch.py b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_non_batch.py index cca572d1e..ab51455d2 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_non_batch.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/tile_detections_non_batch.py @@ -78,7 +78,7 @@ def run( crops: Batch[WorkflowImageData], crops_predictions: Batch[sv.Detections], ) -> BlockResult: - annotator = sv.BoundingBoxAnnotator() + annotator = sv.BoxAnnotator() visualisations = [] for image, prediction in zip(crops, crops_predictions): annotated_image = annotator.annotate( From f1d6623461ea8997368027f9572fe75c76ad09e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 11:44:30 +0100 Subject: [PATCH 49/67] Fix tqdm dependency --- requirements/requirements.cli.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements.cli.txt b/requirements/requirements.cli.txt index ea79fb0d1..c7f0cdc85 100644 --- a/requirements/requirements.cli.txt +++ b/requirements/requirements.cli.txt @@ -5,7 +5,7 @@ rich~=13.0.0 PyYAML~=6.0.0 supervision>=0.21.0,<=0.25.0 opencv-python>=4.8.1.78,<=4.10.0.84 -tqdm~=4.0.0 +tqdm>=4.0.0,<5.0.0 GPUtil~=1.4.0 py-cpuinfo~=9.0.0 aiohttp>=3.9.0,<=3.10.11 From 4cf2e9ff7f199621839cbfe74acc793e0f010833 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 11:59:16 +0100 Subject: [PATCH 50/67] Fix doctr and requests dependency --- requirements/_requirements.txt | 2 +- requirements/requirements.cli.txt | 2 +- requirements/requirements.doctr.txt | 2 +- requirements/requirements.sdk.http.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/requirements/_requirements.txt b/requirements/_requirements.txt index 
25bd25ee8..e7568bcd0 100644 --- a/requirements/_requirements.txt +++ b/requirements/_requirements.txt @@ -9,7 +9,7 @@ piexif~=1.1.3 pillow<11.0 prometheus-fastapi-instrumentator~=7.0.0 redis~=5.0.0 -requests>=2.26.0,<2.32.3 +requests>=2.26.0,<2.32.0 # newer requests breaks docker which would need to be bumped to 7.x.x rich~=13.0.0 supervision>=0.21.0,<=0.25.0 pybase64~=1.0.0 diff --git a/requirements/requirements.cli.txt b/requirements/requirements.cli.txt index c7f0cdc85..19567def0 100644 --- a/requirements/requirements.cli.txt +++ b/requirements/requirements.cli.txt @@ -1,4 +1,4 @@ -requests>=2.26.0,<2.32.3 +requests>=2.26.0,<2.32.0 # newer requests breaks docker which would need to be bumped to 7.x.x docker==6.1.3 typer>=0.9.0,<=0.12.5 rich~=13.0.0 diff --git a/requirements/requirements.doctr.txt b/requirements/requirements.doctr.txt index 9494098fd..bd1d66c37 100644 --- a/requirements/requirements.doctr.txt +++ b/requirements/requirements.doctr.txt @@ -1,2 +1,2 @@ -python-doctr[torch]==0.10.0 +python-doctr[torch]>=0.7.0,<=0.10.0 tf2onnx~=1.16.0 \ No newline at end of file diff --git a/requirements/requirements.sdk.http.txt b/requirements/requirements.sdk.http.txt index 226f49a4d..9bed754b9 100644 --- a/requirements/requirements.sdk.http.txt +++ b/requirements/requirements.sdk.http.txt @@ -1,4 +1,4 @@ -requests>=2.26.0,<2.32.3 +requests>=2.26.0,<2.32.0 # newer requests breaks docker which would need to be bumped to 7.x.x dataclasses-json~=0.6.0 opencv-python>=4.8.1.78,<=4.10.0.84 pillow>=9.0.0,<11.0 From ac53f4430efbc2bb1a88721d609f1afa6aba8eb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 12:13:12 +0100 Subject: [PATCH 51/67] Introduce inference_id kind --- inference/core/workflows/core_steps/loader.py | 4 +++- .../execution_engine/entities/types.py | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 5b6eb99d4..10f351697 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -357,7 +357,7 @@ VIDEO_METADATA_KIND, WILDCARD_KIND, ZONE_KIND, - Kind, + Kind, INFERENCE_ID_KIND, ) from inference.core.workflows.prototypes.block import WorkflowBlock @@ -409,6 +409,7 @@ PREDICTION_TYPE_KIND.name: deserialize_string_kind, PARENT_ID_KIND.name: deserialize_string_kind, BYTES_KIND.name: deserialize_bytes_kind, + INFERENCE_ID_KIND.name: deserialize_string_kind, } @@ -552,4 +553,5 @@ def load_kinds() -> List[Kind]: PARENT_ID_KIND, IMAGE_METADATA_KIND, BYTES_KIND, + INFERENCE_ID_KIND, ] diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index be94ec362..f9d3d239d 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -1020,6 +1020,24 @@ def __hash__(self) -> int: internal_data_type="str", ) +INFERENCE_ID_KIND_DOCS = """ +This kind represents identifier of inference process, which is usually opaque string used as correlation +identifier for external systems (like Roboflow Model Monitoring). 
+ +Examples: +``` +b1851e3d-a145-4540-a39e-875f21f6cd84 +``` +""" + +INFERENCE_ID_KIND = Kind( + name="inference_id", + description="Inference identifier", + docs=INFERENCE_ID_KIND_DOCS, + serialised_data_type="str", + internal_data_type="str", +) + STEP_AS_SELECTED_ELEMENT = "step" STEP_OUTPUT_AS_SELECTED_ELEMENT = "step_output" From f4e4ad53f67bae90152d0e024b0aa3fc1d395e2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 12:18:17 +0100 Subject: [PATCH 52/67] Adjust tests to BC in supervision --- .../execution/test_workflow_with_sahi.py | 2 +- ...est_workflow_with_video_metadata_processing.py | 15 +++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py b/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py index bcdca5b99..17ee2f56b 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py @@ -352,7 +352,7 @@ def slicer_callback(image_slice: np.ndarray): callback=slicer_callback, slice_wh=(640, 640), overlap_ratio_wh=(0.2, 0.2), - overlap_filter_strategy=sv.OverlapFilter.NON_MAX_SUPPRESSION, + overlap_filter=sv.OverlapFilter.NON_MAX_SUPPRESSION, iou_threshold=0.3, ) diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_video_metadata_processing.py b/tests/workflows/integration_tests/execution/test_workflow_with_video_metadata_processing.py index c50ea7116..7a2027fe8 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_video_metadata_processing.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_video_metadata_processing.py @@ -172,7 +172,7 @@ def test_workflow_with_tracker( "fps": 50, "comes_from_video_file": True, } - metadata_license_plare_image = { + metadata_license_plate_image = { "video_identifier": "c", "frame_number": 1, "frame_timestamp": datetime.now().isoformat(), @@ -197,7 +197,7 @@ def test_workflow_with_tracker( result_3 = execution_engine.run( runtime_parameters={ "image": [dogs_image, license_plate_image], - "video_metadata": [metadata_dogs_image, metadata_license_plare_image], + "video_metadata": [metadata_dogs_image, metadata_license_plate_image], } ) first_dogs_frame_tracker_ids = result_1[0]["tracker_id"] @@ -214,7 +214,10 @@ def test_workflow_with_tracker( first_crowd_frame_tracker_ids == second_crowd_frame_tracker_ids ), "The same image, expected no tracker IDs change" assert first_license_plate_frame_tracker_ids == [ - 15, - 16, - 17, - ], "External IDs for all trackers are global, hence we offset by numer of all ever generated tracker IDs" + 1, + 2, + 3, + ], ( + "Since `supervision>=0.25.0` tracker IDs are unique for each new tracker instance - and we " + "expect new tracker for `metadata_license_plate_image` to be created - hence fresh tracker ids" + ) From 6d71a8e1bee0778c24b126e7da1b6cfe67dbd2e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 12:33:32 +0100 Subject: [PATCH 53/67] Introduce new versions of affected blocks --- .../formatters/vlm_as_classifier/v2.py | 267 +++++++++++++ .../formatters/vlm_as_detector/v2.py | 376 ++++++++++++++++++ .../roboflow/instance_segmentation/v2.py | 364 +++++++++++++++++ .../models/roboflow/keypoint_detection/v2.py | 353 ++++++++++++++++ .../roboflow/multi_class_classification/v2.py | 254 ++++++++++++ .../roboflow/multi_label_classification/v2.py | 251 ++++++++++++ 
.../models/roboflow/object_detection/v2.py | 330 +++++++++++++++ 7 files changed, 2195 insertions(+) create mode 100644 inference/core/workflows/core_steps/formatters/vlm_as_classifier/v2.py create mode 100644 inference/core/workflows/core_steps/formatters/vlm_as_detector/v2.py create mode 100644 inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v2.py create mode 100644 inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v2.py create mode 100644 inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py create mode 100644 inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py create mode 100644 inference/core/workflows/core_steps/models/roboflow/object_detection/v2.py diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v2.py b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v2.py new file mode 100644 index 000000000..81ab06be2 --- /dev/null +++ b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v2.py @@ -0,0 +1,267 @@ +import json +import logging +import re +from typing import Dict, List, Literal, Optional, Tuple, Type, Union +from uuid import uuid4 + +from pydantic import ConfigDict, Field + +from inference.core.workflows.execution_engine.entities.base import ( + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.execution_engine.entities.types import ( + BOOLEAN_KIND, + CLASSIFICATION_PREDICTION_KIND, + IMAGE_KIND, + LANGUAGE_MODEL_OUTPUT_KIND, + LIST_OF_VALUES_KIND, + Selector, + INFERENCE_ID_KIND, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + +JSON_MARKDOWN_BLOCK_PATTERN = re.compile(r"```json([\s\S]*?)```", flags=re.IGNORECASE) + +LONG_DESCRIPTION = """ +The block expects string input that would be produced by blocks exposing Large Language Models (LLMs) and +Visual Language Models (VLMs). Input is parsed to classification prediction and returned as block output. + +Accepted formats: + +- valid JSON strings + +- JSON documents wrapped with Markdown tags (very common for GPT responses) + +Example: +``` +{"my": "json"} +``` + +**Details regarding block behavior:** + +- `error_status` is set `True` whenever parsing cannot be completed + +- in case of multiple markdown blocks with raw JSON content - only first will be parsed +""" + +SHORT_DESCRIPTION = "Parses raw string into classification prediction." 
+ + +class BlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "name": "VLM as Classifier", + "version": "v2", + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "formatter", + } + ) + type: Literal["roboflow_core/vlm_as_classifier@v2"] + image: Selector(kind=[IMAGE_KIND]) = Field( + description="The image which was the base to generate VLM prediction", + examples=["$inputs.image", "$steps.cropping.crops"], + ) + vlm_output: Selector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( + title="VLM Output", + description="The string with raw classification prediction to parse.", + examples=[["$steps.lmm.output"]], + ) + classes: Union[ + Selector(kind=[LIST_OF_VALUES_KIND]), + Selector(kind=[LIST_OF_VALUES_KIND]), + List[str], + ] = Field( + description="List of all classes used by the model, required to " + "generate mapping between class name and class id.", + examples=[["$steps.lmm.classes", "$inputs.classes", ["class_a", "class_b"]]], + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="error_status", kind=[BOOLEAN_KIND]), + OutputDefinition(name="predictions", kind=[CLASSIFICATION_PREDICTION_KIND]), + OutputDefinition(name="inference_id", kind=[INFERENCE_ID_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class VLMAsClassifierBlockV2(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockManifest + + def run( + self, + image: WorkflowImageData, + vlm_output: str, + classes: List[str], + ) -> BlockResult: + inference_id = f"{uuid4()}" + error_status, parsed_data = string2json( + raw_json=vlm_output, + ) + if error_status: + return { + "error_status": True, + "predictions": None, + "inference_id": inference_id, + } + if "class_name" in parsed_data and "confidence" in parsed_data: + return parse_multi_class_classification_results( + image=image, + results=parsed_data, + classes=classes, + inference_id=inference_id, + ) + if "predicted_classes" in parsed_data: + return parse_multi_label_classification_results( + image=image, + results=parsed_data, + classes=classes, + inference_id=inference_id, + ) + return { + "error_status": True, + "predictions": None, + "inference_id": inference_id, + } + + +def string2json( + raw_json: str, +) -> Tuple[bool, dict]: + json_blocks_found = JSON_MARKDOWN_BLOCK_PATTERN.findall(raw_json) + if len(json_blocks_found) == 0: + return try_parse_json(raw_json) + first_block = json_blocks_found[0] + return try_parse_json(first_block) + + +def try_parse_json(content: str) -> Tuple[bool, dict]: + try: + return False, json.loads(content) + except Exception as error: + logging.warning( + f"Could not parse JSON to dict in `roboflow_core/vlm_as_classifier@v1` block. " + f"Error type: {error.__class__.__name__}. 
Details: {error}" + ) + return True, {} + + +def parse_multi_class_classification_results( + image: WorkflowImageData, + results: dict, + classes: List[str], + inference_id: str, +) -> dict: + try: + class2id_mapping = create_classes_index(classes=classes) + height, width = image.numpy_image.shape[:2] + top_class = results["class_name"] + confidences = {top_class: scale_confidence(results["confidence"])} + predictions = [] + if top_class not in class2id_mapping: + predictions.append( + { + "class": top_class, + "class_id": -1, + "confidence": confidences.get(top_class, 0.0), + } + ) + for class_name, class_id in class2id_mapping.items(): + predictions.append( + { + "class": class_name, + "class_id": class_id, + "confidence": confidences.get(class_name, 0.0), + } + ) + parsed_prediction = { + "image": {"width": width, "height": height}, + "predictions": predictions, + "top": top_class, + "confidence": confidences[top_class], + "inference_id": inference_id, + "parent_id": image.parent_metadata.parent_id, + } + return { + "error_status": False, + "predictions": parsed_prediction, + "inference_id": inference_id, + } + except Exception as error: + logging.warning( + f"Could not parse multi-class classification results in `roboflow_core/vlm_as_classifier@v1` block. " + f"Error type: {error.__class__.__name__}. Details: {error}" + ) + return {"error_status": True, "predictions": None, "inference_id": inference_id} + + +def parse_multi_label_classification_results( + image: WorkflowImageData, + results: dict, + classes: List[str], + inference_id: str, +) -> dict: + try: + class2id_mapping = create_classes_index(classes=classes) + height, width = image.numpy_image.shape[:2] + predicted_classes_confidences = {} + for prediction in results["predicted_classes"]: + if prediction["class"] not in class2id_mapping: + class2id_mapping[prediction["class"]] = -1 + if prediction["class"] in predicted_classes_confidences: + old_confidence = predicted_classes_confidences[prediction["class"]] + new_confidence = scale_confidence(value=prediction["confidence"]) + predicted_classes_confidences[prediction["class"]] = max( + old_confidence, new_confidence + ) + else: + predicted_classes_confidences[prediction["class"]] = scale_confidence( + value=prediction["confidence"] + ) + predictions = { + class_name: { + "confidence": predicted_classes_confidences.get(class_name, 0.0), + "class_id": class_id, + } + for class_name, class_id in class2id_mapping.items() + } + parsed_prediction = { + "image": {"width": width, "height": height}, + "predictions": predictions, + "predicted_classes": list(predicted_classes_confidences.keys()), + "inference_id": inference_id, + "parent_id": image.parent_metadata.parent_id, + } + return { + "error_status": False, + "predictions": parsed_prediction, + "inference_id": inference_id, + } + except Exception as error: + logging.warning( + f"Could not parse multi-label classification results in `roboflow_core/vlm_as_classifier@v1` block. " + f"Error type: {error.__class__.__name__}. 
Details: {error}" + ) + return {"error_status": True, "predictions": None, "inference_id": inference_id} + + +def create_classes_index(classes: List[str]) -> Dict[str, int]: + return {class_name: idx for idx, class_name in enumerate(classes)} + + +def scale_confidence(value: float) -> float: + return min(max(float(value), 0.0), 1.0) diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v2.py b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v2.py new file mode 100644 index 000000000..955731cb5 --- /dev/null +++ b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v2.py @@ -0,0 +1,376 @@ +import hashlib +import json +import logging +import re +from functools import partial +from typing import Dict, List, Literal, Optional, Tuple, Type, Union +from uuid import uuid4 + +import numpy as np +import supervision as sv +from pydantic import ConfigDict, Field, model_validator +from supervision.config import CLASS_NAME_DATA_FIELD + +from inference.core.workflows.core_steps.common.utils import ( + attach_parents_coordinates_to_sv_detections, +) +from inference.core.workflows.core_steps.common.vlms import VLM_TASKS_METADATA +from inference.core.workflows.execution_engine.constants import ( + DETECTION_ID_KEY, + IMAGE_DIMENSIONS_KEY, + INFERENCE_ID_KEY, + PREDICTION_TYPE_KEY, +) +from inference.core.workflows.execution_engine.entities.base import ( + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.execution_engine.entities.types import ( + BOOLEAN_KIND, + IMAGE_KIND, + LANGUAGE_MODEL_OUTPUT_KIND, + LIST_OF_VALUES_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + Selector, + INFERENCE_ID_KIND, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + +JSON_MARKDOWN_BLOCK_PATTERN = re.compile(r"```json([\s\S]*?)```", flags=re.IGNORECASE) + +LONG_DESCRIPTION = """ +The block expects string input that would be produced by blocks exposing Large Language Models (LLMs) and +Visual Language Models (VLMs). Input is parsed to object-detection prediction and returned as block output. + +Accepted formats: + +- valid JSON strings + +- JSON documents wrapped with Markdown tags + +Example +``` +{"my": "json"} +``` + +**Details regarding block behavior:** + +- `error_status` is set `True` whenever parsing cannot be completed + +- in case of multiple markdown blocks with raw JSON content - only first will be parsed +""" + +SHORT_DESCRIPTION = "Parses raw string into object-detection prediction." 
+ +SUPPORTED_TASKS = { + "object-detection", + "object-detection-and-caption", + "open-vocabulary-object-detection", + "phrase-grounded-object-detection", + "region-proposal", + "ocr-with-text-detection", +} +RELEVANT_TASKS_METADATA = { + k: v for k, v in VLM_TASKS_METADATA.items() if k in SUPPORTED_TASKS +} + + +class BlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "name": "VLM as Detector", + "version": "v2", + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "formatter", + }, + protected_namespaces=(), + ) + type: Literal["roboflow_core/vlm_as_detector@v2"] + image: Selector(kind=[IMAGE_KIND]) = Field( + description="The image which was the base to generate VLM prediction", + examples=["$inputs.image", "$steps.cropping.crops"], + ) + vlm_output: Selector(kind=[LANGUAGE_MODEL_OUTPUT_KIND]) = Field( + title="VLM Output", + description="The string with raw classification prediction to parse.", + examples=[["$steps.lmm.output"]], + ) + classes: Optional[ + Union[ + Selector(kind=[LIST_OF_VALUES_KIND]), + Selector(kind=[LIST_OF_VALUES_KIND]), + List[str], + ] + ] = Field( + description="List of all classes used by the model, required to " + "generate mapping between class name and class id.", + examples=[["$steps.lmm.classes", "$inputs.classes", ["class_a", "class_b"]]], + json_schema_extra={ + "relevant_for": { + "model_type": { + "values": ["google-gemini", "anthropic-claude"], + "required": True, + }, + } + }, + ) + model_type: Literal["google-gemini", "anthropic-claude", "florence-2"] = Field( + description="Type of the model that generated prediction", + examples=[["google-gemini", "anthropic-claude", "florence-2"]], + ) + task_type: Literal[tuple(SUPPORTED_TASKS)] = Field( + description="Task type to performed by model.", + json_schema_extra={ + "values_metadata": RELEVANT_TASKS_METADATA, + }, + ) + + @model_validator(mode="after") + def validate(self) -> "BlockManifest": + if (self.model_type, self.task_type) not in REGISTERED_PARSERS: + raise ValueError( + f"Could not parse result of task {self.task_type} for model {self.model_type}" + ) + if self.model_type != "florence-2" and self.classes is None: + raise ValueError( + "Must pass list of classes to this block when using gemini or claude" + ) + + return self + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="error_status", kind=[BOOLEAN_KIND]), + OutputDefinition( + name="predictions", kind=[OBJECT_DETECTION_PREDICTION_KIND] + ), + OutputDefinition(name="inference_id", kind=[INFERENCE_ID_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class VLMAsDetectorBlockV2(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockManifest + + def run( + self, + image: WorkflowImageData, + vlm_output: str, + classes: Optional[List[str]], + model_type: str, + task_type: str, + ) -> BlockResult: + inference_id = f"{uuid4()}" + error_status, parsed_data = string2json( + raw_json=vlm_output, + ) + if error_status: + return { + "error_status": True, + "predictions": None, + "inference_id": inference_id, + } + try: + predictions = REGISTERED_PARSERS[(model_type, task_type)]( + image=image, + parsed_data=parsed_data, + classes=classes, + inference_id=inference_id, + ) + return { + "error_status": False, + "predictions": predictions, + "inference_id": 
inference_id, + } + except Exception as error: + logging.warning( + f"Could not parse VLM prediction for model {model_type} and task {task_type} " + f"in `roboflow_core/vlm_as_detector@v1` block. " + f"Error type: {error.__class__.__name__}. Details: {error}" + ) + return { + "error_status": True, + "predictions": None, + "inference_id": inference_id, + } + + +def string2json( + raw_json: str, +) -> Tuple[bool, dict]: + json_blocks_found = JSON_MARKDOWN_BLOCK_PATTERN.findall(raw_json) + if len(json_blocks_found) == 0: + return try_parse_json(raw_json) + first_block = json_blocks_found[0] + return try_parse_json(first_block) + + +def try_parse_json(content: str) -> Tuple[bool, dict]: + try: + return False, json.loads(content) + except Exception as error: + logging.warning( + f"Could not parse JSON to dict in `roboflow_core/vlm_as_detector@v1` block. " + f"Error type: {error.__class__.__name__}. Details: {error}" + ) + return True, {} + + +def parse_gemini_object_detection_response( + image: WorkflowImageData, + parsed_data: dict, + classes: List[str], + inference_id: str, +) -> sv.Detections: + class_name2id = create_classes_index(classes=classes) + image_height, image_width = image.numpy_image.shape[:2] + if len(parsed_data["detections"]) == 0: + return sv.Detections.empty() + xyxy, class_id, class_name, confidence = [], [], [], [] + for detection in parsed_data["detections"]: + xyxy.append( + [ + detection["x_min"] * image_width, + detection["y_min"] * image_height, + detection["x_max"] * image_width, + detection["y_max"] * image_height, + ] + ) + class_id.append(class_name2id.get(detection["class_name"], -1)) + class_name.append(detection["class_name"]) + confidence.append(scale_confidence(detection.get("confidence", 1.0))) + xyxy = np.array(xyxy).round(0) if len(xyxy) > 0 else np.empty((0, 4)) + confidence = np.array(confidence) if len(confidence) > 0 else np.empty(0) + class_id = np.array(class_id).astype(int) if len(class_id) > 0 else np.empty(0) + class_name = np.array(class_name) if len(class_name) > 0 else np.empty(0) + detection_ids = np.array([str(uuid4()) for _ in range(len(xyxy))]) + dimensions = np.array([[image_height, image_width]] * len(xyxy)) + inference_ids = np.array([inference_id] * len(xyxy)) + prediction_type = np.array(["object-detection"] * len(xyxy)) + data = { + CLASS_NAME_DATA_FIELD: class_name, + IMAGE_DIMENSIONS_KEY: dimensions, + INFERENCE_ID_KEY: inference_ids, + DETECTION_ID_KEY: detection_ids, + PREDICTION_TYPE_KEY: prediction_type, + } + detections = sv.Detections( + xyxy=xyxy, + confidence=confidence, + class_id=class_id, + mask=None, + tracker_id=None, + data=data, + ) + return attach_parents_coordinates_to_sv_detections( + detections=detections, + image=image, + ) + + +def create_classes_index(classes: List[str]) -> Dict[str, int]: + return {class_name: idx for idx, class_name in enumerate(classes)} + + +def scale_confidence(value: float) -> float: + return min(max(float(value), 0.0), 1.0) + + +def parse_florence2_object_detection_response( + image: WorkflowImageData, + parsed_data: dict, + classes: Optional[List[str]], + inference_id: str, + florence_task_type: str, +): + image_height, image_width = image.numpy_image.shape[:2] + detections = sv.Detections.from_lmm( + "florence_2", + result={florence_task_type: parsed_data}, + resolution_wh=(image_width, image_height), + ) + detections.class_id = np.array([0] * len(detections)) + if florence_task_type == "": + detections.data["class_name"] = np.array(["roi"] * len(detections)) + if florence_task_type in 
{"", ""}: + unique_class_names = set(detections.data.get("class_name", [])) + class_name_to_id = { + name: get_4digit_from_md5(name) for name in unique_class_names + } + class_ids = [ + class_name_to_id.get(name, -1) + for name in detections.data.get("class_name", ["unknown"] * len(detections)) + ] + detections.class_id = np.array(class_ids) + if florence_task_type in "": + class_name_to_id = {name: idx for idx, name in enumerate(classes)} + class_ids = [ + class_name_to_id.get(name, -1) + for name in detections.data.get("class_name", ["unknown"] * len(detections)) + ] + detections.class_id = np.array(class_ids) + dimensions = np.array([[image_height, image_width]] * len(detections)) + detection_ids = np.array([str(uuid4()) for _ in range(len(detections))]) + inference_ids = np.array([inference_id] * len(detections)) + prediction_type = np.array(["object-detection"] * len(detections)) + detections.data.update( + { + INFERENCE_ID_KEY: inference_ids, + DETECTION_ID_KEY: detection_ids, + PREDICTION_TYPE_KEY: prediction_type, + IMAGE_DIMENSIONS_KEY: dimensions, + } + ) + detections.confidence = np.array([1.0 for _ in detections]) + return attach_parents_coordinates_to_sv_detections( + detections=detections, image=image + ) + + +def get_4digit_from_md5(input_string): + md5_hash = hashlib.md5(input_string.encode("utf-8")) + hex_digest = md5_hash.hexdigest() + integer_value = int(hex_digest[:9], 16) + return integer_value % 10000 + + +REGISTERED_PARSERS = { + ("google-gemini", "object-detection"): parse_gemini_object_detection_response, + ("anthropic-claude", "object-detection"): parse_gemini_object_detection_response, + ("florence-2", "object-detection"): partial( + parse_florence2_object_detection_response, florence_task_type="" + ), + ("florence-2", "open-vocabulary-object-detection"): partial( + parse_florence2_object_detection_response, + florence_task_type="", + ), + ("florence-2", "object-detection-and-caption"): partial( + parse_florence2_object_detection_response, + florence_task_type="", + ), + ("florence-2", "phrase-grounded-object-detection"): partial( + parse_florence2_object_detection_response, + florence_task_type="", + ), + ("florence-2", "region-proposal"): partial( + parse_florence2_object_detection_response, + florence_task_type="", + ), + ("florence-2", "ocr-with-text-detection"): partial( + parse_florence2_object_detection_response, + florence_task_type="", + ), +} diff --git a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v2.py b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v2.py new file mode 100644 index 000000000..6912238aa --- /dev/null +++ b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v2.py @@ -0,0 +1,364 @@ +from typing import List, Literal, Optional, Type, Union + +from pydantic import ConfigDict, Field, PositiveInt + +from inference.core.entities.requests.inference import ( + InstanceSegmentationInferenceRequest, +) +from inference.core.env import ( + HOSTED_INSTANCE_SEGMENTATION_URL, + LOCAL_INFERENCE_API_URL, + WORKFLOWS_REMOTE_API_TARGET, + WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE, + WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS, +) +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.core_steps.common.utils import ( + attach_parents_coordinates_to_batch_of_sv_detections, + attach_prediction_type_info_to_sv_detections_batch, + 
convert_inference_detections_batch_to_sv_detections, + filter_out_unwanted_classes_from_sv_detections_batch, +) +from inference.core.workflows.execution_engine.constants import INFERENCE_ID_KEY +from inference.core.workflows.execution_engine.entities.base import ( + Batch, + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.execution_engine.entities.types import ( + BOOLEAN_KIND, + FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, + INSTANCE_SEGMENTATION_PREDICTION_KIND, + INTEGER_KIND, + LIST_OF_VALUES_KIND, + ROBOFLOW_MODEL_ID_KIND, + ROBOFLOW_PROJECT_KIND, + STRING_KIND, + FloatZeroToOne, + ImageInputField, + RoboflowModelField, + Selector, + INFERENCE_ID_KIND, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) +from inference_sdk import InferenceConfiguration, InferenceHTTPClient + +LONG_DESCRIPTION = """ +Run inference on an instance segmentation model hosted on or uploaded to Roboflow. + +You can query any model that is private to your account, or any public model available +on [Roboflow Universe](https://universe.roboflow.com). + +You will need to set your Roboflow API key in your Inference environment to use this +block. To learn more about setting your Roboflow API key, [refer to the Inference +documentation](https://inference.roboflow.com/quickstart/configure_api_key/). +""" + + +class BlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "name": "Instance Segmentation Model", + "version": "v2", + "short_description": "Predict the shape, size, and location of objects.", + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "model", + }, + protected_namespaces=(), + ) + type: Literal["roboflow_core/roboflow_instance_segmentation_model@v2"] + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + model_id: Union[Selector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = RoboflowModelField + class_agnostic_nms: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( + default=False, + description="Value to decide if NMS is to be used in class-agnostic mode.", + examples=[True, "$inputs.class_agnostic_nms"], + ) + class_filter: Union[Optional[List[str]], Selector(kind=[LIST_OF_VALUES_KIND])] = ( + Field( + default=None, + description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", + examples=[["a", "b", "c"], "$inputs.class_filter"], + ) + ) + confidence: Union[ + FloatZeroToOne, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ] = Field( + default=0.4, + description="Confidence threshold for predictions", + examples=[0.3, "$inputs.confidence_threshold"], + ) + iou_threshold: Union[ + FloatZeroToOne, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ] = Field( + default=0.3, + description="Parameter of NMS, to decide on minimum box intersection over union to merge boxes", + examples=[0.4, "$inputs.iou_threshold"], + ) + max_detections: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( + default=300, + description="Maximum number of detections to return", + examples=[300, "$inputs.max_detections"], + ) + max_candidates: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( + default=3000, + description="Maximum number of candidates as NMS input to be taken into account.", + examples=[3000, "$inputs.max_candidates"], + ) + mask_decode_mode: Union[ + Literal["accurate", "tradeoff", "fast"], + Selector(kind=[STRING_KIND]), + ] = Field( + default="accurate", + description="Parameter of mask 
decoding in prediction post-processing.", + examples=["accurate", "$inputs.mask_decode_mode"], + ) + tradeoff_factor: Union[ + FloatZeroToOne, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ] = Field( + default=0.0, + description="Post-processing parameter to dictate tradeoff between fast and accurate", + examples=[0.3, "$inputs.tradeoff_factor"], + ) + disable_active_learning: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( + default=True, + description="Parameter to decide if Active Learning data sampling is disabled for the model", + examples=[True, "$inputs.disable_active_learning"], + ) + active_learning_target_dataset: Union[ + Selector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + ] = Field( + default=None, + description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " + "docs for more information", + examples=["my_project", "$inputs.al_target_project"], + ) + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name=INFERENCE_ID_KEY, kind=[INFERENCE_ID_KIND]), + OutputDefinition( + name="predictions", + kind=[INSTANCE_SEGMENTATION_PREDICTION_KIND], + ), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class RoboflowInstanceSegmentationModelBlockV2(WorkflowBlock): + + def __init__( + self, + model_manager: ModelManager, + api_key: Optional[str], + step_execution_mode: StepExecutionMode, + ): + self._model_manager = model_manager + self._api_key = api_key + self._step_execution_mode = step_execution_mode + + @classmethod + def get_init_parameters(cls) -> List[str]: + return ["model_manager", "api_key", "step_execution_mode"] + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockManifest + + def run( + self, + images: Batch[WorkflowImageData], + model_id: str, + class_agnostic_nms: Optional[bool], + class_filter: Optional[List[str]], + confidence: Optional[float], + iou_threshold: Optional[float], + max_detections: Optional[int], + max_candidates: Optional[int], + mask_decode_mode: Literal["accurate", "tradeoff", "fast"], + tradeoff_factor: Optional[float], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + if self._step_execution_mode is StepExecutionMode.LOCAL: + return self.run_locally( + images=images, + model_id=model_id, + class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + mask_decode_mode=mask_decode_mode, + tradeoff_factor=tradeoff_factor, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + ) + elif self._step_execution_mode is StepExecutionMode.REMOTE: + return self.run_remotely( + images=images, + model_id=model_id, + class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + mask_decode_mode=mask_decode_mode, + tradeoff_factor=tradeoff_factor, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + ) + else: + raise ValueError( + f"Unknown step execution mode: {self._step_execution_mode}" + ) + + def run_locally( + self, + images: 
Batch[WorkflowImageData], + model_id: str, + class_agnostic_nms: Optional[bool], + class_filter: Optional[List[str]], + confidence: Optional[float], + iou_threshold: Optional[float], + max_detections: Optional[int], + max_candidates: Optional[int], + mask_decode_mode: Literal["accurate", "tradeoff", "fast"], + tradeoff_factor: Optional[float], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + inference_images = [i.to_inference_format(numpy_preferred=True) for i in images] + request = InstanceSegmentationInferenceRequest( + api_key=self._api_key, + model_id=model_id, + image=inference_images, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + mask_decode_mode=mask_decode_mode, + tradeoff_factor=tradeoff_factor, + source="workflow-execution", + ) + self._model_manager.add_model( + model_id=model_id, + api_key=self._api_key, + ) + predictions = self._model_manager.infer_from_request_sync( + model_id=model_id, request=request + ) + if not isinstance(predictions, list): + predictions = [predictions] + predictions = [ + e.model_dump(by_alias=True, exclude_none=True) for e in predictions + ] + return self._post_process_result( + images=images, + predictions=predictions, + class_filter=class_filter, + ) + + def run_remotely( + self, + images: Batch[WorkflowImageData], + model_id: str, + class_agnostic_nms: Optional[bool], + class_filter: Optional[List[str]], + confidence: Optional[float], + iou_threshold: Optional[float], + max_detections: Optional[int], + max_candidates: Optional[int], + mask_decode_mode: Literal["accurate", "tradeoff", "fast"], + tradeoff_factor: Optional[float], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + api_url = ( + LOCAL_INFERENCE_API_URL + if WORKFLOWS_REMOTE_API_TARGET != "hosted" + else HOSTED_INSTANCE_SEGMENTATION_URL + ) + client = InferenceHTTPClient( + api_url=api_url, + api_key=self._api_key, + ) + if WORKFLOWS_REMOTE_API_TARGET == "hosted": + client.select_api_v0() + client_config = InferenceConfiguration( + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence_threshold=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + mask_decode_mode=mask_decode_mode, + tradeoff_factor=tradeoff_factor, + max_batch_size=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE, + max_concurrent_requests=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS, + source="workflow-execution", + ) + client.configure(inference_configuration=client_config) + inference_images = [i.numpy_image for i in images] + predictions = client.infer( + inference_input=inference_images, + model_id=model_id, + ) + if not isinstance(predictions, list): + predictions = [predictions] + return self._post_process_result( + images=images, + predictions=predictions, + class_filter=class_filter, + ) + + def _post_process_result( + self, + images: Batch[WorkflowImageData], + predictions: List[dict], + class_filter: Optional[List[str]], + ) -> BlockResult: + inference_id = predictions[0].get(INFERENCE_ID_KEY, None) + predictions = 
convert_inference_detections_batch_to_sv_detections(predictions) + predictions = attach_prediction_type_info_to_sv_detections_batch( + predictions=predictions, + prediction_type="instance-segmentation", + ) + predictions = filter_out_unwanted_classes_from_sv_detections_batch( + predictions=predictions, + classes_to_accept=class_filter, + ) + predictions = attach_parents_coordinates_to_batch_of_sv_detections( + images=images, + predictions=predictions, + ) + return [ + {"inference_id": inference_id, "predictions": prediction} + for prediction in predictions + ] diff --git a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v2.py b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v2.py new file mode 100644 index 000000000..e464e3ab8 --- /dev/null +++ b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v2.py @@ -0,0 +1,353 @@ +from typing import List, Literal, Optional, Type, Union + +from pydantic import ConfigDict, Field, PositiveInt + +from inference.core.entities.requests.inference import ( + KeypointsDetectionInferenceRequest, +) +from inference.core.env import ( + HOSTED_DETECT_URL, + LOCAL_INFERENCE_API_URL, + WORKFLOWS_REMOTE_API_TARGET, + WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE, + WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS, +) +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.core_steps.common.utils import ( + add_inference_keypoints_to_sv_detections, + attach_parents_coordinates_to_batch_of_sv_detections, + attach_prediction_type_info_to_sv_detections_batch, + convert_inference_detections_batch_to_sv_detections, + filter_out_unwanted_classes_from_sv_detections_batch, +) +from inference.core.workflows.execution_engine.constants import INFERENCE_ID_KEY +from inference.core.workflows.execution_engine.entities.base import ( + Batch, + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.execution_engine.entities.types import ( + BOOLEAN_KIND, + FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, + INTEGER_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, + LIST_OF_VALUES_KIND, + ROBOFLOW_MODEL_ID_KIND, + ROBOFLOW_PROJECT_KIND, + FloatZeroToOne, + ImageInputField, + RoboflowModelField, + Selector, + INFERENCE_ID_KIND, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) +from inference_sdk import InferenceConfiguration, InferenceHTTPClient + +LONG_DESCRIPTION = """ +Run inference on a keypoint detection model hosted on or uploaded to Roboflow. + +You can query any model that is private to your account, or any public model available +on [Roboflow Universe](https://universe.roboflow.com). + +You will need to set your Roboflow API key in your Inference environment to use this +block. To learn more about setting your Roboflow API key, [refer to the Inference +documentation](https://inference.roboflow.com/quickstart/configure_api_key/). 
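For orientation, the v2 model blocks introduced above keep the v1 run interface but additionally expose an `inference_id` output. A minimal workflow definition wiring the instance segmentation v2 block could look roughly like the sketch below; only the step "type" and the "predictions" / "inference_id" output names are taken from the manifest above, while the surrounding definition skeleton (input names, JsonField outputs, the "segmentation" step name) is an assumption for illustration, not part of this patch.

# Illustrative sketch only, assuming the usual workflow-definition layout;
# only the step "type" and the output names come from the code above.
EXAMPLE_WORKFLOW = {
    "version": "1.0",
    "inputs": [
        {"type": "WorkflowImage", "name": "image"},
        {"type": "WorkflowParameter", "name": "model_id"},
    ],
    "steps": [
        {
            "type": "roboflow_core/roboflow_instance_segmentation_model@v2",
            "name": "segmentation",
            "images": "$inputs.image",
            "model_id": "$inputs.model_id",
        }
    ],
    "outputs": [
        {
            "type": "JsonField",
            "name": "predictions",
            "selector": "$steps.segmentation.predictions",
        },
        {
            "type": "JsonField",
            "name": "inference_id",
            "selector": "$steps.segmentation.inference_id",
        },
    ],
}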
+""" + + +class BlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "name": "Keypoint Detection Model", + "version": "v2", + "short_description": "Predict skeletons on objects.", + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "model", + }, + protected_namespaces=(), + ) + type: Literal["roboflow_core/roboflow_keypoint_detection_model@v2"] + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + model_id: Union[Selector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = RoboflowModelField + class_agnostic_nms: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( + default=False, + description="Value to decide if NMS is to be used in class-agnostic mode.", + examples=[True, "$inputs.class_agnostic_nms"], + ) + class_filter: Union[Optional[List[str]], Selector(kind=[LIST_OF_VALUES_KIND])] = ( + Field( + default=None, + description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", + examples=[["a", "b", "c"], "$inputs.class_filter"], + ) + ) + confidence: Union[ + FloatZeroToOne, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ] = Field( + default=0.4, + description="Confidence threshold for predictions", + examples=[0.3, "$inputs.confidence_threshold"], + ) + iou_threshold: Union[ + FloatZeroToOne, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ] = Field( + default=0.3, + description="Parameter of NMS, to decide on minimum box intersection over union to merge boxes", + examples=[0.4, "$inputs.iou_threshold"], + ) + max_detections: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( + default=300, + description="Maximum number of detections to return", + examples=[300, "$inputs.max_detections"], + ) + max_candidates: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( + default=3000, + description="Maximum number of candidates as NMS input to be taken into account.", + examples=[3000, "$inputs.max_candidates"], + ) + keypoint_confidence: Union[ + FloatZeroToOne, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ] = Field( + default=0.0, + description="Confidence threshold to predict keypoint as visible.", + examples=[0.3, "$inputs.keypoint_confidence"], + ) + disable_active_learning: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( + default=True, + description="Parameter to decide if Active Learning data sampling is disabled for the model", + examples=[True, "$inputs.disable_active_learning"], + ) + active_learning_target_dataset: Union[ + Selector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + ] = Field( + default=None, + description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " + "docs for more information", + examples=["my_project", "$inputs.al_target_project"], + ) + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name=INFERENCE_ID_KEY, kind=[INFERENCE_ID_KIND]), + OutputDefinition( + name="predictions", kind=[KEYPOINT_DETECTION_PREDICTION_KIND] + ), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class RoboflowKeypointDetectionModelBlockV2(WorkflowBlock): + + def __init__( + self, + model_manager: ModelManager, + api_key: Optional[str], + step_execution_mode: StepExecutionMode, + ): + self._model_manager = model_manager + self._api_key = api_key + self._step_execution_mode = step_execution_mode + + @classmethod 
+ def get_init_parameters(cls) -> List[str]: + return ["model_manager", "api_key", "step_execution_mode"] + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockManifest + + def run( + self, + images: Batch[WorkflowImageData], + model_id: str, + class_agnostic_nms: Optional[bool], + class_filter: Optional[List[str]], + confidence: Optional[float], + iou_threshold: Optional[float], + max_detections: Optional[int], + max_candidates: Optional[int], + keypoint_confidence: Optional[float], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + if self._step_execution_mode is StepExecutionMode.LOCAL: + return self.run_locally( + images=images, + model_id=model_id, + class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + keypoint_confidence=keypoint_confidence, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + ) + elif self._step_execution_mode is StepExecutionMode.REMOTE: + return self.run_remotely( + images=images, + model_id=model_id, + class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + keypoint_confidence=keypoint_confidence, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + ) + else: + raise ValueError( + f"Unknown step execution mode: {self._step_execution_mode}" + ) + + def run_locally( + self, + images: Batch[WorkflowImageData], + model_id: str, + class_agnostic_nms: Optional[bool], + class_filter: Optional[List[str]], + confidence: Optional[float], + iou_threshold: Optional[float], + max_detections: Optional[int], + max_candidates: Optional[int], + keypoint_confidence: Optional[float], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + inference_images = [i.to_inference_format(numpy_preferred=True) for i in images] + request = KeypointsDetectionInferenceRequest( + api_key=self._api_key, + model_id=model_id, + image=inference_images, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + keypoint_confidence=keypoint_confidence, + source="workflow-execution", + ) + self._model_manager.add_model( + model_id=model_id, + api_key=self._api_key, + ) + predictions = self._model_manager.infer_from_request_sync( + model_id=model_id, request=request + ) + if not isinstance(predictions, list): + predictions = [predictions] + predictions = [ + e.model_dump(by_alias=True, exclude_none=True) for e in predictions + ] + return self._post_process_result( + images=images, + predictions=predictions, + class_filter=class_filter, + ) + + def run_remotely( + self, + images: Batch[Optional[WorkflowImageData]], + model_id: str, + class_agnostic_nms: Optional[bool], + class_filter: Optional[List[str]], + confidence: Optional[float], + iou_threshold: Optional[float], + max_detections: Optional[int], + max_candidates: Optional[int], + keypoint_confidence: Optional[float], + disable_active_learning: 
Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + api_url = ( + LOCAL_INFERENCE_API_URL + if WORKFLOWS_REMOTE_API_TARGET != "hosted" + else HOSTED_DETECT_URL + ) + client = InferenceHTTPClient( + api_url=api_url, + api_key=self._api_key, + ) + if WORKFLOWS_REMOTE_API_TARGET == "hosted": + client.select_api_v0() + client_config = InferenceConfiguration( + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence_threshold=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + keypoint_confidence_threshold=keypoint_confidence, + max_batch_size=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE, + max_concurrent_requests=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS, + source="workflow-execution", + ) + client.configure(inference_configuration=client_config) + inference_images = [i.numpy_image for i in images] + predictions = client.infer( + inference_input=inference_images, + model_id=model_id, + ) + if not isinstance(predictions, list): + predictions = [predictions] + return self._post_process_result( + images=images, + predictions=predictions, + class_filter=class_filter, + ) + + def _post_process_result( + self, + images: Batch[WorkflowImageData], + predictions: List[dict], + class_filter: Optional[List[str]], + ) -> BlockResult: + inference_id = predictions[0].get(INFERENCE_ID_KEY, None) + detections = convert_inference_detections_batch_to_sv_detections(predictions) + for prediction, image_detections in zip(predictions, detections): + add_inference_keypoints_to_sv_detections( + inference_prediction=prediction["predictions"], + detections=image_detections, + ) + detections = attach_prediction_type_info_to_sv_detections_batch( + predictions=detections, + prediction_type="keypoint-detection", + ) + detections = filter_out_unwanted_classes_from_sv_detections_batch( + predictions=detections, + classes_to_accept=class_filter, + ) + detections = attach_parents_coordinates_to_batch_of_sv_detections( + images=images, + predictions=detections, + ) + return [ + {"inference_id": inference_id, "predictions": image_detections} + for image_detections in detections + ] diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py new file mode 100644 index 000000000..75e19b677 --- /dev/null +++ b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py @@ -0,0 +1,254 @@ +from typing import List, Literal, Optional, Type, Union + +from pydantic import ConfigDict, Field + +from inference.core.entities.requests.inference import ClassificationInferenceRequest +from inference.core.env import ( + HOSTED_CLASSIFICATION_URL, + LOCAL_INFERENCE_API_URL, + WORKFLOWS_REMOTE_API_TARGET, + WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE, + WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS, +) +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.core_steps.common.utils import attach_prediction_type_info +from inference.core.workflows.execution_engine.constants import ( + INFERENCE_ID_KEY, + PARENT_ID_KEY, + ROOT_PARENT_ID_KEY, +) +from inference.core.workflows.execution_engine.entities.base import ( + Batch, + 
OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.execution_engine.entities.types import ( + BOOLEAN_KIND, + CLASSIFICATION_PREDICTION_KIND, + FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, + ROBOFLOW_MODEL_ID_KIND, + ROBOFLOW_PROJECT_KIND, + FloatZeroToOne, + ImageInputField, + RoboflowModelField, + Selector, + INFERENCE_ID_KIND, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) +from inference_sdk import InferenceConfiguration, InferenceHTTPClient + +LONG_DESCRIPTION = """ +Run inference on a multi-class classification model hosted on or uploaded to Roboflow. + +You can query any model that is private to your account, or any public model available +on [Roboflow Universe](https://universe.roboflow.com). + +You will need to set your Roboflow API key in your Inference environment to use this +block. To learn more about setting your Roboflow API key, [refer to the Inference +documentation](https://inference.roboflow.com/quickstart/configure_api_key/). +""" + + +class BlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "name": "Single-Label Classification Model", + "version": "v2", + "short_description": "Apply a single tag to an image.", + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "model", + }, + protected_namespaces=(), + ) + type: Literal["roboflow_core/roboflow_classification_model@v2"] + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + model_id: Union[Selector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = RoboflowModelField + confidence: Union[ + FloatZeroToOne, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ] = Field( + default=0.4, + description="Confidence threshold for predictions", + examples=[0.3, "$inputs.confidence_threshold"], + ) + disable_active_learning: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( + default=True, + description="Parameter to decide if Active Learning data sampling is disabled for the model", + examples=[True, "$inputs.disable_active_learning"], + ) + active_learning_target_dataset: Union[ + Selector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + ] = Field( + default=None, + description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " + "docs for more information", + examples=["my_project", "$inputs.al_target_project"], + ) + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="predictions", kind=[CLASSIFICATION_PREDICTION_KIND]), + OutputDefinition(name=INFERENCE_ID_KEY, kind=[INFERENCE_ID_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class RoboflowClassificationModelBlockV2(WorkflowBlock): + + def __init__( + self, + model_manager: ModelManager, + api_key: Optional[str], + step_execution_mode: StepExecutionMode, + ): + self._model_manager = model_manager + self._api_key = api_key + self._step_execution_mode = step_execution_mode + + @classmethod + def get_init_parameters(cls) -> List[str]: + return ["model_manager", "api_key", "step_execution_mode"] + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockManifest + + def run( + self, + images: Batch[WorkflowImageData], + model_id: str, + confidence: Optional[float], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + 
) -> BlockResult: + if self._step_execution_mode is StepExecutionMode.LOCAL: + return self.run_locally( + images=images, + model_id=model_id, + confidence=confidence, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + ) + elif self._step_execution_mode is StepExecutionMode.REMOTE: + return self.run_remotely( + images=images, + model_id=model_id, + confidence=confidence, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + ) + else: + raise ValueError( + f"Unknown step execution mode: {self._step_execution_mode}" + ) + + def run_locally( + self, + images: Batch[WorkflowImageData], + model_id: str, + confidence: Optional[float], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + inference_images = [i.to_inference_format(numpy_preferred=True) for i in images] + request = ClassificationInferenceRequest( + api_key=self._api_key, + model_id=model_id, + image=inference_images, + confidence=confidence, + disable_active_learning=disable_active_learning, + source="workflow-execution", + active_learning_target_dataset=active_learning_target_dataset, + ) + self._model_manager.add_model( + model_id=model_id, + api_key=self._api_key, + ) + predictions = self._model_manager.infer_from_request_sync( + model_id=model_id, request=request + ) + if isinstance(predictions, list): + predictions = [ + e.model_dump(by_alias=True, exclude_none=True) for e in predictions + ] + else: + predictions = [predictions.model_dump(by_alias=True, exclude_none=True)] + return self._post_process_result( + predictions=predictions, + images=images, + ) + + def run_remotely( + self, + images: Batch[Optional[WorkflowImageData]], + model_id: str, + confidence: Optional[float], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + api_url = ( + LOCAL_INFERENCE_API_URL + if WORKFLOWS_REMOTE_API_TARGET != "hosted" + else HOSTED_CLASSIFICATION_URL + ) + client = InferenceHTTPClient( + api_url=api_url, + api_key=self._api_key, + ) + if WORKFLOWS_REMOTE_API_TARGET == "hosted": + client.select_api_v0() + client_config = InferenceConfiguration( + confidence_threshold=confidence, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + max_batch_size=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE, + max_concurrent_requests=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS, + source="workflow-execution", + ) + client.configure(inference_configuration=client_config) + non_empty_inference_images = [i.numpy_image for i in images] + predictions = client.infer( + inference_input=non_empty_inference_images, + model_id=model_id, + ) + if not isinstance(predictions, list): + predictions = [predictions] + return self._post_process_result( + predictions=predictions, + images=images, + ) + + def _post_process_result( + self, + images: Batch[WorkflowImageData], + predictions: List[dict], + ) -> BlockResult: + inference_id = predictions[0].get(INFERENCE_ID_KEY, None) + predictions = attach_prediction_type_info( + predictions=predictions, + prediction_type="classification", + ) + for prediction, image in zip(predictions, images): + prediction[PARENT_ID_KEY] = image.parent_metadata.parent_id + prediction[ROOT_PARENT_ID_KEY] = ( + image.workflow_root_ancestor_metadata.parent_id + ) + return [ + {"inference_id": inference_id, "predictions": 
prediction} + for prediction in predictions + ] diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py new file mode 100644 index 000000000..cc76f03a3 --- /dev/null +++ b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py @@ -0,0 +1,251 @@ +from typing import List, Literal, Optional, Type, Union + +from pydantic import ConfigDict, Field + +from inference.core.entities.requests.inference import ClassificationInferenceRequest +from inference.core.env import ( + HOSTED_CLASSIFICATION_URL, + LOCAL_INFERENCE_API_URL, + WORKFLOWS_REMOTE_API_TARGET, + WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE, + WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS, +) +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.core_steps.common.utils import attach_prediction_type_info +from inference.core.workflows.execution_engine.constants import ( + INFERENCE_ID_KEY, + PARENT_ID_KEY, + ROOT_PARENT_ID_KEY, +) +from inference.core.workflows.execution_engine.entities.base import ( + Batch, + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.execution_engine.entities.types import ( + BOOLEAN_KIND, + CLASSIFICATION_PREDICTION_KIND, + FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, + ROBOFLOW_MODEL_ID_KIND, + ROBOFLOW_PROJECT_KIND, + FloatZeroToOne, + ImageInputField, + RoboflowModelField, + Selector, + INFERENCE_ID_KIND, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) +from inference_sdk import InferenceConfiguration, InferenceHTTPClient + +LONG_DESCRIPTION = """ +Run inference on a multi-label classification model hosted on or uploaded to Roboflow. + +You can query any model that is private to your account, or any public model available +on [Roboflow Universe](https://universe.roboflow.com). + +You will need to set your Roboflow API key in your Inference environment to use this +block. To learn more about setting your Roboflow API key, [refer to the Inference +documentation](https://inference.roboflow.com/quickstart/configure_api_key/). 
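Since the classification v2 blocks return plain dictionaries rather than sv.Detections, it may help to picture the per-image element shape produced by `_post_process_result` above. The sketch below is illustrative only: the "inference_id" / "predictions" keys and the "classification" prediction type follow the code above, while the id value, the exact parent-id key names, and the remaining prediction fields are made-up placeholders.

# Rough shape of one element returned by the v2 classification blocks;
# all concrete values below are hypothetical.
example_result = {
    "inference_id": "00000000-0000-0000-0000-000000000000",
    "predictions": {
        "inference_id": "00000000-0000-0000-0000-000000000000",
        "prediction_type": "classification",
        "parent_id": "...",       # parent id of the input image (key name assumed)
        "root_parent_id": "...",  # root ancestor id (key name assumed)
        # ... plus the model-specific class / confidence fields
    },
}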
+""" + + +class BlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "name": "Multi-Label Classification Model", + "version": "v2", + "short_description": "Apply multiple tags to an image.", + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "model", + }, + protected_namespaces=(), + ) + type: Literal["roboflow_core/roboflow_multi_label_classification_model@v2"] + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + model_id: Union[Selector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = RoboflowModelField + confidence: Union[ + FloatZeroToOne, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ] = Field( + default=0.4, + description="Confidence threshold for predictions", + examples=[0.3, "$inputs.confidence_threshold"], + ) + disable_active_learning: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( + default=True, + description="Parameter to decide if Active Learning data sampling is disabled for the model", + examples=[True, "$inputs.disable_active_learning"], + ) + active_learning_target_dataset: Union[ + Selector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + ] = Field( + default=None, + description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " + "docs for more information", + examples=["my_project", "$inputs.al_target_project"], + ) + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="predictions", kind=[CLASSIFICATION_PREDICTION_KIND]), + OutputDefinition(name=INFERENCE_ID_KEY, kind=[INFERENCE_ID_KIND]), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class RoboflowMultiLabelClassificationModelBlockV2(WorkflowBlock): + + def __init__( + self, + model_manager: ModelManager, + api_key: Optional[str], + step_execution_mode: StepExecutionMode, + ): + self._model_manager = model_manager + self._api_key = api_key + self._step_execution_mode = step_execution_mode + + @classmethod + def get_init_parameters(cls) -> List[str]: + return ["model_manager", "api_key", "step_execution_mode"] + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockManifest + + def run( + self, + images: Batch[WorkflowImageData], + model_id: str, + confidence: Optional[float], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + if self._step_execution_mode is StepExecutionMode.LOCAL: + return self.run_locally( + images=images, + model_id=model_id, + confidence=confidence, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + ) + elif self._step_execution_mode is StepExecutionMode.REMOTE: + return self.run_remotely( + images=images, + model_id=model_id, + confidence=confidence, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + ) + else: + raise ValueError( + f"Unknown step execution mode: {self._step_execution_mode}" + ) + + def run_locally( + self, + images: Batch[WorkflowImageData], + model_id: str, + confidence: Optional[float], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + inference_images = [i.to_inference_format(numpy_preferred=True) for i in images] + request = ClassificationInferenceRequest( + 
api_key=self._api_key, + model_id=model_id, + image=inference_images, + confidence=confidence, + disable_active_learning=disable_active_learning, + source="workflow-execution", + active_learning_target_dataset=active_learning_target_dataset, + ) + self._model_manager.add_model( + model_id=model_id, + api_key=self._api_key, + ) + predictions = self._model_manager.infer_from_request_sync( + model_id=model_id, request=request + ) + if isinstance(predictions, list): + predictions = [ + e.dict(by_alias=True, exclude_none=True) for e in predictions + ] + else: + predictions = [predictions.dict(by_alias=True, exclude_none=True)] + return self._post_process_result( + predictions=predictions, + images=images, + ) + + def run_remotely( + self, + images: Batch[Optional[WorkflowImageData]], + model_id: str, + confidence: Optional[float], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + api_url = ( + LOCAL_INFERENCE_API_URL + if WORKFLOWS_REMOTE_API_TARGET != "hosted" + else HOSTED_CLASSIFICATION_URL + ) + client = InferenceHTTPClient( + api_url=api_url, + api_key=self._api_key, + ) + if WORKFLOWS_REMOTE_API_TARGET == "hosted": + client.select_api_v0() + client_config = InferenceConfiguration( + confidence_threshold=confidence, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + max_batch_size=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE, + max_concurrent_requests=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS, + source="workflow-execution", + ) + client.configure(inference_configuration=client_config) + non_empty_inference_images = [i.numpy_image for i in images] + predictions = client.infer( + inference_input=non_empty_inference_images, + model_id=model_id, + ) + if not isinstance(predictions, list): + predictions = [predictions] + return self._post_process_result(images=images, predictions=predictions) + + def _post_process_result( + self, + images: Batch[WorkflowImageData], + predictions: List[dict], + ) -> List[dict]: + inference_id = predictions[0].get(INFERENCE_ID_KEY, None) + predictions = attach_prediction_type_info( + predictions=predictions, + prediction_type="classification", + ) + for prediction, image in zip(predictions, images): + prediction[PARENT_ID_KEY] = image.parent_metadata.parent_id + prediction[ROOT_PARENT_ID_KEY] = ( + image.workflow_root_ancestor_metadata.parent_id + ) + return [ + {"inference_id": inference_id, "predictions": prediction} + for prediction in predictions + ] diff --git a/inference/core/workflows/core_steps/models/roboflow/object_detection/v2.py b/inference/core/workflows/core_steps/models/roboflow/object_detection/v2.py new file mode 100644 index 000000000..36ced095e --- /dev/null +++ b/inference/core/workflows/core_steps/models/roboflow/object_detection/v2.py @@ -0,0 +1,330 @@ +from typing import List, Literal, Optional, Type, Union + +from pydantic import ConfigDict, Field, PositiveInt + +from inference.core.entities.requests.inference import ObjectDetectionInferenceRequest +from inference.core.env import ( + HOSTED_DETECT_URL, + LOCAL_INFERENCE_API_URL, + WORKFLOWS_REMOTE_API_TARGET, + WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE, + WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS, +) +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.core_steps.common.utils import ( + 
attach_parents_coordinates_to_batch_of_sv_detections, + attach_prediction_type_info_to_sv_detections_batch, + convert_inference_detections_batch_to_sv_detections, + filter_out_unwanted_classes_from_sv_detections_batch, +) +from inference.core.workflows.execution_engine.constants import INFERENCE_ID_KEY +from inference.core.workflows.execution_engine.entities.base import ( + Batch, + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.execution_engine.entities.types import ( + BOOLEAN_KIND, + FLOAT_ZERO_TO_ONE_KIND, + IMAGE_KIND, + INTEGER_KIND, + LIST_OF_VALUES_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + ROBOFLOW_MODEL_ID_KIND, + ROBOFLOW_PROJECT_KIND, + FloatZeroToOne, + ImageInputField, + RoboflowModelField, + Selector, + INFERENCE_ID_KIND, +) +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) +from inference_sdk import InferenceConfiguration, InferenceHTTPClient + +LONG_DESCRIPTION = """ +Run inference on a object-detection model hosted on or uploaded to Roboflow. + +You can query any model that is private to your account, or any public model available +on [Roboflow Universe](https://universe.roboflow.com). + +You will need to set your Roboflow API key in your Inference environment to use this +block. To learn more about setting your Roboflow API key, [refer to the Inference +documentation](https://inference.roboflow.com/quickstart/configure_api_key/). +""" + + +class BlockManifest(WorkflowBlockManifest): + model_config = ConfigDict( + json_schema_extra={ + "name": "Object Detection Model", + "version": "v2", + "short_description": "Predict the location of objects with bounding boxes.", + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "model", + }, + protected_namespaces=(), + ) + type: Literal["roboflow_core/roboflow_object_detection_model@v2"] + images: Selector(kind=[IMAGE_KIND]) = ImageInputField + model_id: Union[Selector(kind=[ROBOFLOW_MODEL_ID_KIND]), str] = RoboflowModelField + class_agnostic_nms: Union[Optional[bool], Selector(kind=[BOOLEAN_KIND])] = Field( + default=False, + description="Value to decide if NMS is to be used in class-agnostic mode.", + examples=[True, "$inputs.class_agnostic_nms"], + ) + class_filter: Union[Optional[List[str]], Selector(kind=[LIST_OF_VALUES_KIND])] = ( + Field( + default=None, + description="List of classes to retrieve from predictions (to define subset of those which was used while model training)", + examples=[["a", "b", "c"], "$inputs.class_filter"], + ) + ) + confidence: Union[ + FloatZeroToOne, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ] = Field( + default=0.4, + description="Confidence threshold for predictions", + examples=[0.3, "$inputs.confidence_threshold"], + ) + iou_threshold: Union[ + FloatZeroToOne, + Selector(kind=[FLOAT_ZERO_TO_ONE_KIND]), + ] = Field( + default=0.3, + description="Parameter of NMS, to decide on minimum box intersection over union to merge boxes", + examples=[0.4, "$inputs.iou_threshold"], + ) + max_detections: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( + default=300, + description="Maximum number of detections to return", + examples=[300, "$inputs.max_detections"], + ) + max_candidates: Union[PositiveInt, Selector(kind=[INTEGER_KIND])] = Field( + default=3000, + description="Maximum number of candidates as NMS input to be taken into account.", + examples=[3000, "$inputs.max_candidates"], + ) + disable_active_learning: Union[bool, Selector(kind=[BOOLEAN_KIND])] = Field( + 
default=True, + description="Parameter to decide if Active Learning data sampling is disabled for the model", + examples=[True, "$inputs.disable_active_learning"], + ) + active_learning_target_dataset: Union[ + Selector(kind=[ROBOFLOW_PROJECT_KIND]), Optional[str] + ] = Field( + default=None, + description="Target dataset for Active Learning data sampling - see Roboflow Active Learning " + "docs for more information", + examples=["my_project", "$inputs.al_target_project"], + ) + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="inference_id", kind=[INFERENCE_ID_KIND]), + OutputDefinition( + name="predictions", kind=[OBJECT_DETECTION_PREDICTION_KIND] + ), + ] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class RoboflowObjectDetectionModelBlockV1(WorkflowBlock): + + def __init__( + self, + model_manager: ModelManager, + api_key: Optional[str], + step_execution_mode: StepExecutionMode, + ): + self._model_manager = model_manager + self._api_key = api_key + self._step_execution_mode = step_execution_mode + + @classmethod + def get_init_parameters(cls) -> List[str]: + return ["model_manager", "api_key", "step_execution_mode"] + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockManifest + + def run( + self, + images: Batch[WorkflowImageData], + model_id: str, + class_agnostic_nms: Optional[bool], + class_filter: Optional[List[str]], + confidence: Optional[float], + iou_threshold: Optional[float], + max_detections: Optional[int], + max_candidates: Optional[int], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + if self._step_execution_mode is StepExecutionMode.LOCAL: + return self.run_locally( + images=images, + model_id=model_id, + class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + ) + elif self._step_execution_mode is StepExecutionMode.REMOTE: + return self.run_remotely( + images=images, + model_id=model_id, + class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + ) + else: + raise ValueError( + f"Unknown step execution mode: {self._step_execution_mode}" + ) + + def run_locally( + self, + images: Batch[WorkflowImageData], + model_id: str, + class_agnostic_nms: Optional[bool], + class_filter: Optional[List[str]], + confidence: Optional[float], + iou_threshold: Optional[float], + max_detections: Optional[int], + max_candidates: Optional[int], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + inference_images = [i.to_inference_format(numpy_preferred=True) for i in images] + request = ObjectDetectionInferenceRequest( + api_key=self._api_key, + model_id=model_id, + image=inference_images, + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + 
class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + source="workflow-execution", + ) + self._model_manager.add_model( + model_id=model_id, + api_key=self._api_key, + ) + predictions = self._model_manager.infer_from_request_sync( + model_id=model_id, request=request + ) + if not isinstance(predictions, list): + predictions = [predictions] + predictions = [ + e.model_dump(by_alias=True, exclude_none=True) for e in predictions + ] + return self._post_process_result( + images=images, + predictions=predictions, + class_filter=class_filter, + ) + + def run_remotely( + self, + images: Batch[WorkflowImageData], + model_id: str, + class_agnostic_nms: Optional[bool], + class_filter: Optional[List[str]], + confidence: Optional[float], + iou_threshold: Optional[float], + max_detections: Optional[int], + max_candidates: Optional[int], + disable_active_learning: Optional[bool], + active_learning_target_dataset: Optional[str], + ) -> BlockResult: + api_url = ( + LOCAL_INFERENCE_API_URL + if WORKFLOWS_REMOTE_API_TARGET != "hosted" + else HOSTED_DETECT_URL + ) + client = InferenceHTTPClient( + api_url=api_url, + api_key=self._api_key, + ) + if WORKFLOWS_REMOTE_API_TARGET == "hosted": + client.select_api_v0() + client_config = InferenceConfiguration( + disable_active_learning=disable_active_learning, + active_learning_target_dataset=active_learning_target_dataset, + class_agnostic_nms=class_agnostic_nms, + class_filter=class_filter, + confidence_threshold=confidence, + iou_threshold=iou_threshold, + max_detections=max_detections, + max_candidates=max_candidates, + max_batch_size=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_BATCH_SIZE, + max_concurrent_requests=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS, + source="workflow-execution", + ) + client.configure(inference_configuration=client_config) + non_empty_inference_images = [i.numpy_image for i in images] + predictions = client.infer( + inference_input=non_empty_inference_images, + model_id=model_id, + ) + if not isinstance(predictions, list): + predictions = [predictions] + return self._post_process_result( + images=images, + predictions=predictions, + class_filter=class_filter, + ) + + def _post_process_result( + self, + images: Batch[WorkflowImageData], + predictions: List[dict], + class_filter: Optional[List[str]], + ) -> BlockResult: + inference_id = predictions[0].get(INFERENCE_ID_KEY, None) + predictions = convert_inference_detections_batch_to_sv_detections(predictions) + predictions = attach_prediction_type_info_to_sv_detections_batch( + predictions=predictions, + prediction_type="object-detection", + ) + predictions = filter_out_unwanted_classes_from_sv_detections_batch( + predictions=predictions, + classes_to_accept=class_filter, + ) + predictions = attach_parents_coordinates_to_batch_of_sv_detections( + images=images, + predictions=predictions, + ) + return [ + {"inference_id": inference_id, "predictions": prediction} + for prediction in predictions + ] From d11b5f738503ea758fd77a42333e11710aea319a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 12:37:02 +0100 Subject: [PATCH 54/67] Pin supervision to latest version with potential fixes --- requirements/_requirements.txt | 2 +- requirements/requirements.cli.txt | 2 +- requirements/requirements.sdk.http.txt | 2 +- requirements/requirements.test.unit.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff 
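The pin introduced below replaces the open-ended supervision range with a compatible-release constraint. If needed, the resolved environment can be sanity-checked with a snippet along these lines (this check is not part of the patch and assumes the `packaging` library is available):

# Hypothetical sanity check: confirm the installed supervision release
# satisfies the new compatible-release pin from the requirements files.
from importlib.metadata import version
from packaging.specifiers import SpecifierSet

assert version("supervision") in SpecifierSet("~=0.25.0")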
--git a/requirements/_requirements.txt b/requirements/_requirements.txt index e7568bcd0..25a4f7c25 100644 --- a/requirements/_requirements.txt +++ b/requirements/_requirements.txt @@ -11,7 +11,7 @@ prometheus-fastapi-instrumentator~=7.0.0 redis~=5.0.0 requests>=2.26.0,<2.32.0 # newer requests breaks docker which would need to be bumped to 7.x.x rich~=13.0.0 -supervision>=0.21.0,<=0.25.0 +supervision~=0.25.0 pybase64~=1.0.0 scikit-image>=0.19.0,<=0.24.0 requests-toolbelt~=1.0.0 diff --git a/requirements/requirements.cli.txt b/requirements/requirements.cli.txt index 19567def0..a0ce6c4eb 100644 --- a/requirements/requirements.cli.txt +++ b/requirements/requirements.cli.txt @@ -3,7 +3,7 @@ docker==6.1.3 typer>=0.9.0,<=0.12.5 rich~=13.0.0 PyYAML~=6.0.0 -supervision>=0.21.0,<=0.25.0 +supervision~=0.25.0 opencv-python>=4.8.1.78,<=4.10.0.84 tqdm>=4.0.0,<5.0.0 GPUtil~=1.4.0 diff --git a/requirements/requirements.sdk.http.txt b/requirements/requirements.sdk.http.txt index 9bed754b9..0d977eee6 100644 --- a/requirements/requirements.sdk.http.txt +++ b/requirements/requirements.sdk.http.txt @@ -2,7 +2,7 @@ requests>=2.26.0,<2.32.0 # newer requests breaks docker which would need to be dataclasses-json~=0.6.0 opencv-python>=4.8.1.78,<=4.10.0.84 pillow>=9.0.0,<11.0 -supervision>=0.21.0,<=0.25.0 +supervision~=0.25.0 numpy<=1.26.4 aiohttp>=3.9.0,<=3.10.11 backoff~=2.2.0 diff --git a/requirements/requirements.test.unit.txt b/requirements/requirements.test.unit.txt index c45356659..d781b5da5 100644 --- a/requirements/requirements.test.unit.txt +++ b/requirements/requirements.test.unit.txt @@ -10,4 +10,4 @@ pytest-timeout>=2.2.0 httpx uvicorn<=0.22.0 aioresponses>=0.7.6 -supervision>=0.20.0,<1.0.0 \ No newline at end of file +supervision~=0.25.0 \ No newline at end of file From 6368141ca735fed82a4c89c1113baa069511b656 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 13:07:24 +0100 Subject: [PATCH 55/67] Add v2 version of affected blocks --- .../formatters/vlm_as_classifier/v2.py | 2 +- .../formatters/vlm_as_detector/v2.py | 2 +- inference/core/workflows/core_steps/loader.py | 31 ++++++++++++++++++- .../roboflow/instance_segmentation/v2.py | 2 +- .../models/roboflow/keypoint_detection/v2.py | 2 +- .../roboflow/multi_class_classification/v2.py | 2 +- .../roboflow/multi_label_classification/v2.py | 2 +- .../models/roboflow/object_detection/v2.py | 4 +-- 8 files changed, 38 insertions(+), 9 deletions(-) diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v2.py b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v2.py index 81ab06be2..d751ba91a 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v2.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_classifier/v2.py @@ -14,10 +14,10 @@ BOOLEAN_KIND, CLASSIFICATION_PREDICTION_KIND, IMAGE_KIND, + INFERENCE_ID_KIND, LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, Selector, - INFERENCE_ID_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, diff --git a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v2.py b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v2.py index 955731cb5..f752bf8e0 100644 --- a/inference/core/workflows/core_steps/formatters/vlm_as_detector/v2.py +++ b/inference/core/workflows/core_steps/formatters/vlm_as_detector/v2.py @@ -28,11 +28,11 @@ from inference.core.workflows.execution_engine.entities.types import ( BOOLEAN_KIND, IMAGE_KIND, + INFERENCE_ID_KIND, 
LANGUAGE_MODEL_OUTPUT_KIND, LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, Selector, - INFERENCE_ID_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index 10f351697..d984dc13a 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -116,9 +116,15 @@ from inference.core.workflows.core_steps.formatters.vlm_as_classifier.v1 import ( VLMAsClassifierBlockV1, ) +from inference.core.workflows.core_steps.formatters.vlm_as_classifier.v2 import ( + VLMAsClassifierBlockV2, +) from inference.core.workflows.core_steps.formatters.vlm_as_detector.v1 import ( VLMAsDetectorBlockV1, ) +from inference.core.workflows.core_steps.formatters.vlm_as_detector.v2 import ( + VLMAsDetectorBlockV2, +) from inference.core.workflows.core_steps.fusion.detections_classes_replacement.v1 import ( DetectionsClassesReplacementBlockV1, ) @@ -178,18 +184,33 @@ from inference.core.workflows.core_steps.models.roboflow.instance_segmentation.v1 import ( RoboflowInstanceSegmentationModelBlockV1, ) +from inference.core.workflows.core_steps.models.roboflow.instance_segmentation.v2 import ( + RoboflowInstanceSegmentationModelBlockV2, +) from inference.core.workflows.core_steps.models.roboflow.keypoint_detection.v1 import ( RoboflowKeypointDetectionModelBlockV1, ) +from inference.core.workflows.core_steps.models.roboflow.keypoint_detection.v2 import ( + RoboflowKeypointDetectionModelBlockV2, +) from inference.core.workflows.core_steps.models.roboflow.multi_class_classification.v1 import ( RoboflowClassificationModelBlockV1, ) +from inference.core.workflows.core_steps.models.roboflow.multi_class_classification.v2 import ( + RoboflowClassificationModelBlockV2, +) from inference.core.workflows.core_steps.models.roboflow.multi_label_classification.v1 import ( RoboflowMultiLabelClassificationModelBlockV1, ) +from inference.core.workflows.core_steps.models.roboflow.multi_label_classification.v2 import ( + RoboflowMultiLabelClassificationModelBlockV2, +) from inference.core.workflows.core_steps.models.roboflow.object_detection.v1 import ( RoboflowObjectDetectionModelBlockV1, ) +from inference.core.workflows.core_steps.models.roboflow.object_detection.v2 import ( + RoboflowObjectDetectionModelBlockV2, +) from inference.core.workflows.core_steps.models.third_party.barcode_detection.v1 import ( BarcodeDetectorBlockV1, ) @@ -336,6 +357,7 @@ IMAGE_KEYPOINTS_KIND, IMAGE_KIND, IMAGE_METADATA_KIND, + INFERENCE_ID_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, @@ -357,7 +379,7 @@ VIDEO_METADATA_KIND, WILDCARD_KIND, ZONE_KIND, - Kind, INFERENCE_ID_KIND, + Kind, ) from inference.core.workflows.prototypes.block import WorkflowBlock @@ -515,6 +537,13 @@ def load_blocks() -> List[Type[WorkflowBlock]]: ReferencePathVisualizationBlockV1, ByteTrackerBlockV3, WebhookSinkBlockV1, + RoboflowInstanceSegmentationModelBlockV2, + RoboflowKeypointDetectionModelBlockV2, + RoboflowClassificationModelBlockV2, + RoboflowMultiLabelClassificationModelBlockV2, + RoboflowObjectDetectionModelBlockV2, + VLMAsClassifierBlockV2, + VLMAsDetectorBlockV2, ] diff --git a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v2.py b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v2.py index 6912238aa..a7972dfe4 100644 --- 
a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v2.py +++ b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v2.py @@ -30,6 +30,7 @@ BOOLEAN_KIND, FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, + INFERENCE_ID_KIND, INSTANCE_SEGMENTATION_PREDICTION_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, @@ -40,7 +41,6 @@ ImageInputField, RoboflowModelField, Selector, - INFERENCE_ID_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, diff --git a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v2.py b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v2.py index e464e3ab8..974e84443 100644 --- a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v2.py +++ b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v2.py @@ -31,6 +31,7 @@ BOOLEAN_KIND, FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, + INFERENCE_ID_KIND, INTEGER_KIND, KEYPOINT_DETECTION_PREDICTION_KIND, LIST_OF_VALUES_KIND, @@ -40,7 +41,6 @@ ImageInputField, RoboflowModelField, Selector, - INFERENCE_ID_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py index 75e19b677..a1938b8e4 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py @@ -28,13 +28,13 @@ CLASSIFICATION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, + INFERENCE_ID_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, FloatZeroToOne, ImageInputField, RoboflowModelField, Selector, - INFERENCE_ID_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py index cc76f03a3..1b2b1d8e2 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py @@ -28,13 +28,13 @@ CLASSIFICATION_PREDICTION_KIND, FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, + INFERENCE_ID_KIND, ROBOFLOW_MODEL_ID_KIND, ROBOFLOW_PROJECT_KIND, FloatZeroToOne, ImageInputField, RoboflowModelField, Selector, - INFERENCE_ID_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, diff --git a/inference/core/workflows/core_steps/models/roboflow/object_detection/v2.py b/inference/core/workflows/core_steps/models/roboflow/object_detection/v2.py index 36ced095e..32889dc1c 100644 --- a/inference/core/workflows/core_steps/models/roboflow/object_detection/v2.py +++ b/inference/core/workflows/core_steps/models/roboflow/object_detection/v2.py @@ -28,6 +28,7 @@ BOOLEAN_KIND, FLOAT_ZERO_TO_ONE_KIND, IMAGE_KIND, + INFERENCE_ID_KIND, INTEGER_KIND, LIST_OF_VALUES_KIND, OBJECT_DETECTION_PREDICTION_KIND, @@ -37,7 +38,6 @@ ImageInputField, RoboflowModelField, Selector, - INFERENCE_ID_KIND, ) from inference.core.workflows.prototypes.block import ( BlockResult, @@ -143,7 +143,7 @@ def get_execution_engine_compatibility(cls) -> Optional[str]: return ">=1.3.0,<2.0.0" -class RoboflowObjectDetectionModelBlockV1(WorkflowBlock): +class RoboflowObjectDetectionModelBlockV2(WorkflowBlock): def __init__( self, From 
4436cb399130369e8f150c62fe143b3c595bcfed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 13:34:28 +0100 Subject: [PATCH 56/67] Fixing issues spotted while refactor --- .../roboflow/multi_class_classification/v1.py | 6 +- .../roboflow/multi_class_classification/v2.py | 6 +- .../roboflow/multi_label_classification/v1.py | 6 +- .../roboflow/multi_label_classification/v2.py | 5 +- .../sinks/roboflow/custom_metadata/v1.py | 8 +- .../roboflow/test_roboflow_custom_metadata.py | 73 +++++++++++++++++++ 6 files changed, 95 insertions(+), 9 deletions(-) diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py index 10c5c11d8..4e33f5fd2 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py @@ -242,7 +242,6 @@ def _post_process_result( images: Batch[WorkflowImageData], predictions: List[dict], ) -> BlockResult: - inference_id = predictions[0].get(INFERENCE_ID_KEY, None) predictions = attach_prediction_type_info( predictions=predictions, prediction_type="classification", @@ -253,6 +252,9 @@ def _post_process_result( image.workflow_root_ancestor_metadata.parent_id ) return [ - {"inference_id": inference_id, "predictions": prediction} + { + "inference_id": prediction.get(INFERENCE_ID_KEY), + "predictions": prediction, + } for prediction in predictions ] diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py index a1938b8e4..27022c9ad 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v2.py @@ -238,7 +238,6 @@ def _post_process_result( images: Batch[WorkflowImageData], predictions: List[dict], ) -> BlockResult: - inference_id = predictions[0].get(INFERENCE_ID_KEY, None) predictions = attach_prediction_type_info( predictions=predictions, prediction_type="classification", @@ -249,6 +248,9 @@ def _post_process_result( image.workflow_root_ancestor_metadata.parent_id ) return [ - {"inference_id": inference_id, "predictions": prediction} + { + "inference_id": prediction.get(INFERENCE_ID_KEY), + "predictions": prediction, + } for prediction in predictions ] diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py index 290982e50..b13d75f7f 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py @@ -239,7 +239,6 @@ def _post_process_result( images: Batch[WorkflowImageData], predictions: List[dict], ) -> List[dict]: - inference_id = predictions[0].get(INFERENCE_ID_KEY, None) predictions = attach_prediction_type_info( predictions=predictions, prediction_type="classification", @@ -250,6 +249,9 @@ def _post_process_result( image.workflow_root_ancestor_metadata.parent_id ) return [ - {"inference_id": inference_id, "predictions": prediction} + { + "inference_id": prediction.get(INFERENCE_ID_KEY), + "predictions": prediction, + } for prediction in predictions ] diff --git 
a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py index 1b2b1d8e2..500441ac3 100644 --- a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py +++ b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v2.py @@ -246,6 +246,9 @@ def _post_process_result( image.workflow_root_ancestor_metadata.parent_id ) return [ - {"inference_id": inference_id, "predictions": prediction} + { + "inference_id": prediction.get(INFERENCE_ID_KEY), + "predictions": prediction, + } for prediction in predictions ] diff --git a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py index 7fa2ccfcd..8346fd297 100644 --- a/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py +++ b/inference/core/workflows/core_steps/sinks/roboflow/custom_metadata/v1.py @@ -124,7 +124,7 @@ def run( fire_and_forget: bool, field_name: str, field_value: str, - predictions: sv.Detections, + predictions: Union[sv.Detections, dict], ) -> BlockResult: if self._api_key is None: raise ValueError( @@ -133,7 +133,11 @@ def run( "https://docs.roboflow.com/api-reference/authentication#retrieve-an-api-key to learn how to " "retrieve one." ) - inference_ids: List[str] = predictions.data.get(INFERENCE_ID_KEY, []) + inference_ids: List[str] = [] + if isinstance(predictions, sv.Detections): + inference_ids = predictions.data.get(INFERENCE_ID_KEY, []) + elif INFERENCE_ID_KEY in predictions: + inference_ids: List[str] = [predictions[INFERENCE_ID_KEY]] if len(inference_ids) == 0: return { "error_status": True, diff --git a/tests/workflows/unit_tests/core_steps/sinks/roboflow/test_roboflow_custom_metadata.py b/tests/workflows/unit_tests/core_steps/sinks/roboflow/test_roboflow_custom_metadata.py index b1807d6ab..1d1e4aa26 100644 --- a/tests/workflows/unit_tests/core_steps/sinks/roboflow/test_roboflow_custom_metadata.py +++ b/tests/workflows/unit_tests/core_steps/sinks/roboflow/test_roboflow_custom_metadata.py @@ -217,6 +217,79 @@ def test_run_when_fire_and_forget_with_background_tasks( assert len(background_tasks.tasks) == 1, "Expected background task to be added" +@patch( + "inference.core.workflows.core_steps.sinks.roboflow.custom_metadata.v1.add_custom_metadata_request" +) +def test_run_with_classification_results( + add_custom_metadata_request_mock: MagicMock, +) -> None: + # given + background_tasks = BackgroundTasks() + block = RoboflowCustomMetadataBlockV1( + cache=MemoryCache(), + api_key="my_api_key", + background_tasks=background_tasks, + thread_pool_executor=None, + ) + add_custom_metadata_request_mock.return_value = ( + False, + "Custom metadata upload was successful", + ) + predictions = {"inference_id": "some-id"} + + # when + result = block.run( + fire_and_forget=True, + field_name="location", + field_value="toronto", + predictions=predictions, + ) + + # then + assert result == { + "error_status": False, + "message": "Registration happens in the background task", + }, "Expected success message" + assert len(background_tasks.tasks) == 1, "Expected background task to be added" + + +@patch( + "inference.core.workflows.core_steps.sinks.roboflow.custom_metadata.v1.add_custom_metadata_request" +) +def test_run_with_classification_results_when_inference_id_is_not_given( + add_custom_metadata_request_mock: MagicMock, +) -> None: + # given + background_tasks = 
BackgroundTasks() + block = RoboflowCustomMetadataBlockV1( + cache=MemoryCache(), + api_key="my_api_key", + background_tasks=background_tasks, + thread_pool_executor=None, + ) + add_custom_metadata_request_mock.return_value = ( + False, + "Custom metadata upload was successful", + ) + predictions = {"predictions": ["a", "b", "c"]} + + # when + result = block.run( + fire_and_forget=True, + field_name="location", + field_value="toronto", + predictions=predictions, + ) + + # then + assert result == { + "error_status": True, + "message": "Custom metadata upload failed because no inference_ids were received. This is known bug " + "(https://github.com/roboflow/inference/issues/567). Please provide a report for the " + "problem under mentioned issue.", + }, "Expected failure due to no inference_ids" + + @patch( "inference.core.workflows.core_steps.sinks.roboflow.custom_metadata.v1.add_custom_metadata_request" ) From e66bdfde5905526380dec1546026a65f6453e4b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 14:07:58 +0100 Subject: [PATCH 57/67] Add tests for changes --- .../test_workflow_with_claude.py | 6 +- .../test_workflow_with_gemini.py | 6 +- .../test_workflow_with_openai.py | 4 +- ..._workflow_detection_plus_classification.py | 88 +++- ...test_workflow_with_active_learning_sink.py | 4 +- .../test_workflow_with_claude_models.py | 186 +++++++- .../test_workflow_with_csv_formatter.py | 2 +- .../test_workflow_with_data_aggregation.py | 2 +- .../execution/test_workflow_with_file_sink.py | 2 +- .../execution/test_workflow_with_florence2.py | 94 +++- .../test_workflow_with_gemini_models.py | 186 +++++++- .../test_workflow_with_masked_crop.py | 93 +++- ...ow_with_model_comparision_visualisation.py | 4 +- ..._workflow_with_ocr_detections_stitching.py | 2 +- .../test_workflow_with_open_ai_models.py | 101 ++++- .../test_workflow_with_rate_limiter.py | 2 +- .../execution/test_workflow_with_sahi.py | 4 +- .../execution/test_workflow_with_sam2.py | 2 +- ...t_workflow_with_stitch_for_dynamic_crop.py | 4 +- ..._with_two_stage_models_and_flow_control.py | 4 +- .../formatters/vlm_as_classifier/__init__.py | 0 .../test_v1.py} | 0 .../formatters/vlm_as_classifier/test_v2.py | 342 +++++++++++++++ .../formatters/vlm_as_detector/__init__.py | 0 .../test_v1.py} | 0 .../formatters/vlm_as_detector/test_v2.py | 404 ++++++++++++++++++ .../instance_segmentation/__init__.py | 0 .../test_v1.py} | 0 .../roboflow/instance_segmentation/test_v2.py | 133 ++++++ .../roboflow/keypoint_detection/__init__.py | 0 .../test_v1.py} | 0 .../roboflow/keypoint_detection/test_v2.py | 132 ++++++ .../multi_class_classification/__init__.py | 0 .../test_v1.py} | 0 .../multi_class_classification/test_v2.py | 93 ++++ .../multi_label_classification/__init__.py | 0 .../test_v1.py} | 0 .../multi_label_classification/test_v2.py | 97 +++++ .../roboflow/object_detection/__init__.py | 0 .../test_v1.py} | 0 .../roboflow/object_detection/test_v2.py | 129 ++++++ 41 files changed, 2072 insertions(+), 54 deletions(-) create mode 100644 tests/workflows/unit_tests/core_steps/formatters/vlm_as_classifier/__init__.py rename tests/workflows/unit_tests/core_steps/formatters/{test_vlm_as_classifier.py => vlm_as_classifier/test_v1.py} (100%) create mode 100644 tests/workflows/unit_tests/core_steps/formatters/vlm_as_classifier/test_v2.py create mode 100644 tests/workflows/unit_tests/core_steps/formatters/vlm_as_detector/__init__.py rename tests/workflows/unit_tests/core_steps/formatters/{test_vlm_as_detector.py => 
vlm_as_detector/test_v1.py} (100%) create mode 100644 tests/workflows/unit_tests/core_steps/formatters/vlm_as_detector/test_v2.py create mode 100644 tests/workflows/unit_tests/core_steps/models/roboflow/instance_segmentation/__init__.py rename tests/workflows/unit_tests/core_steps/models/roboflow/{test_instance_segmentation.py => instance_segmentation/test_v1.py} (100%) create mode 100644 tests/workflows/unit_tests/core_steps/models/roboflow/instance_segmentation/test_v2.py create mode 100644 tests/workflows/unit_tests/core_steps/models/roboflow/keypoint_detection/__init__.py rename tests/workflows/unit_tests/core_steps/models/roboflow/{test_keypoint_detection.py => keypoint_detection/test_v1.py} (100%) create mode 100644 tests/workflows/unit_tests/core_steps/models/roboflow/keypoint_detection/test_v2.py create mode 100644 tests/workflows/unit_tests/core_steps/models/roboflow/multi_class_classification/__init__.py rename tests/workflows/unit_tests/core_steps/models/roboflow/{test_multi_class_classification.py => multi_class_classification/test_v1.py} (100%) create mode 100644 tests/workflows/unit_tests/core_steps/models/roboflow/multi_class_classification/test_v2.py create mode 100644 tests/workflows/unit_tests/core_steps/models/roboflow/multi_label_classification/__init__.py rename tests/workflows/unit_tests/core_steps/models/roboflow/{test_multi_label_classification.py => multi_label_classification/test_v1.py} (100%) create mode 100644 tests/workflows/unit_tests/core_steps/models/roboflow/multi_label_classification/test_v2.py create mode 100644 tests/workflows/unit_tests/core_steps/models/roboflow/object_detection/__init__.py rename tests/workflows/unit_tests/core_steps/models/roboflow/{test_object_detection.py => object_detection/test_v1.py} (100%) create mode 100644 tests/workflows/unit_tests/core_steps/models/roboflow/object_detection/test_v2.py diff --git a/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_claude.py b/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_claude.py index 420b213cd..ef43d7bd9 100644 --- a/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_claude.py +++ b/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_claude.py @@ -24,7 +24,7 @@ "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$inputs.image", "vlm_output": "$steps.claude.output", @@ -183,7 +183,7 @@ def test_structured_parsing_workflow( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_detector@v1", + "type": "roboflow_core/vlm_as_detector@v2", "name": "parser", "vlm_output": "$steps.claude.output", "image": "$inputs.image", @@ -281,7 +281,7 @@ def test_object_detection_workflow( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$steps.cropping.crops", "vlm_output": "$steps.claude.output", diff --git a/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_gemini.py b/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_gemini.py index 5fb90aa37..c61344c00 100644 --- a/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_gemini.py +++ b/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_gemini.py @@ -24,7 +24,7 @@ "api_key": "$inputs.api_key", }, { - "type": 
"roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$inputs.image", "vlm_output": "$steps.gemini.output", @@ -183,7 +183,7 @@ def test_structured_parsing_workflow( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_detector@v1", + "type": "roboflow_core/vlm_as_detector@v2", "name": "parser", "vlm_output": "$steps.gemini.output", "image": "$inputs.image", @@ -281,7 +281,7 @@ def test_object_detection_workflow( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$steps.cropping.crops", "vlm_output": "$steps.gemini.output", diff --git a/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_openai.py b/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_openai.py index edd4d5137..112d4342e 100644 --- a/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_openai.py +++ b/tests/inference/hosted_platform_tests/workflows_examples/test_workflow_with_openai.py @@ -114,7 +114,7 @@ def test_image_description_workflow( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$inputs.image", "vlm_output": "$steps.gpt.output", @@ -294,7 +294,7 @@ def test_structured_prompting_workflow( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$steps.cropping.crops", "vlm_output": "$steps.gpt.output", diff --git a/tests/workflows/integration_tests/execution/test_workflow_detection_plus_classification.py b/tests/workflows/integration_tests/execution/test_workflow_detection_plus_classification.py index b72d7ed64..10b610b45 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_detection_plus_classification.py +++ b/tests/workflows/integration_tests/execution/test_workflow_detection_plus_classification.py @@ -9,7 +9,7 @@ add_to_workflows_gallery, ) -DETECTION_PLUS_CLASSIFICATION_WORKFLOW = { +LEGACY_DETECTION_PLUS_CLASSIFICATION_WORKFLOW = { "version": "1.0", "inputs": [{"type": "WorkflowImage", "name": "image"}], "steps": [ @@ -43,6 +43,78 @@ } +def test_legacy_detection_plus_classification_workflow_when_minimal_valid_input_provided( + model_manager: ModelManager, + dogs_image: np.ndarray, + roboflow_api_key: str, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": roboflow_api_key, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=LEGACY_DETECTION_PLUS_CLASSIFICATION_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": dogs_image, + } + ) + + assert isinstance(result, list), "Expected list to be delivered" + assert len(result) == 1, "Expected 1 element in the output for one input image" + assert set(result[0].keys()) == { + "predictions", + }, "Expected all declared outputs to be delivered" + assert ( + len(result[0]["predictions"]) == 2 + ), "Expected 2 dogs crops on input image, hence 2 nested classification results" + assert [result[0]["predictions"][0]["top"], result[0]["predictions"][1]["top"]] == [ + "116.Parson_russell_terrier", + 
"131.Wirehaired_pointing_griffon", + ], "Expected predictions to be as measured in reference run" + + +DETECTION_PLUS_CLASSIFICATION_WORKFLOW_V2_BLOCKS = { + "version": "1.0", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "roboflow_core/roboflow_object_detection_model@v2", + "name": "general_detection", + "image": "$inputs.image", + "model_id": "yolov8n-640", + "class_filter": ["dog"], + }, + { + "type": "roboflow_core/dynamic_crop@v1", + "name": "cropping", + "image": "$inputs.image", + "predictions": "$steps.general_detection.predictions", + }, + { + "type": "roboflow_core/roboflow_classification_model@v2", + "name": "breds_classification", + "image": "$steps.cropping.crops", + "model_id": "dog-breed-xpaq6/1", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "predictions", + "selector": "$steps.breds_classification.predictions", + }, + ], +} + + @add_to_workflows_gallery( category="Workflows with multiple models", use_case_title="Workflow detection model followed by classifier", @@ -60,7 +132,7 @@ Secondary model is supposed to make prediction from dogs breed classifier model to assign detailed class for each dog instance. """, - workflow_definition=DETECTION_PLUS_CLASSIFICATION_WORKFLOW, + workflow_definition=DETECTION_PLUS_CLASSIFICATION_WORKFLOW_V2_BLOCKS, workflow_name_in_app="detection-plus-classification", ) def test_detection_plus_classification_workflow_when_minimal_valid_input_provided( @@ -75,7 +147,7 @@ def test_detection_plus_classification_workflow_when_minimal_valid_input_provide "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=DETECTION_PLUS_CLASSIFICATION_WORKFLOW, + workflow_definition=DETECTION_PLUS_CLASSIFICATION_WORKFLOW_V2_BLOCKS, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) @@ -113,7 +185,7 @@ def test_detection_plus_classification_workflow_when_nothing_gets_predicted( "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } execution_engine = ExecutionEngine.init( - workflow_definition=DETECTION_PLUS_CLASSIFICATION_WORKFLOW, + workflow_definition=DETECTION_PLUS_CLASSIFICATION_WORKFLOW_V2_BLOCKS, init_parameters=workflow_init_parameters, max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, ) @@ -140,14 +212,14 @@ def test_detection_plus_classification_workflow_when_nothing_gets_predicted( "inputs": [{"type": "WorkflowImage", "name": "image"}], "steps": [ { - "type": "ObjectDetectionModel", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "general_detection", "image": "$inputs.image", "model_id": "yolov8n-640", "class_filter": ["dog"], }, { - "type": "DetectionsConsensus", + "type": "roboflow_core/detections_consensus@v1", "name": "detections_consensus", "predictions_batches": [ "$steps.general_detection.predictions", @@ -155,13 +227,13 @@ def test_detection_plus_classification_workflow_when_nothing_gets_predicted( "required_votes": 1, }, { - "type": "Crop", + "type": "roboflow_core/dynamic_crop@v1", "name": "cropping", "image": "$inputs.image", "predictions": "$steps.detections_consensus.predictions", }, { - "type": "ClassificationModel", + "type": "roboflow_core/roboflow_classification_model@v2", "name": "breds_classification", "image": "$steps.cropping.crops", "model_id": "dog-breed-xpaq6/1", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_active_learning_sink.py 
b/tests/workflows/integration_tests/execution/test_workflow_with_active_learning_sink.py index f7b3df4ab..cbee95f50 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_active_learning_sink.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_active_learning_sink.py @@ -30,7 +30,7 @@ ], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "general_detection", "image": "$inputs.image", "model_id": "yolov8n-640", @@ -43,7 +43,7 @@ "predictions": "$steps.general_detection.predictions", }, { - "type": "roboflow_core/roboflow_classification_model@v1", + "type": "roboflow_core/roboflow_classification_model@v2", "name": "breds_classification", "image": "$steps.cropping.crops", "model_id": "dog-breed-xpaq6/1", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_claude_models.py b/tests/workflows/integration_tests/execution/test_workflow_with_claude_models.py index d55516aa5..ddac3b424 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_claude_models.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_claude_models.py @@ -302,6 +302,100 @@ def test_workflow_with_captioning_prompt( ), "Expected non-empty string generated" +CLASSIFICATION_WORKFLOW_WITH_LEGACY_PARSER = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "api_key"}, + {"type": "WorkflowParameter", "name": "classes"}, + ], + "steps": [ + { + "type": "roboflow_core/anthropic_claude@v1", + "name": "claude", + "images": "$inputs.image", + "task_type": "classification", + "classes": "$inputs.classes", + "api_key": "$inputs.api_key", + }, + { + "type": "roboflow_core/vlm_as_classifier@v2", + "name": "parser", + "image": "$inputs.image", + "vlm_output": "$steps.claude.output", + "classes": "$steps.claude.classes", + }, + { + "type": "roboflow_core/property_definition@v1", + "name": "top_class", + "operations": [ + {"type": "ClassificationPropertyExtract", "property_name": "top_class"} + ], + "data": "$steps.parser.predictions", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "claude_result", + "selector": "$steps.claude.output", + }, + { + "type": "JsonField", + "name": "top_class", + "selector": "$steps.top_class.output", + }, + { + "type": "JsonField", + "name": "parsed_prediction", + "selector": "$steps.parser.*", + }, + ], +} + + +@pytest.mark.skipif( + condition=ANTHROPIC_API_KEY is None, reason="Anthropic API key not provided" +) +def test_workflow_with_multi_class_classifier_prompt_with_legacy_parser( + model_manager: ModelManager, + dogs_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=CLASSIFICATION_WORKFLOW_WITH_LEGACY_PARSER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [dogs_image], + "api_key": ANTHROPIC_API_KEY, + "classes": ["cat", "dog"], + } + ) + + # then + assert len(result) == 1, "Single image given, expected single output" + assert set(result[0].keys()) == { + "claude_result", + "top_class", + "parsed_prediction", + }, "Expected all outputs to be delivered" + assert ( + 
isinstance(result[0]["claude_result"], str) + and len(result[0]["claude_result"]) > 0 + ), "Expected non-empty string generated" + assert result[0]["top_class"] == "dog" + assert result[0]["parsed_prediction"]["error_status"] is False + + CLASSIFICATION_WORKFLOW = { "version": "1.0", "inputs": [ @@ -319,7 +413,7 @@ def test_workflow_with_captioning_prompt( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$inputs.image", "vlm_output": "$steps.claude.output", @@ -359,7 +453,7 @@ def test_workflow_with_captioning_prompt( use_case_title="Using Anthropic Claude as multi-class classifier", use_case_description=""" In this example, Anthropic Claude model is used as classifier. Output from the model is parsed by -special `roboflow_core/vlm_as_classifier@v1` block which turns model output text into +special `roboflow_core/vlm_as_classifier@v2` block which turns model output text into full-blown prediction, which can later be used by other blocks compatible with classification predictions - in this case we extract top-class property. """, @@ -425,7 +519,7 @@ def test_workflow_with_multi_class_classifier_prompt( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$inputs.image", # requires image input to construct valid output compatible with "inference" "vlm_output": "$steps.claude.output", @@ -460,7 +554,7 @@ def test_workflow_with_multi_class_classifier_prompt( use_case_title="Using Anthropic Claude as multi-label classifier", use_case_description=""" In this example, Anthropic Claude model is used as multi-label classifier. Output from the model is parsed by -special `roboflow_core/vlm_as_classifier@v1` block which turns model output text into +special `roboflow_core/vlm_as_classifier@v2` block which turns model output text into full-blown prediction, which can later be used by other blocks compatible with classification predictions - in this case we extract top-class property. 
""", @@ -589,7 +683,7 @@ def test_workflow_with_structured_prompt( assert result[0]["result"] == "2" -OBJECT_DETECTION_WORKFLOW = { +OBJECT_DETECTION_WORKFLOW_LEGACY_PARSER = { "version": "1.0", "inputs": [ {"type": "WorkflowImage", "name": "image"}, @@ -630,6 +724,86 @@ def test_workflow_with_structured_prompt( } +@pytest.mark.skipif( + condition=ANTHROPIC_API_KEY is None, reason="Anthropic API key not provided" +) +def test_workflow_with_object_detection_prompt_when_legacy_parser_in_use( + model_manager: ModelManager, + dogs_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=OBJECT_DETECTION_WORKFLOW_LEGACY_PARSER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [dogs_image], + "api_key": ANTHROPIC_API_KEY, + "classes": ["cat", "dog"], + } + ) + + # then + assert len(result) == 1, "Single image given, expected single output" + assert set(result[0].keys()) == { + "claude_result", + "parsed_prediction", + }, "Expected all outputs to be delivered" + assert result[0]["parsed_prediction"].data["class_name"].tolist() == [ + "dog", + "dog", + ], "Expected 2 dogs to be detected" + + +OBJECT_DETECTION_WORKFLOW = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "api_key"}, + {"type": "WorkflowParameter", "name": "classes"}, + ], + "steps": [ + { + "type": "roboflow_core/anthropic_claude@v1", + "name": "claude", + "images": "$inputs.image", + "task_type": "object-detection", + "classes": "$inputs.classes", + "api_key": "$inputs.api_key", + }, + { + "type": "roboflow_core/vlm_as_detector@v2", + "name": "parser", + "vlm_output": "$steps.claude.output", + "image": "$inputs.image", + "classes": "$steps.claude.classes", + "model_type": "anthropic-claude", + "task_type": "object-detection", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "claude_result", + "selector": "$steps.claude.output", + }, + { + "type": "JsonField", + "name": "parsed_prediction", + "selector": "$steps.parser.predictions", + }, + ], +} + + @add_to_workflows_gallery( category="Workflows with Visual Language Models", use_case_title="Using Anthropic Claude as object-detection model", @@ -718,7 +892,7 @@ def test_workflow_with_object_detection_prompt( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$steps.cropping.crops", "vlm_output": "$steps.claude.output", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_csv_formatter.py b/tests/workflows/integration_tests/execution/test_workflow_with_csv_formatter.py index 8dd731d48..3bb13c19d 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_csv_formatter.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_csv_formatter.py @@ -19,7 +19,7 @@ ], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "model", "images": "$inputs.image", "model_id": "yolov8n-640", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_data_aggregation.py 
b/tests/workflows/integration_tests/execution/test_workflow_with_data_aggregation.py index 5f9ec4bb5..9d04783c9 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_data_aggregation.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_data_aggregation.py @@ -20,7 +20,7 @@ ], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "model", "images": "$inputs.image", "model_id": "$inputs.model_id", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_file_sink.py b/tests/workflows/integration_tests/execution/test_workflow_with_file_sink.py index 00c79d676..5f908782b 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_file_sink.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_file_sink.py @@ -26,7 +26,7 @@ ], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "model", "images": "$inputs.image", "model_id": "$inputs.model_id", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_florence2.py b/tests/workflows/integration_tests/execution/test_workflow_with_florence2.py index 35e3c9d93..055a8d289 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_florence2.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_florence2.py @@ -23,7 +23,7 @@ ], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "model_1", "images": "$inputs.image", "model_id": "yolov8n-640", @@ -148,7 +148,7 @@ def test_florence2_grounded_classification_when_no_grounding_available( "inputs": [{"type": "InferenceImage", "name": "image"}], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "model_1", "images": "$inputs.image", "model_id": "yolov8n-640", @@ -298,7 +298,7 @@ def test_florence2_instance_segmentation_grounded_by_input( "inputs": [{"type": "InferenceImage", "name": "image"}], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "model_1", "images": "$inputs.image", "model_id": "yolov8n-640", @@ -380,7 +380,7 @@ def test_florence2_grounded_caption( ), "Expected dog to be output by florence2" -FLORENCE_OBJECT_DETECTION_WORKFLOW = { +FLORENCE_OBJECT_DETECTION_WORKFLOW_LEGACY_PARSER = { "version": "1.0", "inputs": [ {"type": "InferenceImage", "name": "image"}, @@ -426,6 +426,90 @@ def test_florence2_grounded_caption( } +@pytest.mark.skipif( + bool_env(os.getenv("SKIP_FLORENCE2_TEST", True)), reason="Skipping Florence 2 test" +) +def test_florence2_object_detection_when_legacy_parser_is_in_use( + model_manager: ModelManager, + dogs_image: np.ndarray, + roboflow_api_key: str, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": roboflow_api_key, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=FLORENCE_OBJECT_DETECTION_WORKFLOW_LEGACY_PARSER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={"image": dogs_image, "classes": 
["dog"]} + ) + + assert isinstance(result, list), "Expected list to be delivered" + assert len(result) == 1, "Expected 1 element in the output for one input image" + assert set(result[0].keys()) == { + "predictions", + "bounding_box_visualization", + }, "Expected all declared outputs to be delivered" + assert len(result[0]["predictions"]) == 2, "Expected two predictions" + assert result[0]["predictions"].data["class_name"].tolist() == [ + "dog", + "dog", + ], "Expected two dogs to be found" + + +FLORENCE_OBJECT_DETECTION_WORKFLOW = { + "version": "1.0", + "inputs": [ + {"type": "InferenceImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "classes"}, + ], + "steps": [ + { + "type": "roboflow_core/florence_2@v1", + "name": "model", + "images": "$inputs.image", + "task_type": "open-vocabulary-object-detection", + "classes": "$inputs.classes", + }, + { + "type": "roboflow_core/vlm_as_detector@v2", + "name": "vlm_as_detector", + "image": "$inputs.image", + "vlm_output": "$steps.model.raw_output", + "classes": "$steps.model.classes", + "model_type": "florence-2", + "task_type": "open-vocabulary-object-detection", + }, + { + "type": "roboflow_core/bounding_box_visualization@v1", + "name": "bounding_box_visualization", + "image": "$inputs.image", + "predictions": "$steps.vlm_as_detector.predictions", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "predictions", + "selector": "$steps.vlm_as_detector.predictions", + }, + { + "type": "JsonField", + "name": "bounding_box_visualization", + "coordinates_system": "own", + "selector": "$steps.bounding_box_visualization.image", + }, + ], +} + + @add_to_workflows_gallery( category="Workflows with Visual Language Models", use_case_title="Florence 2 - object detection", @@ -502,7 +586,7 @@ def test_florence2_object_detection( "task_type": "object-detection", }, { - "type": "roboflow_core/vlm_as_detector@v1", + "type": "roboflow_core/vlm_as_detector@v2", "name": "vlm_as_detector", "image": "$inputs.image", "vlm_output": "$steps.model.raw_output", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_gemini_models.py b/tests/workflows/integration_tests/execution/test_workflow_with_gemini_models.py index 0a583a6b4..5130f91a2 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_gemini_models.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_gemini_models.py @@ -302,7 +302,7 @@ def test_workflow_with_captioning_prompt( ), "Expected non-empty string generated" -CLASSIFICATION_WORKFLOW = { +CLASSIFICATION_WORKFLOW_LEGACY_PARSER = { "version": "1.0", "inputs": [ {"type": "WorkflowImage", "name": "image"}, @@ -354,12 +354,106 @@ def test_workflow_with_captioning_prompt( } +@pytest.mark.skipif( + condition=GOOGLE_API_KEY is None, reason="Google API key not provided" +) +def test_workflow_with_multi_class_classifier_prompt_and_legacy_parser( + model_manager: ModelManager, + dogs_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=CLASSIFICATION_WORKFLOW_LEGACY_PARSER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [dogs_image], + "api_key": GOOGLE_API_KEY, + "classes": ["cat", "dog"], + } + ) + + # then + assert len(result) == 1, "Single image 
given, expected single output" + assert set(result[0].keys()) == { + "gemini_result", + "top_class", + "parsed_prediction", + }, "Expected all outputs to be delivered" + assert ( + isinstance(result[0]["gemini_result"], str) + and len(result[0]["gemini_result"]) > 0 + ), "Expected non-empty string generated" + assert result[0]["top_class"] == "dog" + assert result[0]["parsed_prediction"]["error_status"] is False + + +CLASSIFICATION_WORKFLOW = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "api_key"}, + {"type": "WorkflowParameter", "name": "classes"}, + ], + "steps": [ + { + "type": "roboflow_core/google_gemini@v1", + "name": "gemini", + "images": "$inputs.image", + "task_type": "classification", + "classes": "$inputs.classes", + "api_key": "$inputs.api_key", + }, + { + "type": "roboflow_core/vlm_as_classifier@v2", + "name": "parser", + "image": "$inputs.image", + "vlm_output": "$steps.gemini.output", + "classes": "$steps.gemini.classes", + }, + { + "type": "roboflow_core/property_definition@v1", + "name": "top_class", + "operations": [ + {"type": "ClassificationPropertyExtract", "property_name": "top_class"} + ], + "data": "$steps.parser.predictions", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "gemini_result", + "selector": "$steps.gemini.output", + }, + { + "type": "JsonField", + "name": "top_class", + "selector": "$steps.top_class.output", + }, + { + "type": "JsonField", + "name": "parsed_prediction", + "selector": "$steps.parser.*", + }, + ], +} + + @add_to_workflows_gallery( category="Workflows with Visual Language Models", use_case_title="Using Google's Gemini as multi-class classifier", use_case_description=""" In this example, Google's Gemini model is used as classifier. Output from the model is parsed by -special `roboflow_core/vlm_as_classifier@v1` block which turns model output text into +special `roboflow_core/vlm_as_classifier@v2` block which turns model output text into full-blown prediction, which can later be used by other blocks compatible with classification predictions - in this case we extract top-class property. """, @@ -425,7 +519,7 @@ def test_workflow_with_multi_class_classifier_prompt( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$inputs.image", "vlm_output": "$steps.gemini.output", @@ -460,7 +554,7 @@ def test_workflow_with_multi_class_classifier_prompt( use_case_title="Using Google's Gemini as multi-label classifier", use_case_description=""" In this example, Google's Gemini model is used as multi-label classifier. Output from the model is parsed by -special `roboflow_core/vlm_as_classifier@v1` block which turns model output text into +special `roboflow_core/vlm_as_classifier@v2` block which turns model output text into full-blown prediction, which can later be used by other blocks compatible with classification predictions - in this case we extract top-class property. 
""", @@ -589,7 +683,7 @@ def test_workflow_with_structured_prompt( assert result[0]["result"] == "2" -OBJECT_DETECTION_WORKFLOW = { +OBJECT_DETECTION_WORKFLOW_LEGACY_PARSER = { "version": "1.0", "inputs": [ {"type": "WorkflowImage", "name": "image"}, @@ -630,6 +724,86 @@ def test_workflow_with_structured_prompt( } +@pytest.mark.skipif( + condition=GOOGLE_API_KEY is None, reason="Google API key not provided" +) +def test_workflow_with_object_detection_prompt_and_legacy_parser( + model_manager: ModelManager, + dogs_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=OBJECT_DETECTION_WORKFLOW_LEGACY_PARSER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [dogs_image], + "api_key": GOOGLE_API_KEY, + "classes": ["cat", "dog"], + } + ) + + # then + assert len(result) == 1, "Single image given, expected single output" + assert set(result[0].keys()) == { + "gemini_result", + "parsed_prediction", + }, "Expected all outputs to be delivered" + assert result[0]["parsed_prediction"].data["class_name"].tolist() == [ + "dog", + "dog", + ], "Expected 2 dogs to be detected" + + +OBJECT_DETECTION_WORKFLOW = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "api_key"}, + {"type": "WorkflowParameter", "name": "classes"}, + ], + "steps": [ + { + "type": "roboflow_core/google_gemini@v1", + "name": "gemini", + "images": "$inputs.image", + "task_type": "object-detection", + "classes": "$inputs.classes", + "api_key": "$inputs.api_key", + }, + { + "type": "roboflow_core/vlm_as_detector@v2", + "name": "parser", + "vlm_output": "$steps.gemini.output", + "image": "$inputs.image", + "classes": "$steps.gemini.classes", + "model_type": "google-gemini", + "task_type": "object-detection", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "gemini_result", + "selector": "$steps.gemini.output", + }, + { + "type": "JsonField", + "name": "parsed_prediction", + "selector": "$steps.parser.predictions", + }, + ], +} + + @add_to_workflows_gallery( category="Workflows with Visual Language Models", use_case_title="Using Google's Gemini as object-detection model", @@ -718,7 +892,7 @@ def test_workflow_with_object_detection_prompt( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$steps.cropping.crops", "vlm_output": "$steps.gemini.output", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_masked_crop.py b/tests/workflows/integration_tests/execution/test_workflow_with_masked_crop.py index 6e1684319..ccaa6337a 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_masked_crop.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_masked_crop.py @@ -8,7 +8,7 @@ add_to_workflows_gallery, ) -MASKED_CROP_WORKFLOW = { +MASKED_CROP_LEGACY_WORKFLOW = { "version": "1.0", "inputs": [ {"type": "WorkflowImage", "name": "image"}, @@ -54,6 +54,97 @@ } +def test_legacy_workflow_with_masked_crop( + model_manager: ModelManager, + dogs_image: np.ndarray, + roboflow_api_key: str, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": 
model_manager, + "workflows_core.api_key": roboflow_api_key, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=MASKED_CROP_LEGACY_WORKFLOW, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": dogs_image, + } + ) + + assert isinstance(result, list), "Expected list to be delivered" + assert len(result) == 1, "Expected 1 element in the output for one input image" + assert set(result[0].keys()) == { + "crops", + "predictions", + }, "Expected all declared outputs to be delivered" + assert len(result[0]["crops"]) == 2, "Expected 2 crops for two dogs detected" + crop_image = result[0]["crops"][0].numpy_image + (x_min, y_min, x_max, y_max) = ( + result[0]["predictions"].xyxy[0].round().astype(dtype=int) + ) + crop_mask = result[0]["predictions"].mask[0][y_min:y_max, x_min:x_max] + pixels_outside_mask = np.where( + np.stack([crop_mask] * 3, axis=-1) == 0, + crop_image, + np.zeros_like(crop_image), + ) + pixels_sum = pixels_outside_mask.sum() + assert pixels_sum == 0, "Expected everything black outside mask" + + +MASKED_CROP_WORKFLOW = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + { + "type": "WorkflowParameter", + "name": "model_id", + "default_value": "yolov8n-seg-640", + }, + { + "type": "WorkflowParameter", + "name": "confidence", + "default_value": 0.4, + }, + ], + "steps": [ + { + "type": "roboflow_core/roboflow_instance_segmentation_model@v2", + "name": "segmentation", + "image": "$inputs.image", + "model_id": "$inputs.model_id", + "confidence": "$inputs.confidence", + }, + { + "type": "roboflow_core/dynamic_crop@v1", + "name": "cropping", + "image": "$inputs.image", + "predictions": "$steps.segmentation.predictions", + "mask_opacity": 1.0, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "crops", + "selector": "$steps.cropping.crops", + }, + { + "type": "JsonField", + "name": "predictions", + "selector": "$steps.segmentation.predictions", + }, + ], +} + + @add_to_workflows_gallery( category="Workflows with data transformations", use_case_title="Instance Segmentation results with background subtracted", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_model_comparision_visualisation.py b/tests/workflows/integration_tests/execution/test_workflow_with_model_comparision_visualisation.py index 9da2500a6..da1bf5d16 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_model_comparision_visualisation.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_model_comparision_visualisation.py @@ -26,13 +26,13 @@ ], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "model", "images": "$inputs.image", "model_id": "$inputs.model_1", }, { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "model_1", "images": "$inputs.image", "model_id": "$inputs.model_2", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_ocr_detections_stitching.py b/tests/workflows/integration_tests/execution/test_workflow_with_ocr_detections_stitching.py index b370602b3..5b625e35e 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_ocr_detections_stitching.py +++ 
b/tests/workflows/integration_tests/execution/test_workflow_with_ocr_detections_stitching.py @@ -22,7 +22,7 @@ ], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "ocr_detection", "image": "$inputs.image", "model_id": "$inputs.model_id", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_open_ai_models.py b/tests/workflows/integration_tests/execution/test_workflow_with_open_ai_models.py index 74074cccb..00b6e53ff 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_open_ai_models.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_open_ai_models.py @@ -303,7 +303,7 @@ def test_workflow_with_captioning_prompt( ), "Expected non-empty string generated" -CLASSIFICATION_WORKFLOW = { +CLASSIFICATION_WORKFLOW_WITH_LEGACY_PARSER = { "version": "1.0", "inputs": [ {"type": "WorkflowImage", "name": "image"}, @@ -355,12 +355,105 @@ def test_workflow_with_captioning_prompt( } +@pytest.mark.skipif( + condition=OPEN_AI_API_KEY is None, reason="OpenAI API key not provided" +) +def test_workflow_with_multi_class_classifier_prompt_and_legacy_parser( + model_manager: ModelManager, + dogs_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=CLASSIFICATION_WORKFLOW_WITH_LEGACY_PARSER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": [dogs_image], + "api_key": OPEN_AI_API_KEY, + "classes": ["cat", "dog"], + } + ) + + # then + assert len(result) == 1, "Single image given, expected single output" + assert set(result[0].keys()) == { + "gpt_result", + "top_class", + "parsed_prediction", + }, "Expected all outputs to be delivered" + assert ( + isinstance(result[0]["gpt_result"], str) and len(result[0]["gpt_result"]) > 0 + ), "Expected non-empty string generated" + assert result[0]["top_class"] == "dog" + assert result[0]["parsed_prediction"]["error_status"] is False + + +CLASSIFICATION_WORKFLOW = { + "version": "1.0", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "api_key"}, + {"type": "WorkflowParameter", "name": "classes"}, + ], + "steps": [ + { + "type": "roboflow_core/open_ai@v2", + "name": "gpt", + "images": "$inputs.image", + "task_type": "classification", + "classes": "$inputs.classes", + "api_key": "$inputs.api_key", + }, + { + "type": "roboflow_core/vlm_as_classifier@v2", + "name": "parser", + "image": "$inputs.image", + "vlm_output": "$steps.gpt.output", + "classes": "$steps.gpt.classes", + }, + { + "type": "roboflow_core/property_definition@v1", + "name": "top_class", + "operations": [ + {"type": "ClassificationPropertyExtract", "property_name": "top_class"} + ], + "data": "$steps.parser.predictions", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "gpt_result", + "selector": "$steps.gpt.output", + }, + { + "type": "JsonField", + "name": "top_class", + "selector": "$steps.top_class.output", + }, + { + "type": "JsonField", + "name": "parsed_prediction", + "selector": "$steps.parser.*", + }, + ], +} + + @add_to_workflows_gallery( category="Workflows with Visual Language Models", use_case_title="Using GPT as multi-class classifier", 
use_case_description=""" In this example, GPT model is used as classifier. Output from the model is parsed by -special `roboflow_core/vlm_as_classifier@v1` block which turns GPT output text into +special `roboflow_core/vlm_as_classifier@v2` block which turns GPT output text into full-blown prediction, which can later be used by other blocks compatible with classification predictions - in this case we extract top-class property. """, @@ -425,7 +518,7 @@ def test_workflow_with_multi_class_classifier_prompt( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$inputs.image", "vlm_output": "$steps.gpt.output", @@ -627,7 +720,7 @@ def test_workflow_with_structured_prompt( "api_key": "$inputs.api_key", }, { - "type": "roboflow_core/vlm_as_classifier@v1", + "type": "roboflow_core/vlm_as_classifier@v2", "name": "parser", "image": "$steps.cropping.crops", "vlm_output": "$steps.gpt.output", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_rate_limiter.py b/tests/workflows/integration_tests/execution/test_workflow_with_rate_limiter.py index 13bc1965b..cd5b88e06 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_rate_limiter.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_rate_limiter.py @@ -15,7 +15,7 @@ ], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "model", "images": "$inputs.image", "model_id": "yolov8n-640", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py b/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py index bcdca5b99..f56a3f32b 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py @@ -24,7 +24,7 @@ "image": "$inputs.image", }, { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "detection", "image": "$steps.image_slicer.slices", "model_id": "yolov8n-640", @@ -395,7 +395,7 @@ def slicer_callback(image_slice: np.ndarray): "image": "$inputs.image", }, { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "detection", "image": "$steps.image_slicer.slices", "model_id": "yolov8n-seg-640", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_sam2.py b/tests/workflows/integration_tests/execution/test_workflow_with_sam2.py index 9371b14c5..8116f7d5b 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_sam2.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_sam2.py @@ -179,7 +179,7 @@ def test_sam2_workflow_when_minimal_valid_input_provided_but_filtering_discard_m ], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "detection", "model_id": "yolov8n-640", "images": "$inputs.image", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_stitch_for_dynamic_crop.py b/tests/workflows/integration_tests/execution/test_workflow_with_stitch_for_dynamic_crop.py index 5edd32f5b..0b95a9006 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_stitch_for_dynamic_crop.py +++ 
b/tests/workflows/integration_tests/execution/test_workflow_with_stitch_for_dynamic_crop.py @@ -15,7 +15,7 @@ ], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "car_detection", "image": "$inputs.image", "model_id": "yolov8n-640", @@ -28,7 +28,7 @@ "predictions": "$steps.car_detection.predictions", }, { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "plates_detection", "image": "$steps.cropping.crops", "model_id": "vehicle-registration-plates-trudk/2", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_two_stage_models_and_flow_control.py b/tests/workflows/integration_tests/execution/test_workflow_with_two_stage_models_and_flow_control.py index ccf383ff7..721443c09 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_two_stage_models_and_flow_control.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_two_stage_models_and_flow_control.py @@ -10,7 +10,7 @@ "inputs": [{"type": "WorkflowImage", "name": "image"}], "steps": [ { - "type": "roboflow_core/roboflow_object_detection_model@v1", + "type": "roboflow_core/roboflow_object_detection_model@v2", "name": "general_detection", "image": "$inputs.image", "model_id": "yolov8n-640", @@ -23,7 +23,7 @@ "predictions": "$steps.general_detection.predictions", }, { - "type": "roboflow_core/roboflow_classification_model@v1", + "type": "roboflow_core/roboflow_classification_model@v2", "name": "breds_classification", "image": "$steps.cropping.crops", "model_id": "dog-breed-xpaq6/1", diff --git a/tests/workflows/unit_tests/core_steps/formatters/vlm_as_classifier/__init__.py b/tests/workflows/unit_tests/core_steps/formatters/vlm_as_classifier/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/workflows/unit_tests/core_steps/formatters/test_vlm_as_classifier.py b/tests/workflows/unit_tests/core_steps/formatters/vlm_as_classifier/test_v1.py similarity index 100% rename from tests/workflows/unit_tests/core_steps/formatters/test_vlm_as_classifier.py rename to tests/workflows/unit_tests/core_steps/formatters/vlm_as_classifier/test_v1.py diff --git a/tests/workflows/unit_tests/core_steps/formatters/vlm_as_classifier/test_v2.py b/tests/workflows/unit_tests/core_steps/formatters/vlm_as_classifier/test_v2.py new file mode 100644 index 000000000..e22c744ce --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/formatters/vlm_as_classifier/test_v2.py @@ -0,0 +1,342 @@ +from typing import List, Union + +import numpy as np +import pytest + +from inference.core.workflows.core_steps.formatters.vlm_as_classifier.v2 import ( + BlockManifest, + VLMAsClassifierBlockV2, +) +from inference.core.workflows.execution_engine.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + + +@pytest.mark.parametrize("image", ["$inputs.image", "$steps.some.image"]) +@pytest.mark.parametrize( + "classes", ["$inputs.classes", "$steps.some.classes", ["a", "b"]] +) +def test_block_manifest_parsing_when_input_is_valid( + image: str, classes: Union[str, List[str]] +) -> None: + # given + raw_manifest = { + "type": "roboflow_core/vlm_as_classifier@v2", + "image": image, + "name": "parser", + "vlm_output": "$steps.vlm.output", + "classes": classes, + } + + # when + result = BlockManifest.model_validate(raw_manifest) + + # then + assert result == BlockManifest( + type="roboflow_core/vlm_as_classifier@v2", + 
name="parser", + image=image, + vlm_output="$steps.vlm.output", + classes=classes, + ) + + +def test_run_when_valid_json_given_for_multi_class_classification() -> None: + # given + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + vlm_output = """ +```json +{"class_name": "car", "confidence": "0.7"} +``` + """ + block = VLMAsClassifierBlockV2() + + # when + result = block.run(image=image, vlm_output=vlm_output, classes=["car", "cat"]) + + # then + assert result["error_status"] is False + assert result["predictions"]["image"] == {"width": 168, "height": 192} + assert result["predictions"]["predictions"] == [ + {"class": "car", "class_id": 0, "confidence": 0.7}, + {"class": "cat", "class_id": 1, "confidence": 0.0}, + ] + assert result["predictions"]["top"] == "car" + assert abs(result["predictions"]["confidence"] - 0.7) < 1e-5 + assert result["predictions"]["parent_id"] == "parent" + assert len(result["inference_id"]) > 0 + assert result["inference_id"] == result["predictions"]["inference_id"] + + +def test_run_when_valid_json_given_for_multi_class_classification_when_unknown_class_predicted() -> ( + None +): + # given + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + vlm_output = """ +```json +{"class_name": "my_class", "confidence": "0.7"} +``` + """ + block = VLMAsClassifierBlockV2() + + # when + result = block.run(image=image, vlm_output=vlm_output, classes=["car", "cat"]) + + # then + assert result["error_status"] is False + assert result["predictions"]["image"] == {"width": 168, "height": 192} + assert result["predictions"]["predictions"] == [ + {"class": "my_class", "class_id": -1, "confidence": 0.7}, + {"class": "car", "class_id": 0, "confidence": 0.0}, + {"class": "cat", "class_id": 1, "confidence": 0.0}, + ] + assert result["predictions"]["top"] == "my_class" + assert abs(result["predictions"]["confidence"] - 0.7) < 1e-5 + assert result["predictions"]["parent_id"] == "parent" + assert len(result["inference_id"]) > 0 + assert result["inference_id"] == result["predictions"]["inference_id"] + + +def test_run_when_valid_json_given_for_multi_label_classification() -> None: + # given + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + vlm_output = """ + {"predicted_classes": [ + {"class": "cat", "confidence": 0.3}, {"class": "dog", "confidence": 0.6}, + {"class": "cat", "confidence": "0.7"} + ]} + """ + block = VLMAsClassifierBlockV2() + + # when + result = block.run( + image=image, vlm_output=vlm_output, classes=["car", "cat", "dog"] + ) + + # then + assert result["error_status"] is False + assert result["predictions"]["image"] == {"width": 168, "height": 192} + assert result["predictions"]["predictions"] == { + "car": {"confidence": 0.0, "class_id": 0}, + "cat": {"confidence": 0.7, "class_id": 1}, + "dog": {"confidence": 0.6, "class_id": 2}, + } + assert set(result["predictions"]["predicted_classes"]) == {"cat", "dog"} + assert result["predictions"]["parent_id"] == "parent" + assert len(result["inference_id"]) > 0 + assert result["inference_id"] == result["predictions"]["inference_id"] + + +def test_run_when_valid_json_given_for_multi_label_classification_when_unknown_class_provided() -> ( + None +): + # given + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + 
parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + vlm_output = """ + {"predicted_classes": [ + {"class": "my_class_1", "confidence": 0.3}, {"class": "my_class_2", "confidence": 0.6}, + {"class": "my_class_1", "confidence": 0.7} + ]} + """ + block = VLMAsClassifierBlockV2() + + # when + result = block.run( + image=image, vlm_output=vlm_output, classes=["car", "cat", "dog"] + ) + + # then + assert result["error_status"] is False + assert result["predictions"]["image"] == {"width": 168, "height": 192} + assert result["predictions"]["predictions"] == { + "car": {"confidence": 0.0, "class_id": 0}, + "cat": {"confidence": 0.0, "class_id": 1}, + "dog": {"confidence": 0.0, "class_id": 2}, + "my_class_1": {"confidence": 0.7, "class_id": -1}, + "my_class_2": {"confidence": 0.6, "class_id": -1}, + } + assert set(result["predictions"]["predicted_classes"]) == { + "my_class_1", + "my_class_2", + } + assert result["predictions"]["parent_id"] == "parent" + assert len(result["inference_id"]) > 0 + assert result["inference_id"] == result["predictions"]["inference_id"] + + +def test_run_when_valid_json_of_unknown_structure_given() -> None: + # given + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + block = VLMAsClassifierBlockV2() + + # when + result = block.run( + image=image, vlm_output='{"some": "data"}', classes=["car", "cat"] + ) + + # then + assert result["error_status"] is True + assert result["predictions"] is None + assert len(result["inference_id"]) > 0 + + +def test_run_when_invalid_json_given() -> None: + # given + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + block = VLMAsClassifierBlockV2() + + # when + result = block.run(image=image, vlm_output="invalid_json", classes=["car", "cat"]) + + # then + assert result["error_status"] is True + assert result["predictions"] is None + assert len(result["inference_id"]) > 0 + + +def test_run_when_multiple_jsons_given() -> None: + # given + raw_json = """ + {"predicted_classes": [ + {"class": "cat", "confidence": 0.3}, {"class": "dog", "confidence": 0.6}, + {"class": "cat", "confidence": "0.7"} + ]} + {"predicted_classes": [ + {"class": "cat", "confidence": 0.4}, {"class": "dog", "confidence": 0.7}, + {"class": "cat", "confidence": "0.8"} + ]} + """ + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + block = VLMAsClassifierBlockV2() + + # when + result = block.run(image=image, vlm_output=raw_json, classes=["car", "cat"]) + + # then + assert result["error_status"] is True + assert result["predictions"] is None + assert len(result["inference_id"]) > 0 + + +def test_run_when_json_in_markdown_block_given() -> None: + # given + raw_json = """ +```json +{"predicted_classes": [ + {"class": "cat", "confidence": 0.3}, {"class": "dog", "confidence": 0.6}, + {"class": "cat", "confidence": "0.7"} +]} +``` +``` + """ + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + block = VLMAsClassifierBlockV2() + + # when + result = block.run(image=image, vlm_output=raw_json, classes=["car", "cat", "dog"]) + + # then + assert result["error_status"] is False + assert result["predictions"]["image"] == {"width": 168, "height": 192} + assert result["predictions"]["predictions"] == { 
+ "car": {"confidence": 0.0, "class_id": 0}, + "cat": {"confidence": 0.7, "class_id": 1}, + "dog": {"confidence": 0.6, "class_id": 2}, + } + assert set(result["predictions"]["predicted_classes"]) == {"cat", "dog"} + assert result["predictions"]["parent_id"] == "parent" + assert len(result["inference_id"]) > 0 + assert result["inference_id"] == result["predictions"]["inference_id"] + + +def test_run_when_json_in_markdown_block_without_new_lines_given() -> None: + # given + raw_json = """ +```json{"predicted_classes": [{"class": "cat", "confidence": 0.3}, {"class": "dog", "confidence": 0.6}, {"class": "cat", "confidence": "0.7"}]}``` +""" + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + block = VLMAsClassifierBlockV2() + + # when + result = block.run(image=image, vlm_output=raw_json, classes=["car", "cat", "dog"]) + + # then + assert result["error_status"] is False + assert result["predictions"]["image"] == {"width": 168, "height": 192} + assert result["predictions"]["predictions"] == { + "car": {"confidence": 0.0, "class_id": 0}, + "cat": {"confidence": 0.7, "class_id": 1}, + "dog": {"confidence": 0.6, "class_id": 2}, + } + assert set(result["predictions"]["predicted_classes"]) == {"cat", "dog"} + assert result["predictions"]["parent_id"] == "parent" + assert len(result["inference_id"]) > 0 + assert result["inference_id"] == result["predictions"]["inference_id"] + + +def test_run_when_multiple_jsons_in_markdown_block_given() -> None: + # given + raw_json = """ +```json +{"predicted_classes": [ + {"class": "cat", "confidence": 0.3}, {"class": "dog", "confidence": 0.6}, + {"class": "cat", "confidence": "0.7"} +]} +``` +```json +{"predicted_classes": [ + {"class": "cat", "confidence": 0.4}, {"class": "dog", "confidence": 0.7}, + {"class": "cat", "confidence": "0.8"} +]} +``` +""" + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + block = VLMAsClassifierBlockV2() + + # when + result = block.run(image=image, vlm_output=raw_json, classes=["car", "cat", "dog"]) + + # then + assert result["error_status"] is False + assert result["predictions"]["image"] == {"width": 168, "height": 192} + assert result["predictions"]["predictions"] == { + "car": {"confidence": 0.0, "class_id": 0}, + "cat": {"confidence": 0.7, "class_id": 1}, + "dog": {"confidence": 0.6, "class_id": 2}, + } + assert set(result["predictions"]["predicted_classes"]) == {"cat", "dog"} + assert result["predictions"]["parent_id"] == "parent" + assert len(result["inference_id"]) > 0 + assert result["inference_id"] == result["predictions"]["inference_id"] diff --git a/tests/workflows/unit_tests/core_steps/formatters/vlm_as_detector/__init__.py b/tests/workflows/unit_tests/core_steps/formatters/vlm_as_detector/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/workflows/unit_tests/core_steps/formatters/test_vlm_as_detector.py b/tests/workflows/unit_tests/core_steps/formatters/vlm_as_detector/test_v1.py similarity index 100% rename from tests/workflows/unit_tests/core_steps/formatters/test_vlm_as_detector.py rename to tests/workflows/unit_tests/core_steps/formatters/vlm_as_detector/test_v1.py diff --git a/tests/workflows/unit_tests/core_steps/formatters/vlm_as_detector/test_v2.py b/tests/workflows/unit_tests/core_steps/formatters/vlm_as_detector/test_v2.py new file mode 100644 index 000000000..3af0c5c10 --- /dev/null +++ 
b/tests/workflows/unit_tests/core_steps/formatters/vlm_as_detector/test_v2.py @@ -0,0 +1,404 @@ +from typing import List, Union + +import numpy as np +import pytest +import supervision as sv + +from inference.core.workflows.core_steps.formatters.vlm_as_detector.v2 import ( + BlockManifest, + VLMAsDetectorBlockV2, +) +from inference.core.workflows.execution_engine.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + + +@pytest.mark.parametrize("image", ["$inputs.image", "$steps.some.image"]) +@pytest.mark.parametrize( + "classes", ["$inputs.classes", "$steps.some.classes", ["a", "b"]] +) +def test_manifest_parsing_when_input_valid( + image: str, classes: Union[str, List[str]] +) -> None: + # given + raw_manifest = { + "type": "roboflow_core/vlm_as_detector@v2", + "name": "parser", + "image": image, + "vlm_output": "$steps.vlm.output", + "classes": classes, + "model_type": "google-gemini", + "task_type": "object-detection", + } + + # when + result = BlockManifest.model_validate(raw_manifest) + + # then + assert result == BlockManifest( + type="roboflow_core/vlm_as_detector@v2", + name="parser", + image=image, + vlm_output="$steps.vlm.output", + classes=classes, + model_type="google-gemini", + task_type="object-detection", + ) + + +def test_run_method_for_claude_and_gemini_output() -> None: + # given + block = VLMAsDetectorBlockV2() + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + vlm_output = """ +{"detections": [ + {"x_min": 0.01, "y_min": 0.15, "x_max": 0.15, "y_max": 0.85, "class_name": "cat", "confidence": 1.98}, + {"x_min": 0.17, "y_min": 0.25, "x_max": 0.32, "y_max": 0.85, "class_name": "dog", "confidence": 0.97}, + {"x_min": 0.33, "y_min": 0.15, "x_max": 0.47, "y_max": 0.85, "class_name": "cat", "confidence": 0.99}, + {"x_min": 0.49, "y_min": 0.30, "x_max": 0.65, "y_max": 0.85, "class_name": "dog", "confidence": 0.98}, + {"x_min": 0.67, "y_min": 0.20, "x_max": 0.82, "y_max": 0.85, "class_name": "cat", "confidence": 0.99}, + {"x_min": 0.84, "y_min": 0.25, "x_max": 0.99, "y_max": 0.85, "class_name": "unknown", "confidence": 0.97} +]} + """ + + # when + result = block.run( + image=image, + vlm_output=vlm_output, + classes=["cat", "dog", "lion"], + model_type="google-gemini", + task_type="object-detection", + ) + + # then + assert result["error_status"] is False + assert isinstance(result["predictions"], sv.Detections) + assert len(result["inference_id"]) > 0 + assert np.allclose( + result["predictions"].xyxy, + np.array( + [ + [2, 29, 25, 163], + [29, 48, 54, 163], + [55, 29, 79, 163], + [82, 58, 109, 163], + [113, 38, 138, 163], + [141, 48, 166, 163], + ] + ), + atol=1.0, + ) + assert np.allclose(result["predictions"].class_id, np.array([0, 1, 0, 1, 0, -1])) + assert np.allclose( + result["predictions"].confidence, np.array([1.0, 0.97, 0.99, 0.98, 0.99, 0.97]) + ) + assert "class_name" in result["predictions"].data + assert "image_dimensions" in result["predictions"].data + assert "prediction_type" in result["predictions"].data + assert "parent_coordinates" in result["predictions"].data + assert "parent_dimensions" in result["predictions"].data + assert "root_parent_coordinates" in result["predictions"].data + assert "root_parent_dimensions" in result["predictions"].data + assert "parent_id" in result["predictions"].data + assert "root_parent_id" in result["predictions"].data + + +def test_run_method_for_invalid_claude_and_gemini_output() -> None: + # given + block = 
VLMAsDetectorBlockV2() + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + vlm_output = """ + {"detections": [ + {"x_min": 0.01, "y_min": 0.15, "x_max": 0.15, "y_max": 0.85, "confidence": 1.98}, + {"x_min": 0.17, "y_min": 0.25, "x_max": 0.32, "y_max": 0.85, "class_name": "dog", "confidence": 0.97}, + {"x_min": 0.33, "y_min": 0.15, "x_max": 0.47, "y_max": 0.85, "class_name": "cat", "confidence": 0.99}, + {"x_min": 0.49, "x_max": 0.65, "y_max": 0.85, "class_name": "dog", "confidence": 0.98}, + {"x_min": 0.67, "y_min": 0.20, "x_max": 0.82, "y_max": 0.85, "class_name": "cat", "confidence": 0.99}, + {"x_min": 0.84, "y_min": 0.25, "x_max": 0.99, "y_max": 0.85, "class_name": "unknown", "confidence": 0.97} + ]} + """ + + # when + result = block.run( + image=image, + vlm_output=vlm_output, + classes=["cat", "dog", "lion"], + model_type="google-gemini", + task_type="object-detection", + ) + + # then + assert result["error_status"] is True + assert result["predictions"] is None + assert len(result["inference_id"]) > 0 + + +def test_run_method_for_invalid_json() -> None: + # given + block = VLMAsDetectorBlockV2() + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + + # when + result = block.run( + image=image, + vlm_output="invalid", + classes=["cat", "dog", "lion"], + model_type="google-gemini", + task_type="object-detection", + ) + + # then + assert result["error_status"] is True + assert result["predictions"] is None + assert len(result["inference_id"]) > 0 + + +def test_formatter_for_florence2_object_detection() -> None: + # given + block = VLMAsDetectorBlockV2() + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + vlm_output = """ +{"bboxes": [[434.0, 30.848499298095703, 760.4000244140625, 530.4144897460938], [0.4000000059604645, 96.13949584960938, 528.4000244140625, 564.5574951171875]], "labels": ["cat", "dog"]} +""" + + # when + result = block.run( + image=image, + vlm_output=vlm_output, + classes=["cat", "dog"], + model_type="florence-2", + task_type="object-detection", + ) + + # then + assert result["error_status"] is False + assert isinstance(result["predictions"], sv.Detections) + assert len(result["inference_id"]) > 0 + assert np.allclose( + result["predictions"].xyxy, + np.array([[434, 30.848, 760.4, 530.41], [0.4, 96.139, 528.4, 564.56]]), + atol=1e-1, + ), "Expected coordinates to be the same as given in raw input" + assert result["predictions"].class_id.tolist() == [7725, 5324] + assert np.allclose(result["predictions"].confidence, np.array([1.0, 1.0])) + assert result["predictions"].data["class_name"].tolist() == ["cat", "dog"] + assert "class_name" in result["predictions"].data + assert "image_dimensions" in result["predictions"].data + assert "prediction_type" in result["predictions"].data + assert "parent_coordinates" in result["predictions"].data + assert "parent_dimensions" in result["predictions"].data + assert "root_parent_coordinates" in result["predictions"].data + assert "root_parent_dimensions" in result["predictions"].data + assert "parent_id" in result["predictions"].data + assert "root_parent_id" in result["predictions"].data + + +def test_formatter_for_florence2_open_vocabulary_object_detection() -> None: + # given + block = VLMAsDetectorBlockV2() + image = WorkflowImageData( + 
numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + vlm_output = """ +{"bboxes": [[434.0, 30.848499298095703, 760.4000244140625, 530.4144897460938], [0.4000000059604645, 96.13949584960938, 528.4000244140625, 564.5574951171875]], "bboxes_labels": ["cat", "dog"]} +""" + + # when + result = block.run( + image=image, + vlm_output=vlm_output, + classes=["cat", "dog"], + model_type="florence-2", + task_type="open-vocabulary-object-detection", + ) + + # then + assert result["error_status"] is False + assert isinstance(result["predictions"], sv.Detections) + assert len(result["inference_id"]) > 0 + assert np.allclose( + result["predictions"].xyxy, + np.array([[434, 30.848, 760.4, 530.41], [0.4, 96.139, 528.4, 564.56]]), + atol=1e-1, + ), "Expected coordinates to be the same as given in raw input" + assert result["predictions"].class_id.tolist() == [0, 1] + assert np.allclose(result["predictions"].confidence, np.array([1.0, 1.0])) + assert result["predictions"].data["class_name"].tolist() == ["cat", "dog"] + assert "class_name" in result["predictions"].data + assert "image_dimensions" in result["predictions"].data + assert "prediction_type" in result["predictions"].data + assert "parent_coordinates" in result["predictions"].data + assert "parent_dimensions" in result["predictions"].data + assert "root_parent_coordinates" in result["predictions"].data + assert "root_parent_dimensions" in result["predictions"].data + assert "parent_id" in result["predictions"].data + assert "root_parent_id" in result["predictions"].data + + +def test_formatter_for_florence2_phase_grounded_detection() -> None: + # given + block = VLMAsDetectorBlockV2() + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + vlm_output = """ +{"bboxes": [[434.0, 30.848499298095703, 760.4000244140625, 530.4144897460938], [0.4000000059604645, 96.13949584960938, 528.4000244140625, 564.5574951171875]], "labels": ["cat", "dog"]} +""" + + # when + result = block.run( + image=image, + vlm_output=vlm_output, + classes=["cat", "dog"], + model_type="florence-2", + task_type="phrase-grounded-object-detection", + ) + + # then + assert result["error_status"] is False + assert isinstance(result["predictions"], sv.Detections) + assert len(result["inference_id"]) > 0 + assert np.allclose( + result["predictions"].xyxy, + np.array([[434, 30.848, 760.4, 530.41], [0.4, 96.139, 528.4, 564.56]]), + atol=1e-1, + ), "Expected coordinates to be the same as given in raw input" + assert result["predictions"].class_id.tolist() == [7725, 5324] + assert np.allclose(result["predictions"].confidence, np.array([1.0, 1.0])) + assert result["predictions"].data["class_name"].tolist() == ["cat", "dog"] + assert "class_name" in result["predictions"].data + assert "image_dimensions" in result["predictions"].data + assert "prediction_type" in result["predictions"].data + assert "parent_coordinates" in result["predictions"].data + assert "parent_dimensions" in result["predictions"].data + assert "root_parent_coordinates" in result["predictions"].data + assert "root_parent_dimensions" in result["predictions"].data + assert "parent_id" in result["predictions"].data + assert "root_parent_id" in result["predictions"].data + + +def test_formatter_for_florence2_region_proposal() -> None: + # given + block = VLMAsDetectorBlockV2() + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + 
parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + vlm_output = """ +{"bboxes": [[434.0, 30.848499298095703, 760.4000244140625, 530.4144897460938], [0.4000000059604645, 96.13949584960938, 528.4000244140625, 564.5574951171875]], "labels": ["", ""]} +""" + + # when + result = block.run( + image=image, + vlm_output=vlm_output, + classes=[], + model_type="florence-2", + task_type="region-proposal", + ) + + # then + assert result["error_status"] is False + assert isinstance(result["predictions"], sv.Detections) + assert len(result["inference_id"]) > 0 + assert np.allclose( + result["predictions"].xyxy, + np.array([[434, 30.848, 760.4, 530.41], [0.4, 96.139, 528.4, 564.56]]), + atol=1e-1, + ), "Expected coordinates to be the same as given in raw input" + assert result["predictions"].class_id.tolist() == [0, 0] + assert np.allclose(result["predictions"].confidence, np.array([1.0, 1.0])) + assert result["predictions"].data["class_name"].tolist() == ["roi", "roi"] + assert "class_name" in result["predictions"].data + assert "image_dimensions" in result["predictions"].data + assert "prediction_type" in result["predictions"].data + assert "parent_coordinates" in result["predictions"].data + assert "parent_dimensions" in result["predictions"].data + assert "root_parent_coordinates" in result["predictions"].data + assert "root_parent_dimensions" in result["predictions"].data + assert "parent_id" in result["predictions"].data + assert "root_parent_id" in result["predictions"].data + + +def test_formatter_for_florence2_ocr() -> None: + # given + block = VLMAsDetectorBlockV2() + image = WorkflowImageData( + numpy_image=np.zeros((192, 168, 3), dtype=np.uint8), + parent_metadata=ImageParentMetadata(parent_id="parent"), + ) + vlm_output = """ +{"quad_boxes": [[336.9599914550781, 77.22000122070312, 770.8800048828125, 77.22000122070312, 770.8800048828125, 144.1800079345703, 336.9599914550781, 144.1800079345703], [1273.919921875, 77.22000122070312, 1473.5999755859375, 77.22000122070312, 1473.5999755859375, 109.62000274658203, 1273.919921875, 109.62000274658203], [1652.159912109375, 72.9000015258789, 1828.7999267578125, 70.74000549316406, 1828.7999267578125, 129.05999755859375, 1652.159912109375, 131.22000122070312], [1273.919921875, 126.9000015258789, 1467.8399658203125, 126.9000015258789, 1467.8399658203125, 160.3800048828125, 1273.919921875, 160.3800048828125], [340.79998779296875, 173.3400115966797, 964.7999877929688, 173.3400115966797, 964.7999877929688, 250.02000427246094, 340.79998779296875, 251.10000610351562], [1273.919921875, 177.66000366210938, 1473.5999755859375, 177.66000366210938, 1473.5999755859375, 208.98001098632812, 1273.919921875, 208.98001098632812], [1272.0, 226.260009765625, 1467.8399658203125, 226.260009765625, 1467.8399658203125, 259.7400207519531, 1272.0, 259.7400207519531], [340.79998779296875, 264.05999755859375, 801.5999755859375, 264.05999755859375, 801.5999755859375, 345.0600280761719, 340.79998779296875, 345.0600280761719], [1273.919921875, 277.02001953125, 1471.679931640625, 277.02001953125, 1471.679931640625, 309.4200134277344, 1273.919921875, 309.4200134277344], [1273.919921875, 326.70001220703125, 1467.8399658203125, 326.70001220703125, 1467.8399658203125, 359.1000061035156, 1273.919921875, 359.1000061035156], [336.9599914550781, 376.3800048828125, 980.1599731445312, 376.3800048828125, 980.1599731445312, 417.4200134277344, 336.9599914550781, 417.4200134277344]], "labels": ["What is OCR", "01010110", "veryfi", "010100101", "(Optical Character", "01010010", 
"011100101", "Recognition?", "0101010", "01010001", "A Friendly Introduction to OCR Software"]} +""" + + # when + result = block.run( + image=image, + vlm_output=vlm_output, + classes=[], + model_type="florence-2", + task_type="ocr-with-text-detection", + ) + + # then + assert result["error_status"] is False + assert isinstance(result["predictions"], sv.Detections) + assert len(result["inference_id"]) > 0 + assert np.allclose( + result["predictions"].xyxy, + np.array( + [ + [336.96, 77.22, 770.88, 144.18], + [1273.9, 77.22, 1473.6, 109.62], + [1652.2, 70.74, 1828.8, 131.22], + [1273.9, 126.9, 1467.8, 160.38], + [340.8, 173.34, 964.8, 251.1], + [1273.9, 177.66, 1473.6, 208.98], + [1272, 226.26, 1467.8, 259.74], + [340.8, 264.06, 801.6, 345.06], + [1273.9, 277.02, 1471.7, 309.42], + [1273.9, 326.7, 1467.8, 359.1], + [336.96, 376.38, 980.16, 417.42], + ] + ), + atol=1e-1, + ), "Expected coordinates to be the same as given in raw input" + assert result["predictions"].class_id.tolist() == [0] * 11 + assert np.allclose(result["predictions"].confidence, np.array([1.0] * 11)) + assert result["predictions"].data["class_name"].tolist() == [ + "What is OCR", + "01010110", + "veryfi", + "010100101", + "(Optical Character", + "01010010", + "011100101", + "Recognition?", + "0101010", + "01010001", + "A Friendly Introduction to OCR Software", + ] + assert "class_name" in result["predictions"].data + assert "image_dimensions" in result["predictions"].data + assert "prediction_type" in result["predictions"].data + assert "parent_coordinates" in result["predictions"].data + assert "parent_dimensions" in result["predictions"].data + assert "root_parent_coordinates" in result["predictions"].data + assert "root_parent_dimensions" in result["predictions"].data + assert "parent_id" in result["predictions"].data + assert "root_parent_id" in result["predictions"].data diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/instance_segmentation/__init__.py b/tests/workflows/unit_tests/core_steps/models/roboflow/instance_segmentation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/test_instance_segmentation.py b/tests/workflows/unit_tests/core_steps/models/roboflow/instance_segmentation/test_v1.py similarity index 100% rename from tests/workflows/unit_tests/core_steps/models/roboflow/test_instance_segmentation.py rename to tests/workflows/unit_tests/core_steps/models/roboflow/instance_segmentation/test_v1.py diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/instance_segmentation/test_v2.py b/tests/workflows/unit_tests/core_steps/models/roboflow/instance_segmentation/test_v2.py new file mode 100644 index 000000000..72edab06a --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/models/roboflow/instance_segmentation/test_v2.py @@ -0,0 +1,133 @@ +from typing import Any + +import pytest +from pydantic import ValidationError + +from inference.core.workflows.core_steps.models.roboflow.instance_segmentation.v2 import ( + BlockManifest, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_instance_segmentation_model_validation_when_minimalistic_config_is_provided( + images_field_alias: str, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_instance_segmentation_model@v2", + "name": "some", + images_field_alias: "$inputs.image", + "model_id": "some/1", + } + + # when + result = BlockManifest.model_validate(data) + + # then + assert result == BlockManifest( + 
type="roboflow_core/roboflow_instance_segmentation_model@v2", + name="some", + images="$inputs.image", + model_id="some/1", + ) + + +@pytest.mark.parametrize("field", ["type", "name", "images", "model_id"]) +def test_instance_segmentation_model_validation_when_required_field_is_not_given( + field: str, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_instance_segmentation_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + } + del data[field] + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_instance_segmentation_model_validation_when_invalid_type_provided() -> None: + # given + data = { + "type": "invalid", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_instance_segmentation_model_validation_when_model_id_has_invalid_type() -> ( + None +): + # given + data = { + "type": "roboflow_core/roboflow_instance_segmentation_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": None, + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_instance_segmentation_model_validation_when_active_learning_flag_has_invalid_type() -> ( + None +): + # given + data = { + "type": "roboflow_core/roboflow_instance_segmentation_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + "disable_active_learning": "some", + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +@pytest.mark.parametrize( + "parameter, value", + [ + ("confidence", 1.1), + ("images", "some"), + ("disable_active_learning", "some"), + ("class_agnostic_nms", "some"), + ("class_filter", "some"), + ("confidence", "some"), + ("confidence", 1.1), + ("iou_threshold", "some"), + ("iou_threshold", 1.1), + ("max_detections", 0), + ("max_candidates", 0), + ("mask_decode_mode", "some"), + ("tradeoff_factor", 1.1), + ], +) +def test_instance_segmentation_model_when_parameters_have_invalid_type( + parameter: str, + value: Any, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_instance_segmentation_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + parameter: value, + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/keypoint_detection/__init__.py b/tests/workflows/unit_tests/core_steps/models/roboflow/keypoint_detection/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/test_keypoint_detection.py b/tests/workflows/unit_tests/core_steps/models/roboflow/keypoint_detection/test_v1.py similarity index 100% rename from tests/workflows/unit_tests/core_steps/models/roboflow/test_keypoint_detection.py rename to tests/workflows/unit_tests/core_steps/models/roboflow/keypoint_detection/test_v1.py diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/keypoint_detection/test_v2.py b/tests/workflows/unit_tests/core_steps/models/roboflow/keypoint_detection/test_v2.py new file mode 100644 index 000000000..f766781e1 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/models/roboflow/keypoint_detection/test_v2.py @@ -0,0 +1,132 @@ +from typing import Any + +import pytest +from pydantic import ValidationError + +from 
inference.core.workflows.core_steps.models.roboflow.keypoint_detection.v2 import ( + BlockManifest, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_keypoints_detection_model_validation_when_minimalistic_config_is_provided( + images_field_alias: str, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_keypoint_detection_model@v2", + "name": "some", + images_field_alias: "$inputs.image", + "model_id": "some/1", + } + + # when + result = BlockManifest.model_validate(data) + + # then + assert result == BlockManifest( + type="roboflow_core/roboflow_keypoint_detection_model@v2", + name="some", + images="$inputs.image", + model_id="some/1", + ) + + +@pytest.mark.parametrize("field", ["type", "name", "images", "model_id"]) +def test_keypoints_detection_model_validation_when_required_field_is_not_given( + field: str, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_keypoint_detection_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + } + del data[field] + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_keypoints_object_detection_model_validation_when_invalid_type_provided() -> ( + None +): + # given + data = { + "type": "invalid", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_keypoints_detection_model_validation_when_model_id_has_invalid_type() -> None: + # given + data = { + "type": "roboflow_core/roboflow_keypoint_detection_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": None, + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_keypoints_detection_model_validation_when_active_learning_flag_has_invalid_type() -> ( + None +): + # given + data = { + "type": "roboflow_core/roboflow_keypoint_detection_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + "disable_active_learning": "some", + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +@pytest.mark.parametrize( + "parameter, value", + [ + ("images", "some"), + ("disable_active_learning", "some"), + ("class_agnostic_nms", "some"), + ("class_filter", "some"), + ("confidence", "some"), + ("confidence", 1.1), + ("iou_threshold", "some"), + ("iou_threshold", 1.1), + ("max_detections", 0), + ("max_candidates", 0), + ("keypoint_confidence", "some"), + ("keypoint_confidence", 1.1), + ], +) +def test_keypoints_detection_model_when_parameters_have_invalid_type( + parameter: str, + value: Any, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_keypoint_detection_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + parameter: value, + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/multi_class_classification/__init__.py b/tests/workflows/unit_tests/core_steps/models/roboflow/multi_class_classification/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/test_multi_class_classification.py b/tests/workflows/unit_tests/core_steps/models/roboflow/multi_class_classification/test_v1.py similarity index 100% rename from 
tests/workflows/unit_tests/core_steps/models/roboflow/test_multi_class_classification.py rename to tests/workflows/unit_tests/core_steps/models/roboflow/multi_class_classification/test_v1.py diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/multi_class_classification/test_v2.py b/tests/workflows/unit_tests/core_steps/models/roboflow/multi_class_classification/test_v2.py new file mode 100644 index 000000000..5af8ac2f9 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/models/roboflow/multi_class_classification/test_v2.py @@ -0,0 +1,93 @@ +import pytest +from pydantic import ValidationError + +from inference.core.workflows.core_steps.models.roboflow.multi_class_classification.v2 import ( + BlockManifest, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_classification_model_validation_when_minimalistic_config_is_provided( + images_field_alias: str, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_classification_model@v2", + "name": "some", + images_field_alias: "$inputs.image", + "model_id": "some/1", + } + + # when + result = BlockManifest.model_validate(data) + + # then + assert result == BlockManifest( + type="roboflow_core/roboflow_classification_model@v2", + name="some", + images="$inputs.image", + model_id="some/1", + ) + + +@pytest.mark.parametrize("field", ["type", "name", "images", "model_id"]) +def test_classification_model_validation_when_required_field_is_not_given( + field: str, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_classification_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + } + del data[field] + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_classification_model_validation_when_invalid_type_provided() -> None: + # given + data = { + "type": "invalid", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_classification_model_validation_when_model_id_has_invalid_type() -> None: + # given + data = { + "type": "roboflow_core/roboflow_classification_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": None, + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_classification_model_validation_when_active_learning_flag_has_invalid_type() -> ( + None +): + # given + data = { + "type": "roboflow_core/roboflow_classification_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + "disable_active_learning": "some", + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/multi_label_classification/__init__.py b/tests/workflows/unit_tests/core_steps/models/roboflow/multi_label_classification/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/test_multi_label_classification.py b/tests/workflows/unit_tests/core_steps/models/roboflow/multi_label_classification/test_v1.py similarity index 100% rename from tests/workflows/unit_tests/core_steps/models/roboflow/test_multi_label_classification.py rename to tests/workflows/unit_tests/core_steps/models/roboflow/multi_label_classification/test_v1.py diff --git 
a/tests/workflows/unit_tests/core_steps/models/roboflow/multi_label_classification/test_v2.py b/tests/workflows/unit_tests/core_steps/models/roboflow/multi_label_classification/test_v2.py new file mode 100644 index 000000000..ec4acbd33 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/models/roboflow/multi_label_classification/test_v2.py @@ -0,0 +1,97 @@ +import pytest +from pydantic import ValidationError + +from inference.core.workflows.core_steps.models.roboflow.multi_label_classification.v2 import ( + BlockManifest, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_multi_label_classification_model_validation_when_minimalistic_config_is_provided( + images_field_alias: str, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_multi_label_classification_model@v2", + "name": "some", + images_field_alias: "$inputs.image", + "model_id": "some/1", + } + + # when + result = BlockManifest.model_validate(data) + + # then + assert result == BlockManifest( + type="roboflow_core/roboflow_multi_label_classification_model@v2", + name="some", + images="$inputs.image", + model_id="some/1", + ) + + +@pytest.mark.parametrize("field", ["type", "name", "images", "model_id"]) +def test_multi_label_classification_model_validation_when_required_field_is_not_given( + field: str, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_multi_label_classification_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + } + del data[field] + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_multi_label_classification_model_validation_when_invalid_type_provided() -> ( + None +): + # given + data = { + "type": "invalid", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_multi_label_classification_model_validation_when_model_id_has_invalid_type() -> ( + None +): + # given + data = { + "type": "roboflow_core/roboflow_multi_label_classification_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": None, + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) + + +def test_multi_label_classification_model_validation_when_active_learning_flag_has_invalid_type() -> ( + None +): + # given + data = { + "type": "roboflow_core/roboflow_multi_label_classification_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + "disable_active_learning": "some", + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.model_validate(data) diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/object_detection/__init__.py b/tests/workflows/unit_tests/core_steps/models/roboflow/object_detection/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/test_object_detection.py b/tests/workflows/unit_tests/core_steps/models/roboflow/object_detection/test_v1.py similarity index 100% rename from tests/workflows/unit_tests/core_steps/models/roboflow/test_object_detection.py rename to tests/workflows/unit_tests/core_steps/models/roboflow/object_detection/test_v1.py diff --git a/tests/workflows/unit_tests/core_steps/models/roboflow/object_detection/test_v2.py b/tests/workflows/unit_tests/core_steps/models/roboflow/object_detection/test_v2.py new file mode 100644 index 000000000..a68ea8d03 --- 
/dev/null +++ b/tests/workflows/unit_tests/core_steps/models/roboflow/object_detection/test_v2.py @@ -0,0 +1,129 @@ +from typing import Any + +import pytest +from pydantic import ValidationError + +from inference.core.workflows.core_steps.models.roboflow.object_detection.v2 import ( + BlockManifest, +) + + +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_object_detection_model_validation_when_minimalistic_config_is_provided( + images_field_alias: str, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_object_detection_model@v2", + "name": "some", + images_field_alias: "$inputs.image", + "model_id": "some/1", + } + + # when + result = BlockManifest.validate(data) + + # then + assert result == BlockManifest( + type="roboflow_core/roboflow_object_detection_model@v2", + name="some", + images="$inputs.image", + model_id="some/1", + ) + + +@pytest.mark.parametrize("field", ["type", "name", "images", "model_id"]) +def test_object_detection_model_validation_when_required_field_is_not_given( + field: str, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_object_detection_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + } + del data[field] + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.validate(data) + + +def test_object_detection_model_validation_when_invalid_type_provided() -> None: + # given + data = { + "type": "invalid", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.validate(data) + + +def test_object_detection_model_validation_when_model_id_has_invalid_type() -> None: + # given + data = { + "type": "roboflow_core/roboflow_object_detection_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": None, + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.validate(data) + + +def test_object_detection_model_validation_when_active_learning_flag_has_invalid_type() -> ( + None +): + # given + data = { + "type": "roboflow_core/roboflow_object_detection_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + "disable_active_learning": "some", + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.validate(data) + + +@pytest.mark.parametrize( + "parameter, value", + [ + ("confidence", 1.1), + ("images", "some"), + ("disable_active_learning", "some"), + ("class_agnostic_nms", "some"), + ("class_filter", "some"), + ("confidence", "some"), + ("confidence", 1.1), + ("iou_threshold", "some"), + ("iou_threshold", 1.1), + ("max_detections", 0), + ("max_candidates", 0), + ], +) +def test_object_detection_model_when_parameters_have_invalid_type( + parameter: str, + value: Any, +) -> None: + # given + data = { + "type": "roboflow_core/roboflow_object_detection_model@v2", + "name": "some", + "images": "$inputs.image", + "model_id": "some/1", + parameter: value, + } + + # when + with pytest.raises(ValidationError): + _ = BlockManifest.validate(data) From 47960abb4197bdaffc07bf59066977b1ec41eaa7 Mon Sep 17 00:00:00 2001 From: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> Date: Thu, 14 Nov 2024 14:39:09 +0100 Subject: [PATCH 58/67] Use measured fps when fetching frames from live stream --- inference/core/interfaces/camera/video_source.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/inference/core/interfaces/camera/video_source.py 
b/inference/core/interfaces/camera/video_source.py index 97bf2035d..dab517174 100644 --- a/inference/core/interfaces/camera/video_source.py +++ b/inference/core/interfaces/camera/video_source.py @@ -862,11 +862,19 @@ def consume_frame( }, status_update_handlers=self._status_update_handlers, ) + measured_source_fps = declared_source_fps + if not is_source_video_file: + if hasattr(self._stream_consumption_pace_monitor, "fps"): + measured_source_fps = self._stream_consumption_pace_monitor.fps + else: + measured_source_fps = self._stream_consumption_pace_monitor() + if self._video_fps_should_be_sub_sampled(): return True return self._consume_stream_frame( video=video, declared_source_fps=declared_source_fps, + measured_source_fps=measured_source_fps, is_source_video_file=is_source_video_file, frame_timestamp=frame_timestamp, buffer=buffer, @@ -912,6 +920,7 @@ def _consume_stream_frame( self, video: VideoFrameProducer, declared_source_fps: Optional[float], + measured_source_fps: Optional[float], is_source_video_file: Optional[bool], frame_timestamp: datetime, buffer: Queue, @@ -954,7 +963,9 @@ def _consume_stream_frame( buffer=buffer, decoding_pace_monitor=self._decoding_pace_monitor, source_id=source_id, - fps=declared_source_fps, + fps=( + declared_source_fps if is_source_video_file else measured_source_fps + ), comes_from_video_file=is_source_video_file, ) if self._buffer_filling_strategy in DROP_OLDEST_STRATEGIES: From 87a69cbcf67d09bc6962718bbc4255011ac69ef3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 14:39:21 +0100 Subject: [PATCH 59/67] Bring back old supervision pin --- requirements/_requirements.txt | 2 +- requirements/requirements.cli.txt | 2 +- requirements/requirements.sdk.http.txt | 2 +- requirements/requirements.test.unit.txt | 2 +- .../execution/test_workflow_with_sahi.py | 2 +- .../test_workflow_with_video_metadata_processing.py | 11 ++++------- 6 files changed, 9 insertions(+), 12 deletions(-) diff --git a/requirements/_requirements.txt b/requirements/_requirements.txt index 25a4f7c25..690314512 100644 --- a/requirements/_requirements.txt +++ b/requirements/_requirements.txt @@ -11,7 +11,7 @@ prometheus-fastapi-instrumentator~=7.0.0 redis~=5.0.0 requests>=2.26.0,<2.32.0 # newer requests breaks docker which would need to be bumped to 7.x.x rich~=13.0.0 -supervision~=0.25.0 +supervision>=0.21.0,<=0.22.0 pybase64~=1.0.0 scikit-image>=0.19.0,<=0.24.0 requests-toolbelt~=1.0.0 diff --git a/requirements/requirements.cli.txt b/requirements/requirements.cli.txt index a0ce6c4eb..d409889a3 100644 --- a/requirements/requirements.cli.txt +++ b/requirements/requirements.cli.txt @@ -3,7 +3,7 @@ docker==6.1.3 typer>=0.9.0,<=0.12.5 rich~=13.0.0 PyYAML~=6.0.0 -supervision~=0.25.0 +supervision>=0.21.0,<=0.22.0 opencv-python>=4.8.1.78,<=4.10.0.84 tqdm>=4.0.0,<5.0.0 GPUtil~=1.4.0 diff --git a/requirements/requirements.sdk.http.txt b/requirements/requirements.sdk.http.txt index 0d977eee6..1b08ae9d8 100644 --- a/requirements/requirements.sdk.http.txt +++ b/requirements/requirements.sdk.http.txt @@ -2,7 +2,7 @@ requests>=2.26.0,<2.32.0 # newer requests breaks docker which would need to be dataclasses-json~=0.6.0 opencv-python>=4.8.1.78,<=4.10.0.84 pillow>=9.0.0,<11.0 -supervision~=0.25.0 +supervision>=0.21.0,<=0.22.0 numpy<=1.26.4 aiohttp>=3.9.0,<=3.10.11 backoff~=2.2.0 diff --git a/requirements/requirements.test.unit.txt b/requirements/requirements.test.unit.txt index d781b5da5..a403444b2 100644 --- a/requirements/requirements.test.unit.txt +++ 
b/requirements/requirements.test.unit.txt @@ -10,4 +10,4 @@ pytest-timeout>=2.2.0 httpx uvicorn<=0.22.0 aioresponses>=0.7.6 -supervision~=0.25.0 \ No newline at end of file +supervision>=0.21.0,<=0.22.0 \ No newline at end of file diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py b/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py index 17ee2f56b..bcdca5b99 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_sahi.py @@ -352,7 +352,7 @@ def slicer_callback(image_slice: np.ndarray): callback=slicer_callback, slice_wh=(640, 640), overlap_ratio_wh=(0.2, 0.2), - overlap_filter=sv.OverlapFilter.NON_MAX_SUPPRESSION, + overlap_filter_strategy=sv.OverlapFilter.NON_MAX_SUPPRESSION, iou_threshold=0.3, ) diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_video_metadata_processing.py b/tests/workflows/integration_tests/execution/test_workflow_with_video_metadata_processing.py index 7a2027fe8..fb5acca28 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_video_metadata_processing.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_video_metadata_processing.py @@ -214,10 +214,7 @@ def test_workflow_with_tracker( first_crowd_frame_tracker_ids == second_crowd_frame_tracker_ids ), "The same image, expected no tracker IDs change" assert first_license_plate_frame_tracker_ids == [ - 1, - 2, - 3, - ], ( - "Since `supervision>=0.25.0` tracker IDs are unique for each new tracker instance - and we " - "expect new tracker for `metadata_license_plate_image` to be created - hence fresh tracker ids" - ) + 15, + 16, + 17, + ], "External IDs for all trackers are global, hence we offset by numer of all ever generated tracker IDs" From b27bc34856a5d20e6e613a912b8f6eeb7c7d22da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 15:28:20 +0100 Subject: [PATCH 60/67] Fix builds --- docker/dockerfiles/Dockerfile.onnx.cpu | 4 ++-- docker/dockerfiles/Dockerfile.onnx.cpu.dev | 4 ++-- docker/dockerfiles/Dockerfile.onnx.cpu.parallel | 2 +- docker/dockerfiles/Dockerfile.onnx.cpu.slim | 2 +- docker/dockerfiles/Dockerfile.onnx.cpu.stream_manager | 2 +- docker/dockerfiles/Dockerfile.onnx.gpu | 4 ++-- docker/dockerfiles/Dockerfile.onnx.gpu.dev | 2 +- docker/dockerfiles/Dockerfile.onnx.gpu.parallel | 4 ++-- docker/dockerfiles/Dockerfile.onnx.gpu.slim | 2 +- docker/dockerfiles/Dockerfile.onnx.gpu.stream_manager | 2 +- docker/dockerfiles/Dockerfile.onnx.jetson.4.5.0 | 2 +- docker/dockerfiles/Dockerfile.onnx.jetson.4.6.1 | 2 +- docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 | 2 +- .../dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager | 2 +- docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 | 4 ++-- docker/dockerfiles/Dockerfile.onnx.lambda | 2 +- docker/dockerfiles/Dockerfile.onnx.lambda.slim | 2 +- docker/dockerfiles/Dockerfile.onnx.trt | 2 +- docker/dockerfiles/Dockerfile.onnx.udp.gpu | 2 +- docker/dockerfiles/Dockerfile.stream_management_api | 2 +- requirements/requirements.waf.txt | 2 +- 21 files changed, 26 insertions(+), 26 deletions(-) diff --git a/docker/dockerfiles/Dockerfile.onnx.cpu b/docker/dockerfiles/Dockerfile.onnx.cpu index c04e3ba6f..707b4c40a 100644 --- a/docker/dockerfiles/Dockerfile.onnx.cpu +++ b/docker/dockerfiles/Dockerfile.onnx.cpu @@ -39,7 +39,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.yolo_world.txt \ -r 
requirements.transformers.txt \ jupyterlab \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip @@ -51,7 +51,7 @@ COPY --from=base / / WORKDIR /build COPY . . RUN make create_wheels -RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_cpu*.whl dist/inference_sdk*.whl setuptools<=75.5.0 +RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_cpu*.whl dist/inference_sdk*.whl "setuptools<=75.5.0" RUN if [ "${TARGETPLATFORM}" = "linux/amd64" ]; then pip3 install -r requirements/requirements.vino.txt; rm -rf ~/.cache/pip; fi diff --git a/docker/dockerfiles/Dockerfile.onnx.cpu.dev b/docker/dockerfiles/Dockerfile.onnx.cpu.dev index a41f84d06..8645c9ab2 100644 --- a/docker/dockerfiles/Dockerfile.onnx.cpu.dev +++ b/docker/dockerfiles/Dockerfile.onnx.cpu.dev @@ -39,7 +39,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.yolo_world.txt \ -r requirements.transformers.txt \ jupyterlab \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip @@ -51,7 +51,7 @@ COPY --from=base / / WORKDIR /build COPY . . RUN make create_wheels -RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_cpu*.whl dist/inference_sdk*.whl setuptools<=75.5.0 +RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_cpu*.whl dist/inference_sdk*.whl "setuptools<=75.5.0" RUN pip3 install watchdog[watchmedo] RUN if [ "${TARGETPLATFORM}" = "linux/amd64" ]; then pip3 install -r requirements/requirements.vino.txt; rm -rf ~/.cache/pip; fi diff --git a/docker/dockerfiles/Dockerfile.onnx.cpu.parallel b/docker/dockerfiles/Dockerfile.onnx.cpu.parallel index 61bcb3ea5..4da451f95 100644 --- a/docker/dockerfiles/Dockerfile.onnx.cpu.parallel +++ b/docker/dockerfiles/Dockerfile.onnx.cpu.parallel @@ -42,7 +42,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.parallel.txt \ -r requirements.cli.txt \ -r requirements.sdk.http.txt \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip RUN apt-get update && apt-get install -y lsb-release curl gpg diff --git a/docker/dockerfiles/Dockerfile.onnx.cpu.slim b/docker/dockerfiles/Dockerfile.onnx.cpu.slim index b3119a42f..9e421a13e 100644 --- a/docker/dockerfiles/Dockerfile.onnx.cpu.slim +++ b/docker/dockerfiles/Dockerfile.onnx.cpu.slim @@ -32,7 +32,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.waf.txt \ -r requirements.cli.txt \ -r requirements.sdk.http.txt \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.cpu.stream_manager b/docker/dockerfiles/Dockerfile.onnx.cpu.stream_manager index 957bf046b..ead560587 100644 --- a/docker/dockerfiles/Dockerfile.onnx.cpu.stream_manager +++ b/docker/dockerfiles/Dockerfile.onnx.cpu.stream_manager @@ -23,7 +23,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r _requirements.txt \ -r requirements.cpu.txt \ -r requirements.http.txt \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.gpu b/docker/dockerfiles/Dockerfile.onnx.gpu index d15a33c1c..1d7b8c5dc 100644 --- a/docker/dockerfiles/Dockerfile.onnx.gpu +++ b/docker/dockerfiles/Dockerfile.onnx.gpu @@ -44,7 +44,7 @@ RUN python3 -m pip install \ -r requirements.yolo_world.txt \ -r requirements.transformers.txt \ jupyterlab \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip 
@@ -61,7 +61,7 @@ WORKDIR /build COPY . . RUN ln -s /usr/bin/python3 /usr/bin/python RUN /bin/make create_wheels_for_gpu_notebook -RUN pip3 install --no-cache-dir dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl setuptools<=75.5.0 +RUN pip3 install --no-cache-dir dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl "setuptools<=75.5.0" WORKDIR /notebooks COPY examples/notebooks . diff --git a/docker/dockerfiles/Dockerfile.onnx.gpu.dev b/docker/dockerfiles/Dockerfile.onnx.gpu.dev index d2fc5ff2b..211cb8c92 100644 --- a/docker/dockerfiles/Dockerfile.onnx.gpu.dev +++ b/docker/dockerfiles/Dockerfile.onnx.gpu.dev @@ -49,7 +49,7 @@ RUN python3 -m pip install \ -r requirements.sdk.http.txt \ -r requirements.cli.txt \ jupyterlab \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.gpu.parallel b/docker/dockerfiles/Dockerfile.onnx.gpu.parallel index 5e2c123b7..738ba8cd8 100644 --- a/docker/dockerfiles/Dockerfile.onnx.gpu.parallel +++ b/docker/dockerfiles/Dockerfile.onnx.gpu.parallel @@ -31,7 +31,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.waf.txt \ -r requirements.gaze.txt \ -r requirements.parallel.txt \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip @@ -48,7 +48,7 @@ WORKDIR /build COPY . . RUN ln -s /usr/bin/python3 /usr/bin/python RUN /bin/make create_wheels_for_gpu_notebook -RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl setuptools<=75.5.0 +RUN pip3 install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl "setuptools<=75.5.0" WORKDIR /notebooks COPY examples/notebooks . 
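Aside on the `setuptools<=75.5.0` quoting repeated across these Dockerfiles: in the shell form of `RUN`, an unquoted `<` is parsed as input redirection, so the version specifier never reaches pip and the step aborts because the shell cannot open a file literally named `=75.5.0`, which is presumably why the unquoted form broke the image builds this commit fixes. A minimal illustrative sketch of the failure mode and the fix (not part of the patch):

    # Unquoted: the shell rewrites this as `pip3 install setuptools` with stdin
    # redirected from a file named `=75.5.0`; the missing file fails the RUN step
    # before pip ever runs.
    RUN pip3 install setuptools<=75.5.0

    # Quoted: the whole specifier is passed to pip as a single argument,
    # so setuptools really is constrained to <=75.5.0.
    RUN pip3 install "setuptools<=75.5.0"
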
diff --git a/docker/dockerfiles/Dockerfile.onnx.gpu.slim b/docker/dockerfiles/Dockerfile.onnx.gpu.slim index 8ced44539..1739789c8 100644 --- a/docker/dockerfiles/Dockerfile.onnx.gpu.slim +++ b/docker/dockerfiles/Dockerfile.onnx.gpu.slim @@ -27,7 +27,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.waf.txt \ -r requirements.cli.txt \ -r requirements.sdk.http.txt \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.gpu.stream_manager b/docker/dockerfiles/Dockerfile.onnx.gpu.stream_manager index bdefe1f20..810f729bf 100644 --- a/docker/dockerfiles/Dockerfile.onnx.gpu.stream_manager +++ b/docker/dockerfiles/Dockerfile.onnx.gpu.stream_manager @@ -21,7 +21,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r _requirements.txt \ -r requirements.http.txt \ -r requirements.gpu.txt \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.4.5.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.4.5.0 index 31f8c5f2a..4d9bd9d04 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.4.5.0 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.4.5.0 @@ -42,7 +42,7 @@ RUN python3.8 -m pip install --upgrade pip wheel Cython && python3.8 -m pip inst -r requirements.sdk.http.txt \ -r requirements.yolo_world.txt \ jupyterlab \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.4.6.1 b/docker/dockerfiles/Dockerfile.onnx.jetson.4.6.1 index 1b6ae8d90..8c320d33d 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.4.6.1 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.4.6.1 @@ -57,7 +57,7 @@ RUN python3.9 -m pip install --upgrade pip "h5py<=3.10.0" && python3.9 -m pip in -r requirements.sdk.http.txt \ -r requirements.yolo_world.txt \ jupyterlab \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 index efb0114e9..f26dce4f2 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1 @@ -52,7 +52,7 @@ RUN python3.9 -m pip install --upgrade pip && python3.9 -m pip install \ -r requirements.sdk.http.txt \ -r requirements.yolo_world.txt \ jupyterlab \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager index e98a6fec4..4b86aa06b 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager @@ -40,7 +40,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r _requirements.txt \ -r requirements.clip.txt \ -r requirements.http.txt \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 index 9302b2879..2807037dc 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.6.0.0 @@ -35,7 +35,7 @@ RUN python3 -m pip install --upgrade pip && \ -r requirements/requirements.sdk.http.txt \ -r requirements/requirements.yolo_world.txt \ -r requirements/requirements.jetson.txt \ - setuptools<=75.5.0 + "setuptools<=75.5.0" # Build the 
application WORKDIR /build @@ -46,7 +46,7 @@ RUN rm -f dist/* && \ python3 .release/pypi/inference.gpu.setup.py bdist_wheel && \ python3 .release/pypi/inference.sdk.setup.py bdist_wheel && \ python3 .release/pypi/inference.cli.setup.py bdist_wheel && \ - python3 -m pip install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl setuptools<=75.5.0 + python3 -m pip install dist/inference_cli*.whl dist/inference_core*.whl dist/inference_gpu*.whl dist/inference_sdk*.whl "setuptools<=75.5.0" # Set up the application runtime WORKDIR /app diff --git a/docker/dockerfiles/Dockerfile.onnx.lambda b/docker/dockerfiles/Dockerfile.onnx.lambda index 9d4adc67c..4bb7e2b17 100644 --- a/docker/dockerfiles/Dockerfile.onnx.lambda +++ b/docker/dockerfiles/Dockerfile.onnx.lambda @@ -47,7 +47,7 @@ RUN pip3 install \ -r requirements.sdk.http.txt \ -r requirements.yolo_world.txt \ mangum \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ --target "${LAMBDA_TASK_ROOT}" \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.lambda.slim b/docker/dockerfiles/Dockerfile.onnx.lambda.slim index 85b44f422..12c746b5e 100644 --- a/docker/dockerfiles/Dockerfile.onnx.lambda.slim +++ b/docker/dockerfiles/Dockerfile.onnx.lambda.slim @@ -35,7 +35,7 @@ RUN pip3 install \ -r requirements.hosted.txt \ -r requirements.sdk.http.txt \ mangum \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ --target "${LAMBDA_TASK_ROOT}" \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.trt b/docker/dockerfiles/Dockerfile.onnx.trt index cb6328ed1..c57117f73 100644 --- a/docker/dockerfiles/Dockerfile.onnx.trt +++ b/docker/dockerfiles/Dockerfile.onnx.trt @@ -33,7 +33,7 @@ RUN pip install --upgrade pip && pip install \ -r requirements.doctr.txt \ -r requirements.groundingdino.txt \ -r requirements.sdk.http.txt \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.onnx.udp.gpu b/docker/dockerfiles/Dockerfile.onnx.udp.gpu index 33a89b9ed..efd79f235 100644 --- a/docker/dockerfiles/Dockerfile.onnx.udp.gpu +++ b/docker/dockerfiles/Dockerfile.onnx.udp.gpu @@ -25,7 +25,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r requirements.clip.txt \ -r requirements.http.txt \ -r requirements.gpu.txt \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/docker/dockerfiles/Dockerfile.stream_management_api b/docker/dockerfiles/Dockerfile.stream_management_api index 0e9dd8184..7f9c8ef5c 100644 --- a/docker/dockerfiles/Dockerfile.stream_management_api +++ b/docker/dockerfiles/Dockerfile.stream_management_api @@ -23,7 +23,7 @@ RUN pip3 install --upgrade pip && pip3 install \ -r _requirements.txt \ -r requirements.cpu.txt \ -r requirements.http.txt \ - setuptools<=75.5.0 \ + "setuptools<=75.5.0" \ --upgrade \ && rm -rf ~/.cache/pip diff --git a/requirements/requirements.waf.txt b/requirements/requirements.waf.txt index 64b858d86..d5b5e0631 100644 --- a/requirements/requirements.waf.txt +++ b/requirements/requirements.waf.txt @@ -1 +1 @@ -metlo~=0.1.5 \ No newline at end of file +metlo>=0.0.17,<=0.1.5 \ No newline at end of file From 53cdf9a2c324f6a4e2cc7eb3e5c150777acc8c5d Mon Sep 17 00:00:00 2001 From: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> Date: Thu, 14 Nov 2024 15:37:28 +0100 Subject: [PATCH 61/67] Store measured_fps in inference.core.interfaces.camera.entities.VideoFrame; extend 
inference.core.workflows.execution_engine.entities.base.VideoMetadata to store measured_fps --- inference/core/interfaces/camera/entities.py | 2 ++ inference/core/interfaces/camera/video_source.py | 11 ++++++----- .../stream/model_handlers/roboflow_models.py | 10 ++++++++-- .../interfaces/stream/model_handlers/workflows.py | 3 +++ .../core/workflows/execution_engine/entities/base.py | 4 ++++ 5 files changed, 23 insertions(+), 7 deletions(-) diff --git a/inference/core/interfaces/camera/entities.py b/inference/core/interfaces/camera/entities.py index 6e81e163e..95d56cbc5 100644 --- a/inference/core/interfaces/camera/entities.py +++ b/inference/core/interfaces/camera/entities.py @@ -62,7 +62,9 @@ class VideoFrame: image: np.ndarray frame_id: FrameID frame_timestamp: FrameTimestamp + # TODO: in next major version of inference replace `fps` with `declared_fps` fps: Optional[float] = None + measured_fps: Optional[float] = None source_id: Optional[int] = None comes_from_video_file: Optional[bool] = None diff --git a/inference/core/interfaces/camera/video_source.py b/inference/core/interfaces/camera/video_source.py index dab517174..b73a7211e 100644 --- a/inference/core/interfaces/camera/video_source.py +++ b/inference/core/interfaces/camera/video_source.py @@ -963,9 +963,8 @@ def _consume_stream_frame( buffer=buffer, decoding_pace_monitor=self._decoding_pace_monitor, source_id=source_id, - fps=( - declared_source_fps if is_source_video_file else measured_source_fps - ), + declared_source_fps=declared_source_fps, + measured_source_fps=measured_source_fps, comes_from_video_file=is_source_video_file, ) if self._buffer_filling_strategy in DROP_OLDEST_STRATEGIES: @@ -1164,7 +1163,8 @@ def decode_video_frame_to_buffer( buffer: Queue, decoding_pace_monitor: sv.FPSMonitor, source_id: Optional[int], - fps: Optional[float] = None, + declared_source_fps: Optional[float] = None, + measured_source_fps: Optional[float] = None, comes_from_video_file: Optional[bool] = None, ) -> bool: success, image = video.retrieve() @@ -1175,7 +1175,8 @@ def decode_video_frame_to_buffer( image=image, frame_id=frame_id, frame_timestamp=frame_timestamp, - fps=fps, + fps=declared_source_fps, + measured_fps=measured_source_fps, source_id=source_id, comes_from_video_file=comes_from_video_file, ) diff --git a/inference/core/interfaces/stream/model_handlers/roboflow_models.py b/inference/core/interfaces/stream/model_handlers/roboflow_models.py index 3145b135b..cb2d995a9 100644 --- a/inference/core/interfaces/stream/model_handlers/roboflow_models.py +++ b/inference/core/interfaces/stream/model_handlers/roboflow_models.py @@ -1,4 +1,4 @@ -from typing import Any, List +from typing import List from inference.core.interfaces.camera.entities import VideoFrame from inference.core.interfaces.stream.entities import ModelConfig @@ -12,10 +12,16 @@ def default_process_frame( inference_config: ModelConfig, ) -> List[dict]: postprocessing_args = inference_config.to_postprocessing_params() + # TODO: handle batch input in usage + fps = video_frame[0].fps + if video_frame[0].measured_fps: + fps = video_frame[0].measured_fps + if not fps: + fps = 0 predictions = wrap_in_list( model.infer( [f.image for f in video_frame], - usage_fps=video_frame[0].fps, + usage_fps=fps, usage_api_key=model.api_key, **postprocessing_args, ) diff --git a/inference/core/interfaces/stream/model_handlers/workflows.py b/inference/core/interfaces/stream/model_handlers/workflows.py index e8c798541..c1da0c440 100644 --- 
a/inference/core/interfaces/stream/model_handlers/workflows.py +++ b/inference/core/interfaces/stream/model_handlers/workflows.py @@ -19,6 +19,8 @@ def run_workflow( workflows_parameters = {} # TODO: pass fps reflecting each stream to workflows_parameters fps = video_frames[0].fps + if video_frames[0].measured_fps: + fps = video_frames[0].measured_fps if fps is None: # for FPS reporting we expect 0 when FPS cannot be determined fps = 0 @@ -32,6 +34,7 @@ def run_workflow( frame_number=video_frame.frame_id, frame_timestamp=video_frame.frame_timestamp, fps=video_frame.fps, + measured_fps=video_frame.measured_fps, comes_from_video_file=video_frame.comes_from_video_file, ) for video_frame in video_frames diff --git a/inference/core/workflows/execution_engine/entities/base.py b/inference/core/workflows/execution_engine/entities/base.py index 09cc17d26..a60b5f8ef 100644 --- a/inference/core/workflows/execution_engine/entities/base.py +++ b/inference/core/workflows/execution_engine/entities/base.py @@ -200,6 +200,10 @@ class VideoMetadata(BaseModel): description="Field represents FPS value (if possible to be retrieved)", default=None, ) + measured_fps: Optional[float] = Field( + description="Field represents measured FPS of live stream", + default=None, + ) comes_from_video_file: Optional[bool] = Field( description="Field is a flag telling if frame comes from video file or stream - " "if not possible to be determined - pass None", From 3aefe8dcc5496c8114b53919333ae7779aa0bc7d Mon Sep 17 00:00:00 2001 From: Peter Robicheaux Date: Thu, 14 Nov 2024 15:04:03 +0000 Subject: [PATCH 62/67] Log when downloading model weights --- inference/models/transformers/transformers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/inference/models/transformers/transformers.py b/inference/models/transformers/transformers.py index 3d19ecb36..c0a24c2bf 100644 --- a/inference/models/transformers/transformers.py +++ b/inference/models/transformers/transformers.py @@ -160,6 +160,7 @@ def get_infer_bucket_file_list(self) -> list: ] def download_model_artifacts_from_roboflow_api(self) -> None: + logger.info(f"Downloading model artifacts from Roboflow API for model {self.endpoint}") api_data = get_roboflow_model_data( api_key=self.api_key, model_id=self.endpoint, From bfb8a55fbfc981b0b3113cfaec69588baeb939e6 Mon Sep 17 00:00:00 2001 From: Peter Robicheaux Date: Thu, 14 Nov 2024 15:19:13 +0000 Subject: [PATCH 63/67] dont redownload, fix error message --- inference/models/transformers/transformers.py | 28 +++++++++++-------- 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/inference/models/transformers/transformers.py b/inference/models/transformers/transformers.py index c0a24c2bf..15043da48 100644 --- a/inference/models/transformers/transformers.py +++ b/inference/models/transformers/transformers.py @@ -154,13 +154,12 @@ def get_infer_bucket_file_list(self) -> list: "special_tokens_map.json", "generation_config.json", "tokenizer.json", - re.compile(r"model-\d{5}-of-\d{5}\.safetensors"), + re.compile(r"model.*\.safetensors"), "preprocessor_config.json", "tokenizer_config.json", ] def download_model_artifacts_from_roboflow_api(self) -> None: - logger.info(f"Downloading model artifacts from Roboflow API for model {self.endpoint}") api_data = get_roboflow_model_data( api_key=self.api_key, model_id=self.endpoint, @@ -183,16 +182,21 @@ def download_model_artifacts_from_roboflow_api(self) -> None: model_id=self.endpoint, ) if filename.endswith("tar.gz"): - subprocess.run( - [ - "tar", - "-xzf", - 
os.path.join(self.cache_dir, filename), - "-C", - self.cache_dir, - ], - check=True, - ) + try: + subprocess.run( + [ + "tar", + "-xzf", + os.path.join(self.cache_dir, filename), + "-C", + self.cache_dir, + ], + check=True, + ) + except subprocess.CalledProcessError as e: + raise ModelArtefactError( + f"Failed to extract model archive {filename}. Error: {str(e)}" + ) from e if perf_counter() - t1 > 120: logger.debug( From 3338767e80185d4bc99c3765e5056b3e82bd678a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 17:12:11 +0100 Subject: [PATCH 64/67] Attempt to fix the builds --- requirements/_requirements.txt | 4 ++-- requirements/requirements.cli.txt | 4 ++-- requirements/requirements.hosted.txt | 1 - requirements/requirements.jetson.txt | 2 +- requirements/requirements.parallel.txt | 2 +- requirements/requirements.sdk.http.txt | 2 +- 6 files changed, 7 insertions(+), 8 deletions(-) diff --git a/requirements/_requirements.txt b/requirements/_requirements.txt index 690314512..b627c2ff1 100644 --- a/requirements/_requirements.txt +++ b/requirements/_requirements.txt @@ -7,9 +7,9 @@ numpy<=1.26.4 opencv-python>=4.8.1.78,<=4.10.0.84 piexif~=1.1.3 pillow<11.0 -prometheus-fastapi-instrumentator~=7.0.0 +prometheus-fastapi-instrumentator<=6.0.0 redis~=5.0.0 -requests>=2.26.0,<2.32.0 # newer requests breaks docker which would need to be bumped to 7.x.x +requests>=2.32.0,<3.0.0 rich~=13.0.0 supervision>=0.21.0,<=0.22.0 pybase64~=1.0.0 diff --git a/requirements/requirements.cli.txt b/requirements/requirements.cli.txt index d409889a3..16e5272a7 100644 --- a/requirements/requirements.cli.txt +++ b/requirements/requirements.cli.txt @@ -1,5 +1,5 @@ -requests>=2.26.0,<2.32.0 # newer requests breaks docker which would need to be bumped to 7.x.x -docker==6.1.3 +requests>=2.32.0,<3.0.0 +docker>=7.0.0,<8.0.0 typer>=0.9.0,<=0.12.5 rich~=13.0.0 PyYAML~=6.0.0 diff --git a/requirements/requirements.hosted.txt b/requirements/requirements.hosted.txt index c031776b4..da447936e 100644 --- a/requirements/requirements.hosted.txt +++ b/requirements/requirements.hosted.txt @@ -1,3 +1,2 @@ pymemcache~=4.0.0 elasticache_auto_discovery~=1.0.0 -prometheus-fastapi-instrumentator~=7.0.0 \ No newline at end of file diff --git a/requirements/requirements.jetson.txt b/requirements/requirements.jetson.txt index 703e9d485..c67662ca5 100644 --- a/requirements/requirements.jetson.txt +++ b/requirements/requirements.jetson.txt @@ -1,4 +1,4 @@ pypdfium2~=4.0.0 -jupyterlab~=4.0.0 +jupyterlab>=4.3.0,<5.0.0 PyYAML~=6.0.0 onnxruntime-gpu>=1.15.1,<1.20.0 diff --git a/requirements/requirements.parallel.txt b/requirements/requirements.parallel.txt index 0ad196d66..6c576396f 100644 --- a/requirements/requirements.parallel.txt +++ b/requirements/requirements.parallel.txt @@ -1,2 +1,2 @@ -celery~=5.0.0 +celery>=5.4.0,<6.0.0 gunicorn~=23.0.0 \ No newline at end of file diff --git a/requirements/requirements.sdk.http.txt b/requirements/requirements.sdk.http.txt index 1b08ae9d8..74f4e735e 100644 --- a/requirements/requirements.sdk.http.txt +++ b/requirements/requirements.sdk.http.txt @@ -1,4 +1,4 @@ -requests>=2.26.0,<2.32.0 # newer requests breaks docker which would need to be bumped to 7.x.x +requests>=2.32.0,<3.0.0 dataclasses-json~=0.6.0 opencv-python>=4.8.1.78,<=4.10.0.84 pillow>=9.0.0,<11.0 From efbd62d338261580af94ed0ac3fb450618f45c27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?= Date: Thu, 14 Nov 2024 17:40:44 +0100 Subject: [PATCH 65/67] Fix stream management build --- 
...ockerfile.onnx.jetson.5.1.1.stream_manager | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager index 4b86aa06b..a5ee7a545 100644 --- a/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager +++ b/docker/dockerfiles/Dockerfile.onnx.jetson.5.1.1.stream_manager @@ -6,6 +6,8 @@ ENV LANG en_US.UTF-8 RUN apt-get update -y && apt-get install -y \ lshw \ git \ + python3.9 \ + python3.9-dev \ python3-pip \ python3-matplotlib \ gfortran \ @@ -24,19 +26,19 @@ COPY requirements/requirements.clip.txt \ requirements/_requirements.txt \ ./ -RUN pip3 install --ignore-installed PyYAML && rm -rf ~/.cache/pip +RUN python3.9 -m pip install --ignore-installed PyYAML && rm -rf ~/.cache/pip # We needed to take statically compiled library for last known stable build and put it into hosting # That was due to faulty builds started 26.06.2024, probably due to release of new version # of pybind11, which gets automatically pulled while build of zxing_cpp library making # cmake to fail -RUN wget https://storage.googleapis.com/roboflow-tests-assets/zxing_cpp_library_compiled_for_inference_v0.12.1_python_3.8.tar.gz \ - && tar -xvzf zxing_cpp_library_compiled_for_inference_v0.12.1_python_3.8.tar.gz \ - && mv zxing_cpp-2.2.0.dist-info /usr/local/lib/python3.8/dist-packages/zxing_cpp-2.2.0.dist-info \ - && mv zxingcpp.cpython-38-aarch64-linux-gnu.so /usr/local/lib/python3.8/dist-packages/ \ - && rm zxing_cpp_library_compiled_for_inference_v0.12.1_python_3.8.tar.gz +RUN wget https://storage.googleapis.com/roboflow-tests-assets/zxing_cpp_library_compiled_for_inference_v0.12.1.tar.gz \ + && tar -xvzf zxing_cpp_library_compiled_for_inference_v0.12.1.tar.gz \ + && mv zxing_cpp-2.2.0.dist-info /usr/local/lib/python3.9/dist-packages/zxing_cpp-2.2.0.dist-info \ + && mv zxingcpp.cpython-39-aarch64-linux-gnu.so /usr/local/lib/python3.9/dist-packages/ \ + && rm zxing_cpp_library_compiled_for_inference_v0.12.1.tar.gz -RUN pip3 install --upgrade pip && pip3 install \ +RUN python3.9 -m pip install --upgrade pip && python3.9 -m pip install \ -r _requirements.txt \ -r requirements.clip.txt \ -r requirements.http.txt \ @@ -44,11 +46,11 @@ RUN pip3 install --upgrade pip && pip3 install \ --upgrade \ && rm -rf ~/.cache/pip -RUN pip3 uninstall --yes onnxruntime -RUN wget https://nvidia.box.com/shared/static/iizg3ggrtdkqawkmebbfixo7sce6j365.whl -O onnxruntime_gpu-1.16.0-cp38-cp38-linux_aarch64.whl -RUN pip3 install onnxruntime_gpu-1.16.0-cp38-cp38-linux_aarch64.whl "opencv-python-headless<4.3" \ +RUN python3.9 -m pip uninstall --yes onnxruntime +RUN wget https://nvidia.box.com/shared/static/67zek28z497hs9aev7xg2c1wngdeyv4h.whl -O onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl +RUN python3.9 -m pip install onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl "opencv-python-headless>4" \ && rm -rf ~/.cache/pip \ - && rm onnxruntime_gpu-1.16.0-cp38-cp38-linux_aarch64.whl + && rm onnxruntime_gpu-1.16.0-cp39-cp39-linux_aarch64.whl WORKDIR /app/ COPY inference inference @@ -66,4 +68,4 @@ ENV WORKFLOWS_STEP_EXECUTION_MODE=local ENV WORKFLOWS_MAX_CONCURRENT_STEPS=1 ENV SUPERVISON_DEPRECATION_WARNING=0 -ENTRYPOINT ["python3", "-m", "inference.enterprise.stream_management.manager.app"] +ENTRYPOINT ["python3.9", "-m", "inference.enterprise.stream_management.manager.app"] From 1d0e796bed2e3bf09f3c3a66bc0f6332789c53a9 Mon Sep 17 00:00:00 2001 From: Grzegorz Klimaszewski 
<166530809+grzegorz-roboflow@users.noreply.github.com> Date: Thu, 14 Nov 2024 18:12:26 +0100 Subject: [PATCH 66/67] Update docstring of VideoFrame; minor updates to docs --- docs/workflows/internal_data_types.md | 3 +++ docs/workflows/video_processing/overview.md | 2 +- inference/core/interfaces/camera/entities.py | 3 ++- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/workflows/internal_data_types.md b/docs/workflows/internal_data_types.md index 8e4d5921f..d2494fd8d 100644 --- a/docs/workflows/internal_data_types.md +++ b/docs/workflows/internal_data_types.md @@ -284,6 +284,9 @@ def inspect_vide_metadata(video_metadata: VideoMetadata) -> None: # Field represents FPS value (if possible to be retrieved) (optional) print(video_metadata.fps) + # Field represents measured FPS of live stream (optional) + print(video_metadata.measured_fps) + # Field is a flag telling if frame comes from video file or stream. # If not possible to be determined - None print(video_metadata.comes_from_video_file) diff --git a/docs/workflows/video_processing/overview.md b/docs/workflows/video_processing/overview.md index 20d46fa2d..45f8a65ac 100644 --- a/docs/workflows/video_processing/overview.md +++ b/docs/workflows/video_processing/overview.md @@ -5,7 +5,7 @@ video-specific blocks (e.g., the ByteTracker block) and continue to dedicate eff their performance and robustness. The current state of this work is as follows: * We've introduced the `WorkflowVideoMetadata` input to store metadata related to video frames, -including FPS, timestamp, video source identifier, and file/stream flags. While this may not be the final approach +including declared FPS, measured FPS, timestamp, video source identifier, and file/stream flags. While this may not be the final approach for handling video metadata, it allows us to build stateful video-processing blocks at this stage. If your Workflow includes any blocks requiring input of kind `video_metadata`, you must define this input in your Workflow. The metadata functions as a batch-oriented parameter, treated by the Execution Engine in the same diff --git a/inference/core/interfaces/camera/entities.py b/inference/core/interfaces/camera/entities.py index 95d56cbc5..218240d9c 100644 --- a/inference/core/interfaces/camera/entities.py +++ b/inference/core/interfaces/camera/entities.py @@ -55,7 +55,8 @@ class VideoFrame: frame_timestamp (FrameTimestamp): The timestamp when the frame was captured. source_id (int): The index of the video_reference element which was passed to InferencePipeline for this frame (useful when multiple streams are passed to InferencePipeline). 
- fps (Optional[float]): FPS of source (if possible to be acquired) + fps (Optional[float]): declared FPS of source (if possible to be acquired) + measured_fps (Optional[float]): measured FPS of live stream comes_from_video_file (Optional[bool]): flag to determine if frame comes from video file """ From 580f0a2c20d57b78ea4d74a789817ccd2cebb093 Mon Sep 17 00:00:00 2001 From: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> Date: Thu, 14 Nov 2024 18:24:33 +0100 Subject: [PATCH 67/67] update docs --- inference/core/workflows/execution_engine/entities/types.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/inference/core/workflows/execution_engine/entities/types.py b/inference/core/workflows/execution_engine/entities/types.py index f9d3d239d..aac3a76c7 100644 --- a/inference/core/workflows/execution_engine/entities/types.py +++ b/inference/core/workflows/execution_engine/entities/types.py @@ -78,6 +78,7 @@ def __hash__(self) -> int: "video_identifier": "rtsp://some.com/stream1", "comes_from_video_file": False, "fps": 23.99, + "measured_fps": 20.05, "frame_number": 24, "frame_timestamp": "2024-08-21T11:13:44.313999", } @@ -116,6 +117,7 @@ def __hash__(self) -> int: "video_identifier": "rtsp://some.com/stream1", "comes_from_video_file": False, "fps": 23.99, + "measured_fps": 20.05, "frame_number": 24, "frame_timestamp": "2024-08-21T11:13:44.313999", }
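Appended note, not part of the patch series above: a minimal Python sketch of the FPS fallback that patches 61, 66 and 67 introduce. Downstream frame handlers prefer `measured_fps` when it is available, fall back to the declared `fps`, and report 0 when neither can be determined. `VideoFrameStub` and `resolve_fps` are illustrative names only; the real class is `inference.core.interfaces.camera.entities.VideoFrame`.

from dataclasses import dataclass
from datetime import datetime
from typing import Optional

import numpy as np


@dataclass(frozen=True)
class VideoFrameStub:
    # Stand-in mirroring the fields this series touches on VideoFrame.
    image: np.ndarray
    frame_id: int
    frame_timestamp: datetime
    fps: Optional[float] = None           # declared FPS of the source
    measured_fps: Optional[float] = None  # FPS measured on a live stream
    source_id: Optional[int] = None
    comes_from_video_file: Optional[bool] = None


def resolve_fps(frame: VideoFrameStub) -> float:
    # Same fallback order as default_process_frame() and run_workflow() in the patch:
    # prefer the measured value, then the declared one, then 0 for "unknown".
    if frame.measured_fps:
        return frame.measured_fps
    if frame.fps:
        return frame.fps
    return 0.0


if __name__ == "__main__":
    frame = VideoFrameStub(
        image=np.zeros((480, 640, 3), dtype=np.uint8),
        frame_id=1,
        frame_timestamp=datetime.now(),
        fps=23.99,
        measured_fps=20.05,
    )
    print(resolve_fps(frame))  # prints 20.05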