From 54e5d703f012408448bc3015663ed1b509d6921a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Pawe=C5=82=20P=C4=99czek?=
Date: Tue, 12 Nov 2024 12:42:13 +0100
Subject: [PATCH] Adjust Roboflow models to use primarily base64 payloads

---
 .../models/foundation/clip_comparison/v2.py        |  2 +-
 .../core_steps/models/foundation/ocr/v1.py         |  2 +-
 .../models/foundation/yolo_world/v1.py             |  2 +-
 .../models/roboflow/instance_segmentation/v1.py    |  2 +-
 .../models/roboflow/keypoint_detection/v1.py       |  2 +-
 .../roboflow/multi_class_classification/v1.py      |  2 +-
 .../roboflow/multi_label_classification/v1.py      |  2 +-
 .../models/roboflow/object_detection/v1.py         |  2 +-
 .../workflows/execution_engine/entities/base.py    |  2 +-
 inference_cli/lib/cloud_adapter.py                 |  6 +++++-
 .../unit_tests/core/cache/test_serializers.py      | 16 +++++++++-------
 .../unit_tests/usage_tracking/test_collector.py    | 12 +++++++++---
 .../core_steps/analytics/test_line_counter_v2.py   | 14 ++++++++++++--
 13 files changed, 44 insertions(+), 22 deletions(-)

diff --git a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py
index 165b020cc..5132b36dc 100644
--- a/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py
+++ b/inference/core/workflows/core_steps/models/foundation/clip_comparison/v2.py
@@ -207,7 +207,7 @@ def run_remotely(
         tasks = [
             partial(
                 client.clip_compare,
-                subject=single_image.numpy_image,
+                subject=single_image.base64_image,
                 prompt=classes,
                 clip_version=version,
             )
diff --git a/inference/core/workflows/core_steps/models/foundation/ocr/v1.py b/inference/core/workflows/core_steps/models/foundation/ocr/v1.py
index 0b98c263d..de36a869f 100644
--- a/inference/core/workflows/core_steps/models/foundation/ocr/v1.py
+++ b/inference/core/workflows/core_steps/models/foundation/ocr/v1.py
@@ -169,7 +169,7 @@ def run_remotely(
             max_concurrent_requests=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS,
         )
         client.configure(configuration)
-        non_empty_inference_images = [i.numpy_image for i in images]
+        non_empty_inference_images = [i.base64_image for i in images]
         predictions = client.ocr_image(
             inference_input=non_empty_inference_images,
         )
diff --git a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py
index ce9be725c..3ec58190f 100644
--- a/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py
+++ b/inference/core/workflows/core_steps/models/foundation/yolo_world/v1.py
@@ -217,7 +217,7 @@ def run_remotely(
         client.configure(inference_configuration=configuration)
         if WORKFLOWS_REMOTE_API_TARGET == "hosted":
             client.select_api_v0()
-        inference_images = [i.to_inference_format(numpy_preferred=True) for i in images]
+        inference_images = [i.to_inference_format() for i in images]
         image_sub_batches = list(
             make_batches(
                 iterable=inference_images,
diff --git a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py
index c3480c4cf..3e967a396 100644
--- a/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py
+++ b/inference/core/workflows/core_steps/models/roboflow/instance_segmentation/v1.py
@@ -339,7 +339,7 @@ def run_remotely(
             source="workflow-execution",
         )
         client.configure(inference_configuration=client_config)
-        inference_images = [i.numpy_image for i in images]
+        inference_images = [i.base64_image for i in images]
         predictions = client.infer(
             inference_input=inference_images,
             model_id=model_id,
diff --git a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py
index b9d80bfed..7e8d0eed1 100644
--- a/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py
+++ b/inference/core/workflows/core_steps/models/roboflow/keypoint_detection/v1.py
@@ -324,7 +324,7 @@ def run_remotely(
             source="workflow-execution",
         )
         client.configure(inference_configuration=client_config)
-        inference_images = [i.numpy_image for i in images]
+        inference_images = [i.base64_image for i in images]
         predictions = client.infer(
             inference_input=inference_images,
             model_id=model_id,
diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py
index eca510831..77cba7388 100644
--- a/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py
+++ b/inference/core/workflows/core_steps/models/roboflow/multi_class_classification/v1.py
@@ -230,7 +230,7 @@ def run_remotely(
             source="workflow-execution",
         )
         client.configure(inference_configuration=client_config)
-        non_empty_inference_images = [i.numpy_image for i in images]
+        non_empty_inference_images = [i.base64_image for i in images]
         predictions = client.infer(
             inference_input=non_empty_inference_images,
             model_id=model_id,
diff --git a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py
index 78b41b32b..c2984061f 100644
--- a/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py
+++ b/inference/core/workflows/core_steps/models/roboflow/multi_label_classification/v1.py
@@ -230,7 +230,7 @@ def run_remotely(
             source="workflow-execution",
         )
         client.configure(inference_configuration=client_config)
-        non_empty_inference_images = [i.numpy_image for i in images]
+        non_empty_inference_images = [i.base64_image for i in images]
         predictions = client.infer(
             inference_input=non_empty_inference_images,
             model_id=model_id,
diff --git a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py
index ab8b84f26..70d02b26c 100644
--- a/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py
+++ b/inference/core/workflows/core_steps/models/roboflow/object_detection/v1.py
@@ -306,7 +306,7 @@ def run_remotely(
             source="workflow-execution",
         )
         client.configure(inference_configuration=client_config)
-        non_empty_inference_images = [i.numpy_image for i in images]
+        non_empty_inference_images = [i.base64_image for i in images]
         predictions = client.infer(
             inference_input=non_empty_inference_images,
             model_id=model_id,
diff --git a/inference/core/workflows/execution_engine/entities/base.py b/inference/core/workflows/execution_engine/entities/base.py
index d09dccab0..a255bbff3 100644
--- a/inference/core/workflows/execution_engine/entities/base.py
+++ b/inference/core/workflows/execution_engine/entities/base.py
@@ -375,7 +375,7 @@ def base64_image(self) -> str:
             return self._base64_image
         numpy_image = self.numpy_image
         self._base64_image = base64.b64encode(
-            encode_image_to_jpeg_bytes(numpy_image)
+            encode_image_to_jpeg_bytes(numpy_image, jpeg_quality=95)
         ).decode("ascii")
         return self._base64_image
 
diff --git a/inference_cli/lib/cloud_adapter.py b/inference_cli/lib/cloud_adapter.py
index 82f47e034..8f6e7a607 100644
--- a/inference_cli/lib/cloud_adapter.py
+++ b/inference_cli/lib/cloud_adapter.py
@@ -83,14 +83,18 @@
 """,
 }
 
+
 def check_sky_installed():
     try:
         global sky
         import sky
     except ImportError as e:
-        print("Please install cloud deploy dependencies with 'pip install inference[cloud-deploy]'")
+        print(
+            "Please install cloud deploy dependencies with 'pip install inference[cloud-deploy]'"
+        )
         raise e
 
+
 def _random_char(y):
     return "".join(random.choice(string.ascii_lowercase) for x in range(y))
 
diff --git a/tests/inference/unit_tests/core/cache/test_serializers.py b/tests/inference/unit_tests/core/cache/test_serializers.py
index 8c982f6de..0d294e31b 100644
--- a/tests/inference/unit_tests/core/cache/test_serializers.py
+++ b/tests/inference/unit_tests/core/cache/test_serializers.py
@@ -1,9 +1,11 @@
 import os
 from unittest.mock import MagicMock
+
 import pytest
+
 from inference.core.cache.serializers import (
-    to_cachable_inference_item,
     build_condensed_response,
+    to_cachable_inference_item,
 )
 from inference.core.entities.requests.inference import (
     ClassificationInferenceRequest,
@@ -11,16 +13,16 @@
 )
 from inference.core.entities.responses.inference import (
     ClassificationInferenceResponse,
-    MultiLabelClassificationInferenceResponse,
+    ClassificationPrediction,
     InstanceSegmentationInferenceResponse,
+    InstanceSegmentationPrediction,
+    Keypoint,
     KeypointsDetectionInferenceResponse,
+    KeypointsPrediction,
+    MultiLabelClassificationInferenceResponse,
+    MultiLabelClassificationPrediction,
     ObjectDetectionInferenceResponse,
     ObjectDetectionPrediction,
-    ClassificationPrediction,
-    MultiLabelClassificationPrediction,
-    InstanceSegmentationPrediction,
-    KeypointsPrediction,
-    Keypoint,
     Point,
 )
 
diff --git a/tests/inference/unit_tests/usage_tracking/test_collector.py b/tests/inference/unit_tests/usage_tracking/test_collector.py
index bbbc027cb..a25553c54 100644
--- a/tests/inference/unit_tests/usage_tracking/test_collector.py
+++ b/tests/inference/unit_tests/usage_tracking/test_collector.py
@@ -764,7 +764,7 @@ def test_zip_usage_payloads_with_different_exec_session_ids():
                 "fps": 10,
                 "exec_session_id": "session_2",
             },
-        }
+        },
     },
     {
         "fake_api1_hash": {
@@ -831,7 +831,11 @@ def test_zip_usage_payloads_with_different_exec_session_ids():
 
 def test_system_info_with_dedicated_deployment_id():
     # given
-    system_info = UsageCollector.system_info(ip_address="w.x.y.z", hostname="hostname01", dedicated_deployment_id="deployment01")
+    system_info = UsageCollector.system_info(
+        ip_address="w.x.y.z",
+        hostname="hostname01",
+        dedicated_deployment_id="deployment01",
+    )
 
     # then
     expected_system_info = {
@@ -845,7 +849,9 @@ def test_system_info_with_no_dedicated_deployment_id():
     # given
-    system_info = UsageCollector.system_info(ip_address="w.x.y.z", hostname="hostname01")
+    system_info = UsageCollector.system_info(
+        ip_address="w.x.y.z", hostname="hostname01"
+    )
 
     # then
     expected_system_info = {
diff --git a/tests/workflows/unit_tests/core_steps/analytics/test_line_counter_v2.py b/tests/workflows/unit_tests/core_steps/analytics/test_line_counter_v2.py
index 0ff8ddfb6..eb6e8797b 100644
--- a/tests/workflows/unit_tests/core_steps/analytics/test_line_counter_v2.py
+++ b/tests/workflows/unit_tests/core_steps/analytics/test_line_counter_v2.py
@@ -63,8 +63,18 @@ def test_line_counter() -> None:
     )
 
     # then
-    assert frame1_result == {"count_in": 0, "count_out": 0, "detections_in": frame1_detections[[False, False, False, False]], "detections_out": frame1_detections[[False, False, False, False]]}
-    assert frame2_result == {"count_in": 1, "count_out": 1, "detections_in": frame2_detections[[True, False, False, False]], "detections_out": frame2_detections[[False, True, False, False]]}
+    assert frame1_result == {
+        "count_in": 0,
+        "count_out": 0,
+        "detections_in": frame1_detections[[False, False, False, False]],
+        "detections_out": frame1_detections[[False, False, False, False]],
+    }
+    assert frame2_result == {
+        "count_in": 1,
+        "count_out": 1,
+        "detections_in": frame2_detections[[True, False, False, False]],
+        "detections_out": frame2_detections[[False, True, False, False]],
+    }
 
 
 def test_line_counter_no_trackers() -> None: