Adjust Roboflow models to primarily use base64 payloads #798

Open · wants to merge 1 commit into base: main
@@ -207,7 +207,7 @@ def run_remotely(
tasks = [
partial(
client.clip_compare,
- subject=single_image.numpy_image,
+ subject=single_image.base64_image,
prompt=classes,
clip_version=version,
)
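A quick illustration of what this change means in practice. This is a minimal sketch, not the block's actual code: the client call and its keyword names mirror the diff above, while the server URL, API key, prompt, and CLIP version are placeholder assumptions.

import base64
from functools import partial

import cv2
from inference_sdk import InferenceHTTPClient

# Assumed local inference server (e.g. started via `inference server start`).
client = InferenceHTTPClient(api_url="http://localhost:9001", api_key="<YOUR_API_KEY>")

image = cv2.imread("example.jpg")

# Previously the numpy array itself was handed to the SDK and serialized on
# every request; now a (cached) base64 JPEG string is passed instead.
ok, jpeg_bytes = cv2.imencode(".jpg", image)
base64_image = base64.b64encode(jpeg_bytes.tobytes()).decode("ascii")

task = partial(
    client.clip_compare,
    subject=base64_image,
    prompt=["cat", "dog"],    # placeholder classes
    clip_version="ViT-B-16",  # placeholder version
)
result = task()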
@@ -169,7 +169,7 @@ def run_remotely(
max_concurrent_requests=WORKFLOWS_REMOTE_EXECUTION_MAX_STEP_CONCURRENT_REQUESTS,
)
client.configure(configuration)
- non_empty_inference_images = [i.numpy_image for i in images]
+ non_empty_inference_images = [i.base64_image for i in images]
predictions = client.ocr_image(
inference_input=non_empty_inference_images,
)
@@ -217,7 +217,7 @@ def run_remotely(
client.configure(inference_configuration=configuration)
if WORKFLOWS_REMOTE_API_TARGET == "hosted":
client.select_api_v0()
- inference_images = [i.to_inference_format(numpy_preferred=True) for i in images]
+ inference_images = [i.to_inference_format() for i in images]
image_sub_batches = list(
make_batches(
iterable=inference_images,
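For context, a rough sketch of the distinction this flag controls. The dict shape below is an assumption made for illustration (the real WorkflowImageData.to_inference_format may differ); the relevant point is that without numpy_preferred=True the image ships in a JSON-serializable, base64-backed form.

import base64

import cv2
import numpy as np

def to_inference_format_sketch(image: np.ndarray, numpy_preferred: bool = False) -> dict:
    # Hypothetical stand-in for WorkflowImageData.to_inference_format().
    if numpy_preferred:
        # Cheap for in-process use, but must be serialized for remote calls.
        return {"type": "numpy_object", "value": image}
    ok, jpeg = cv2.imencode(".jpg", image)
    if not ok:
        raise ValueError("could not encode image to JPEG")
    # JSON-friendly payload suitable for HTTP requests to the API.
    return {"type": "base64", "value": base64.b64encode(jpeg.tobytes()).decode("ascii")}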
@@ -339,7 +339,7 @@ def run_remotely(
source="workflow-execution",
)
client.configure(inference_configuration=client_config)
- inference_images = [i.numpy_image for i in images]
+ inference_images = [i.base64_image for i in images]
predictions = client.infer(
inference_input=inference_images,
model_id=model_id,
@@ -324,7 +324,7 @@ def run_remotely(
source="workflow-execution",
)
client.configure(inference_configuration=client_config)
- inference_images = [i.numpy_image for i in images]
+ inference_images = [i.base64_image for i in images]
predictions = client.infer(
inference_input=inference_images,
model_id=model_id,
@@ -230,7 +230,7 @@ def run_remotely(
source="workflow-execution",
)
client.configure(inference_configuration=client_config)
- non_empty_inference_images = [i.numpy_image for i in images]
+ non_empty_inference_images = [i.base64_image for i in images]
predictions = client.infer(
inference_input=non_empty_inference_images,
model_id=model_id,
@@ -230,7 +230,7 @@ def run_remotely(
source="workflow-execution",
)
client.configure(inference_configuration=client_config)
- non_empty_inference_images = [i.numpy_image for i in images]
+ non_empty_inference_images = [i.base64_image for i in images]
predictions = client.infer(
inference_input=non_empty_inference_images,
model_id=model_id,
@@ -306,7 +306,7 @@ def run_remotely(
source="workflow-execution",
)
client.configure(inference_configuration=client_config)
- non_empty_inference_images = [i.numpy_image for i in images]
+ non_empty_inference_images = [i.base64_image for i in images]
predictions = client.infer(
inference_input=non_empty_inference_images,
model_id=model_id,
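The same one-line change repeats across the remaining Roboflow model blocks (object detection, classification, instance segmentation, keypoints): the remote path now feeds client.infer with base64 strings rather than numpy arrays. A condensed sketch of that path, with the server URL, API key, and model id as placeholders:

import base64

from inference_sdk import InferenceHTTPClient

# Assumed local inference server and a placeholder model id.
client = InferenceHTTPClient(api_url="http://localhost:9001", api_key="<YOUR_API_KEY>")

# Stand-in for the workflow image batch; WorkflowImageData.base64_image
# returns strings of exactly this kind (base64-encoded JPEG bytes).
with open("example.jpg", "rb") as f:
    inference_images = [base64.b64encode(f.read()).decode("ascii")]

predictions = client.infer(
    inference_input=inference_images,
    model_id="my-project/1",  # placeholder
)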
@@ -375,7 +375,7 @@ def base64_image(self) -> str:
return self._base64_image
numpy_image = self.numpy_image
self._base64_image = base64.b64encode(
- encode_image_to_jpeg_bytes(numpy_image)
+ encode_image_to_jpeg_bytes(numpy_image, jpeg_quality=95)
Contributor: Probably want to do whatever we do during the export process for training. I think it's quality 70.

Contributor: I'd vote to keep the default compression used by OpenCV. If we introduce higher compression, we will have users complaining that inference works differently when they run it locally vs. when they make requests to our platform.

).decode("ascii")
return self._base64_image

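For reference, a standalone sketch of the lazily cached property under discussion, with the JPEG quality spelled out so the trade-off raised in the two comments above is visible. encode_image_to_jpeg_bytes is approximated here with OpenCV's imencode; note that 95 matches OpenCV's own default JPEG quality, while a lower value such as 70 would shrink payloads at the cost of fidelity.

import base64
from typing import Optional

import cv2
import numpy as np

class CachedImage:
    """Minimal stand-in for the image wrapper touched in this diff."""

    def __init__(self, numpy_image: np.ndarray):
        self.numpy_image = numpy_image
        self._base64_image: Optional[str] = None

    @property
    def base64_image(self) -> str:
        # Encode once, reuse on every subsequent remote request.
        if self._base64_image is not None:
            return self._base64_image
        ok, jpeg = cv2.imencode(
            ".jpg",
            self.numpy_image,
            [int(cv2.IMWRITE_JPEG_QUALITY), 95],  # explicit quality, same value as the diff
        )
        if not ok:
            raise ValueError("JPEG encoding failed")
        self._base64_image = base64.b64encode(jpeg.tobytes()).decode("ascii")
        return self._base64_image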
6 changes: 5 additions & 1 deletion inference_cli/lib/cloud_adapter.py
@@ -83,14 +83,18 @@
""",
}


def check_sky_installed():
try:
global sky
import sky
except ImportError as e:
print("Please install cloud deploy dependencies with 'pip install inference[cloud-deploy]'")
print(
"Please install cloud deploy dependencies with 'pip install inference[cloud-deploy]'"
)
raise e


def _random_char(y):
return "".join(random.choice(string.ascii_lowercase) for x in range(y))

16 changes: 9 additions & 7 deletions tests/inference/unit_tests/core/cache/test_serializers.py
@@ -1,26 +1,28 @@
import os
from unittest.mock import MagicMock

import pytest

from inference.core.cache.serializers import (
- to_cachable_inference_item,
build_condensed_response,
+ to_cachable_inference_item,
)
from inference.core.entities.requests.inference import (
ClassificationInferenceRequest,
ObjectDetectionInferenceRequest,
)
from inference.core.entities.responses.inference import (
ClassificationInferenceResponse,
- MultiLabelClassificationInferenceResponse,
+ ClassificationPrediction,
InstanceSegmentationInferenceResponse,
+ InstanceSegmentationPrediction,
+ Keypoint,
KeypointsDetectionInferenceResponse,
+ KeypointsPrediction,
+ MultiLabelClassificationInferenceResponse,
+ MultiLabelClassificationPrediction,
ObjectDetectionInferenceResponse,
ObjectDetectionPrediction,
- ClassificationPrediction,
- MultiLabelClassificationPrediction,
- InstanceSegmentationPrediction,
- KeypointsPrediction,
- Keypoint,
Point,
)

12 changes: 9 additions & 3 deletions tests/inference/unit_tests/usage_tracking/test_collector.py
@@ -764,7 +764,7 @@ def test_zip_usage_payloads_with_different_exec_session_ids():
"fps": 10,
"exec_session_id": "session_2",
},
- }
+ },
},
{
"fake_api1_hash": {
@@ -831,7 +831,11 @@ def test_zip_usage_payloads_with_different_exec_session_ids():

def test_system_info_with_dedicated_deployment_id():
# given
- system_info = UsageCollector.system_info(ip_address="w.x.y.z", hostname="hostname01", dedicated_deployment_id="deployment01")
+ system_info = UsageCollector.system_info(
+     ip_address="w.x.y.z",
+     hostname="hostname01",
+     dedicated_deployment_id="deployment01",
+ )

# then
expected_system_info = {
@@ -845,7 +849,9 @@ def test_system_info_with_dedicated_deployment_id():

def test_system_info_with_no_dedicated_deployment_id():
# given
- system_info = UsageCollector.system_info(ip_address="w.x.y.z", hostname="hostname01")
+ system_info = UsageCollector.system_info(
+     ip_address="w.x.y.z", hostname="hostname01"
+ )

# then
expected_system_info = {
@@ -63,8 +63,18 @@ def test_line_counter() -> None:
)

# then
assert frame1_result == {"count_in": 0, "count_out": 0, "detections_in": frame1_detections[[False, False, False, False]], "detections_out": frame1_detections[[False, False, False, False]]}
assert frame2_result == {"count_in": 1, "count_out": 1, "detections_in": frame2_detections[[True, False, False, False]], "detections_out": frame2_detections[[False, True, False, False]]}
assert frame1_result == {
"count_in": 0,
"count_out": 0,
"detections_in": frame1_detections[[False, False, False, False]],
"detections_out": frame1_detections[[False, False, False, False]],
}
assert frame2_result == {
"count_in": 1,
"count_out": 1,
"detections_in": frame2_detections[[True, False, False, False]],
"detections_out": frame2_detections[[False, True, False, False]],
}


def test_line_counter_no_trackers() -> None: