
Commit

Try to fix stability reg. test of detection + crop
PawelPeczek-Roboflow committed May 2, 2024
1 parent cf90648 commit d9ad139
Showing 3 changed files with 59 additions and 14 deletions.
10 changes: 5 additions & 5 deletions inference/core/managers/decorators/base.py
@@ -41,12 +41,12 @@ def __init__(self, model_manager: ModelManager):
"""Initializes the decorator with an instance of a ModelManager."""
self.model_manager = model_manager

    def init_pingback(self):
        self.model_manager.init_pingback()
    # def init_pingback(self):
    # self.model_manager.init_pingback()

    @property
    def pingback(self):
        return self.model_manager.pingback
    # @property
    # def pingback(self):
    # return self.model_manager.pingback

    def add_model(
        self, model_id: str, api_key: str, model_id_alias: Optional[str] = None
@@ -84,6 +84,11 @@ class BlockManifest(WorkflowBlockManifest):


class DetectionOffsetBlock(WorkflowBlock):
    # TODO: This block breaks parent coordinates :( We need to fix it:
    # the block does not receive a predictions_parent_coordinates input
    # (it is not defined explicitly), so it can only offset predictions
    # in its "own" coordinates. We need to change the way the coordinate
    # transition is handled, as the current approach will never work.

    @classmethod
    def get_input_manifest(cls) -> Type[WorkflowBlockManifest]:
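The TODO above concerns re-mapping detections from a crop back to the parent image, which boils down to shifting every box by the crop's top-left corner in the parent frame. Below is a minimal sketch of that translation, assuming simple dict-based boxes; it is not the block's real code, and the function and field names are made up for illustration.

```python
from typing import Dict, List


def to_parent_coordinates(
    crop_predictions: List[Dict[str, float]],
    crop_offset_x: int,
    crop_offset_y: int,
) -> List[Dict[str, float]]:
    """Translate boxes predicted inside a crop into parent-image coordinates.

    Each box is assumed to hold pixel coordinates relative to the crop; adding the
    crop's top-left corner moves it into the parent frame. Width and height are
    unaffected by the translation.
    """
    return [
        {**box, "x": box["x"] + crop_offset_x, "y": box["y"] + crop_offset_y}
        for box in crop_predictions
    ]
```

For example, a plate box starting at x=58, y=94 inside a car crop whose top-left corner is (109, 475) starts at (167, 569) in the full image, which is exactly the arithmetic the regression test below relies on.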
@@ -42,14 +42,14 @@
        },
        {
            "type": "Crop",
            "name": "crop",
            "name": "cars_crops",
            "image": "$inputs.image",
            "predictions": "$steps.offset.predictions",
        },
        {
            "type": "RoboflowObjectDetectionModel",
            "name": "plates_detection",
            "image": "$steps.crop.crops",
            "image": "$steps.cars_crops.crops",
            "model_id": "vehicle-registration-plates-trudk/2",
        },
        {
@@ -64,7 +64,7 @@
        {
            "type": "Crop",
            "name": "plates_crops",
            "image": "$steps.crop.crops",
            "image": "$steps.cars_crops.crops",
            "predictions": "$steps.plates_offset.predictions",
        },
        {
@@ -74,6 +74,16 @@
        },
    ],
    "outputs": [
        {
            "type": "JsonField",
            "name": "cars_crops",
            "selector": "$steps.cars_crops.crops",
        },
        {
            "type": "JsonField",
            "name": "plates_crops",
            "selector": "$steps.plates_crops.crops",
        },
        {"type": "JsonField", "name": "plates_ocr", "selector": "$steps.ocr.result"},
    ],
}
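The workflow above chains car detection, a DetectionOffset step, a Crop step named cars_crops, plate detection on those crops, a second offset-plus-crop pair producing plates_crops, and OCR; the new outputs expose both crop collections alongside plates_ocr. As a rough approximation of what one offset-then-crop pair does to a single detection (assumed semantics, not the execution engine's actual implementation):

```python
import numpy as np


def offset_and_crop(
    image: np.ndarray,
    x_min: int,
    y_min: int,
    x_max: int,
    y_max: int,
    offset_px: int = 0,
) -> np.ndarray:
    """Widen a detected box by offset_px on every side, clamp it to the image, and crop."""
    height, width = image.shape[:2]
    x0 = max(x_min - offset_px, 0)
    y0 = max(y_min - offset_px, 0)
    x1 = min(x_max + offset_px, width)
    y1 = min(y_max + offset_px, height)
    return image[y0:y1, x0:x1]
```

Whether the real DetectionOffset step widens boxes symmetrically or per axis is not visible in this diff, so offset_px above is purely illustrative.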
@@ -106,11 +116,41 @@ async def test_static_crop_workflow_when_minimal_valid_input_provided(

    # then
    assert set(result.keys()) == {
        "plates_ocr"
        "plates_ocr",
        "plates_crops",
        "cars_crops",
    }, "Expected all declared outputs to be delivered"
    assert len(result["cars_crops"]) == 3, "Expected 3 cars to be detected"
    assert np.allclose(
        result["cars_crops"][0]["value"],
        license_plate_image[475:666, 109:351, :],
        atol=5,
    ), "Expected car to be detected exactly in coordinates matching reference run"
    assert np.allclose(
        result["cars_crops"][1]["value"],
        license_plate_image[380:990, 761:1757, :],
        atol=5,
    ), "Expected car to be detected exactly in coordinates matching reference run"
    assert np.allclose(
        result["cars_crops"][2]["value"],
        license_plate_image[489:619, 417:588, :],
        atol=5,
    ), "Expected car to be detected exactly in coordinates matching reference run"
    assert np.allclose(
        result["plates_crops"][0]["value"],
        license_plate_image[475 + 94 : 475 + 162, 109 + 58 : 109 + 179, :],
        atol=5,
    ), "Expected license plate to be detected exactly in coordinates matching reference run"
    assert np.allclose(
        result["plates_crops"][1]["value"],
        license_plate_image[380 + 373 : 380 + 486, 761 + 593 : 761 + 873, :],
        atol=5,
    ), "Expected license plate to be detected exactly in coordinates matching reference run"
    assert np.allclose(
        result["plates_crops"][2]["value"],
        license_plate_image[489 + 56 : 489 + 118, 417 + 49 : 417 + 143, :],
        atol=5,
    ), "Expected license plate to be detected exactly in coordinates matching reference run"
    assert len(result["plates_ocr"]) == 3, "Expected 3 predictions with OCRed values"
    assert result["plates_ocr"] == [
        "",
        "23948072",
        "",
    ], "Expected OCR results to be as verified manually while creating the test. Two outputs are empty due to insufficient quality of OCR model"
    # For some reason, OCR gives different results on different platforms, despite
    # the checks confirming it operates on the same input images as in the reference runs.
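The crop assertions above compare each returned crop against a slice of license_plate_image with atol=5. For the nested plate crops, the expected slice combines the car crop's top-left corner with the plate box found inside that crop, which is where expressions like 475 + 94 : 475 + 162 come from. A short worked restatement of that arithmetic, using the values from the first plate assertion:

```python
# Values taken from the first plates_crops assertion above.
car_x_min, car_y_min = 109, 475   # top-left of the first car crop in the full image
plate_y0, plate_y1 = 94, 162      # plate box rows inside that car crop
plate_x0, plate_x1 = 58, 179      # plate box columns inside that car crop

rows = slice(car_y_min + plate_y0, car_y_min + plate_y1)  # 569:637
cols = slice(car_x_min + plate_x0, car_x_min + plate_x1)  # 167:288

# So the assertion checks result["plates_crops"][0]["value"] against
# license_plate_image[569:637, 167:288, :] to within atol=5.
```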
