diff --git a/.dockerignore b/.dockerignore
index e88d499f..1d4e143e 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -1 +1,20 @@
+# Python cache files
+__pycache__
+*.pyc
+
+# IDE settings
+.vscode/
+
+# Version control
+.git/
+
+# Distribution / packaging
+build/
+dist/
+*.egg-info/
+
+# Virtual environments
+.venv
+
+# Testing
 /tests/manual/data
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 9712ca24..70f0f2a7 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -27,10 +27,8 @@ jobs:
         run: |
           python -m pip install --upgrade pip
           pip install ".[dev]"
-      - name: 🧹 Lint
+      - name: 🧹 Check code quality
         run: |
           make check_code_quality
-      - name: Check types with mypy
-        run: mypy .
-      - name: 🧪 Test
+      - name: 🧪 Run tests
         run: "python -m unittest"
diff --git a/Makefile b/Makefile
index 2cc78031..1d59d41f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: style check_code_quality
+.PHONY: style check_code_quality publish
 export PYTHONPATH = .
 check_dirs := roboflow
 
@@ -10,6 +10,7 @@ style:
 check_code_quality:
 	ruff format $(check_dirs) --check
 	ruff check $(check_dirs)
+	mypy $(check_dirs)
 
 publish:
 	python setup.py sdist bdist_wheel
diff --git a/pyproject.toml b/pyproject.toml
index 9c8bdec0..292292a7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,9 +11,7 @@ target-version = "py38"
 line-length = 120
 
 [tool.ruff.lint]
-select = [
-    "ALL",
-]
+select = ["ALL"]
 ignore = [
     "A",
     "ANN",
diff --git a/roboflow/__init__.py b/roboflow/__init__.py
index 10a6422e..f31b4347 100644
--- a/roboflow/__init__.py
+++ b/roboflow/__init__.py
@@ -3,6 +3,7 @@
 import sys
 import time
 from getpass import getpass
+from pathlib import Path
 from urllib.parse import urlparse
 
 import requests
@@ -59,27 +60,16 @@ def check_key(api_key, model, notebook, num_retries=0):
         return "onboarding"
 
 
-def auth(api_key):
-    r = check_key(api_key)
-    w = r["workspace"]
-
-    return Roboflow(api_key, w)
-
-
 def login(workspace=None, force=False):
     os_name = os.name
 
     if os_name == "nt":
-        default_path = os.path.join(os.getenv("USERPROFILE"), "roboflow/config.json")
+        default_path = str(Path.home() / "roboflow" / "config.json")
     else:
-        default_path = os.path.join(os.getenv("HOME"), ".config/roboflow/config.json")
+        default_path = str(Path.home() / ".config" / "roboflow" / "config.json")
 
     # default configuration location
-    conf_location = os.getenv(
-        "ROBOFLOW_CONFIG_DIR",
-        default=default_path,
-    )
-
+    conf_location = os.getenv("ROBOFLOW_CONFIG_DIR", default=default_path)
     if os.path.isfile(conf_location) and not force:
         write_line("You are already logged into Roboflow. To make a different login," "run roboflow.login(force=True).")
         return None
@@ -141,10 +131,7 @@ def initialize_roboflow(the_workspace=None):
     global active_workspace
 
-    conf_location = os.getenv(
-        "ROBOFLOW_CONFIG_DIR",
-        default=os.getenv("HOME") + "/.config/roboflow/config.json",
-    )
+    conf_location = os.getenv("ROBOFLOW_CONFIG_DIR", default=str(Path.home() / ".config" / "roboflow" / "config.json"))
 
     if not os.path.isfile(conf_location):
         raise RuntimeError("To use this method, you must first login - run roboflow.login()")
@@ -176,7 +163,7 @@ def load_model(model_url):
         project = path_parts[2]
         version = int(path_parts[-1])
     else:
-        raise ("Model URL must be from either app.roboflow.com or universe.roboflow.com")
+        raise ValueError("Model URL must be from either app.roboflow.com or universe.roboflow.com")
 
     project = operate_workspace.project(project)
     version = project.version(version)
@@ -204,7 +191,7 @@ def download_dataset(dataset_url, model_format, location=None):
         version = int(path_parts[-1])
         the_workspace = path_parts[1]
     else:
-        raise ("Model URL must be from either app.roboflow.com or universe.roboflow.com")
+        raise ValueError("Model URL must be from either app.roboflow.com or universe.roboflow.com")
 
     operate_workspace = initialize_roboflow(the_workspace=the_workspace)
     project = operate_workspace.project(project)
@@ -239,7 +226,7 @@ def auth(self):
             self.universe = True
             return self
         else:
-            w = r["workspace"]
+            w = r["workspace"]  # type: ignore[arg-type]
             self.current_workspace = w
             return self
 
diff --git a/roboflow/core/project.py b/roboflow/core/project.py
index dcebdc79..1de9924c 100644
--- a/roboflow/core/project.py
+++ b/roboflow/core/project.py
@@ -230,7 +230,7 @@ def generate_version(self, settings):
         try:
             r_json = r.json()
         except Exception:
-            raise "Error when requesting to generate a new version for project."
+            raise RuntimeError("Error when requesting to generate a new version for project.")
 
         # if the generation succeeds, return the version that is being generated
         if r.status_code == 200:
@@ -256,7 +256,7 @@ def train(
         speed=None,
         checkpoint=None,
         plot_in_notebook=False,
-    ) -> bool:
+    ):
         """
         Ask the Roboflow API to train a previously exported version's dataset.
 
@@ -503,7 +503,7 @@ def single_upload(
                 sequence_size=sequence_size,
                 **kwargs,
             )
-            image_id = uploaded_image["id"]
+            image_id = uploaded_image["id"]  # type: ignore[index]
             upload_retry_attempts = retry.retries
         except BaseException as e:
             uploaded_image = {"error": e}
@@ -518,10 +518,10 @@ def single_upload(
             uploaded_annotation = rfapi.save_annotation(
                 self.__api_key,
                 project_url,
-                annotation_name,
-                annotation_str,
+                annotation_name,  # type: ignore[type-var]
+                annotation_str,  # type: ignore[type-var]
                 image_id,
-                job_name=batch_name,
+                job_name=batch_name,  # type: ignore[type-var]
                 is_prediction=is_prediction,
                 annotation_labelmap=annotation_labelmap,
                 overwrite=annotation_overwrite,
@@ -543,10 +543,10 @@ def _annotation_params(self, annotation_path):
         if isinstance(annotation_path, dict) and annotation_path.get("rawText"):
             annotation_name = annotation_path["name"]
             annotation_string = annotation_path["rawText"]
-        elif os.path.exists(annotation_path):
-            with open(annotation_path):
-                annotation_string = open(annotation_path).read()
-            annotation_name = os.path.basename(annotation_path)
+        elif os.path.exists(annotation_path):  # type: ignore[arg-type]
+            with open(annotation_path):  # type: ignore[arg-type]
+                annotation_string = open(annotation_path).read()  # type: ignore[arg-type]
+            annotation_name = os.path.basename(annotation_path)  # type: ignore[arg-type]
         elif self.type == "classification":
             print(f"-> using {annotation_path} as classname for classification project")
             annotation_string = annotation_path
diff --git a/roboflow/core/version.py b/roboflow/core/version.py
index a439db69..00359cf6 100644
--- a/roboflow/core/version.py
+++ b/roboflow/core/version.py
@@ -766,7 +766,7 @@ def bar_progress(current, total, width=80):
 
         # write the zip file to the desired location
         with open(location + "/roboflow.zip", "wb") as f:
-            total_length = int(response.headers.get("content-length"))
+            total_length = int(response.headers.get("content-length"))  # type: ignore[arg-type]
             desc = None if TQDM_DISABLE else f"Downloading Dataset Version Zip in {location} to {format}:"
             for chunk in tqdm(
                 response.iter_content(chunk_size=1024),
diff --git a/roboflow/core/workspace.py b/roboflow/core/workspace.py
index 247e04f9..b04f92aa 100644
--- a/roboflow/core/workspace.py
+++ b/roboflow/core/workspace.py
@@ -3,7 +3,7 @@
 import json
 import os
 import sys
-from typing import List
+from typing import Any, List
 
 import numpy as np
 import requests
@@ -179,7 +179,7 @@ def two_stage(
         print(self.project(first_stage_model_name))
 
         # perform first inference
-        predictions = stage_one_model.predict(image)
+        predictions = stage_one_model.predict(image)  # type: ignore[attribute-error]
 
         if stage_one_project.type == "object-detection" and stage_two_project == "classification":
             # interact with each detected object from stage one inference results
@@ -199,7 +199,7 @@ def two_stage(
                 croppedImg.save("./temp.png")
 
                 # capture results of second stage inference from cropped image
-                results.append(stage_two_model.predict("./temp.png")[0])
+                results.append(stage_two_model.predict("./temp.png")[0])  # type: ignore[attribute-error]
 
                 # delete the written image artifact
                 try:
@@ -244,7 +244,7 @@ def two_stage_ocr(
         stage_one_model = stage_one_project.version(first_stage_model_version).model
 
         # perform first inference
-        predictions = stage_one_model.predict(image)
+        predictions = stage_one_model.predict(image)  # type: ignore[attribute-error]
 
         # interact with each detected object from stage one inference results
         if stage_one_project.type == "object-detection":
@@ -391,7 +391,7 @@ def active_learning(
         upload_destination: str = "",
         conditionals: dict = {},
         use_localhost: bool = False,
-    ) -> str:
+    ) -> Any:
         """perform inference on each image in directory and upload based on conditions
         @params:
             raw_data_location: (str) = folder of frames to be processed
@@ -470,7 +470,7 @@ def active_learning(
                     print(image2 + " --> similarity too high to --> " + image1)
                     continue  # skip this image if too similar or counter hits limit
 
-            predictions = inference_model.predict(image).json()["predictions"]
+            predictions = inference_model.predict(image).json()["predictions"]  # type: ignore[attribute-error]
 
             # collect all predictions to return to user at end
             prediction_results.append({"image": image, "predictions": predictions})
diff --git a/roboflow/models/classification.py b/roboflow/models/classification.py
index 10c45c85..c482fdaf 100644
--- a/roboflow/models/classification.py
+++ b/roboflow/models/classification.py
@@ -63,7 +63,7 @@ def __init__(
             print(f"initalizing local classification model hosted at : {local}")
             self.base_url = local
 
-    def predict(self, image_path, hosted=False):
+    def predict(self, image_path, hosted=False):  # type: ignore[override]
         """
         Run inference on an image.
 
diff --git a/roboflow/models/inference.py b/roboflow/models/inference.py
index 307eb704..bbbc5be6 100644
--- a/roboflow/models/inference.py
+++ b/roboflow/models/inference.py
@@ -121,8 +121,7 @@ def predict(self, image_path, prediction_type=None, **kwargs):
 
         params["api_key"] = self.__api_key
         params.update(**kwargs)
-
-        url = f"{self.api_url}?{urllib.parse.urlencode(params)}"
+        url = f"{self.api_url}?{urllib.parse.urlencode(params)}"  # type: ignore[attr-defined]
         response = requests.post(url, **request_kwargs)
         response.raise_for_status()
 
@@ -390,7 +389,7 @@ def download(self, format="pt", location="."):
 
         # write the zip file to the desired location
         with open(location + "/weights.pt", "wb") as f:
-            total_length = int(response.headers.get("content-length"))
+            total_length = int(response.headers.get("content-length"))  # type: ignore[arg-type]
             for chunk in tqdm(
                 response.iter_content(chunk_size=1024),
                 desc=f"Downloading weights to {location}/weights.pt",
diff --git a/roboflow/models/instance_segmentation.py b/roboflow/models/instance_segmentation.py
index ba715e42..b26c1f36 100644
--- a/roboflow/models/instance_segmentation.py
+++ b/roboflow/models/instance_segmentation.py
@@ -35,7 +35,7 @@ def __init__(
         self.colors = {} if colors is None else colors
         self.preprocessing = {} if preprocessing is None else preprocessing
 
-    def predict(self, image_path, confidence=40):
+    def predict(self, image_path, confidence=40):  # type: ignore[override]
         """
         Infers detections based on image from a specified model and image path.
 
diff --git a/roboflow/models/keypoint_detection.py b/roboflow/models/keypoint_detection.py
index 2ccd6e64..a0e86561 100644
--- a/roboflow/models/keypoint_detection.py
+++ b/roboflow/models/keypoint_detection.py
@@ -58,7 +58,7 @@ def __init__(
            print(f"initalizing local keypoint detection model hosted at : {local}")
            self.base_url = local
 
-    def predict(self, image_path, hosted=False):
+    def predict(self, image_path, hosted=False):  # type: ignore[override]
         """
         Run inference on an image.
 
diff --git a/roboflow/models/object_detection.py b/roboflow/models/object_detection.py
index dca55b05..12dfa29d 100644
--- a/roboflow/models/object_detection.py
+++ b/roboflow/models/object_detection.py
@@ -125,7 +125,7 @@ def load_model(
             format=format,
         )
 
-    def predict(
+    def predict(  # type: ignore[override]
         self,
         image_path,
         hosted=False,
@@ -175,6 +175,7 @@ def predict(
         self.__exception_check(image_path_check=image_path)
 
         resize = False
+        original_dimensions = None
         # If image is local image
         if not hosted:
             if isinstance(image_path, str):
@@ -219,7 +220,7 @@ def predict(
                 retval, buffer = cv2.imencode(".jpg", image_path)
                 # Currently cv2.imencode does not properly return shape
                 dimensions = buffer.shape
-                img_str = base64.b64encode(buffer)
+                img_str = base64.b64encode(buffer)  # type: ignore[arg-type]
                 img_str = img_str.decode("ascii")
                 resp = requests.post(
                     self.api_url,
@@ -243,7 +244,7 @@ def predict(
         if self.format == "json":
             resp_json = resp.json()
 
-            if resize:
+            if resize and original_dimensions is not None:
                 new_preds = []
                 for p in resp_json["predictions"]:
                     p["x"] = int(p["x"] * (int(original_dimensions[0]) / int(self.preprocessing["resize"]["width"])))
@@ -310,8 +311,8 @@ def plot_one_box(x, img, color=None, label=None, line_thickness=None, colors=Non
 
         self.colors = {} if colors is None else colors
 
-        if label in colors.keys() and label is not None:
-            color = colors[label]
+        if label in self.colors and label is not None:
+            color = self.colors[label]
             color = color.lstrip("#")
             color = tuple(int(color[i : i + 2], 16) for i in (0, 2, 4))
         else:
@@ -391,7 +392,7 @@ def view(button):
                 frame = cv2.flip(frame, 1)  # if your camera reverses your image
 
                 _, frame_upload = cv2.imencode(".jpeg", frame)
-                img_str = base64.b64encode(frame_upload)
+                img_str = base64.b64encode(frame_upload)  # type: ignore[arg-type]
                 img_str = img_str.decode("ascii")
 
                 # post frame to the Roboflow API
diff --git a/roboflow/util/image_utils.py b/roboflow/util/image_utils.py
index 5b25e67b..618ab15a 100644
--- a/roboflow/util/image_utils.py
+++ b/roboflow/util/image_utils.py
@@ -40,7 +40,7 @@ def mask_image(image, encoded_mask, transparency=60):
     :param transparency: alpha transparency of masks for semantic overlays
     :returns: CV2 image / numpy.ndarray matrix
     """
-    np_data = np.fromstring(base64.b64decode(encoded_mask), np.uint8)
+    np_data = np.fromstring(base64.b64decode(encoded_mask), np.uint8)  # type: ignore[no-overload]
     mask = cv2.imdecode(np_data, cv2.IMREAD_UNCHANGED)
 
     # Fallback in case the API returns an incorrectly sized mask
diff --git a/roboflow/util/prediction.py b/roboflow/util/prediction.py
index a6bee5a3..f5a9364a 100644
--- a/roboflow/util/prediction.py
+++ b/roboflow/util/prediction.py
@@ -38,7 +38,7 @@ def plot_image(image_path):
         img = Image.open(io.BytesIO(response.content))
 
     figure, axes = plt.subplots()
-    axes.imshow(img)
+    axes.imshow(img)  # type: ignore[attr-defined]
 
     return figure, axes
 
@@ -55,7 +55,7 @@ def plot_annotation(axes, prediction=None, stroke=1, transparency=60, colors=Non
 
     # Object Detection annotation
     colors = {} if colors is None else colors
-
+    prediction = prediction or {}
     stroke_color = "r"
 
     if prediction["prediction_type"] == OBJECT_DETECTION_MODEL:
@@ -283,6 +283,7 @@ def add_prediction(self, prediction=None):
 
         :param prediction: Prediction to add to the prediction group
         """
+        prediction = prediction or {}
         # If not a Prediction object then do not allow into the prediction group
         # Also checks if prediction types are the same
         # (i.e. object detection predictions in object detection groups)