diff --git a/.gitignore b/.gitignore
index 710adca5..a81c8ee1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,3 @@
-test.py
-
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 00000000..26d33521
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git a/.idea/inspectionProfiles/Project_Default.xml b/.idea/inspectionProfiles/Project_Default.xml
new file mode 100644
index 00000000..31671710
--- /dev/null
+++ b/.idea/inspectionProfiles/Project_Default.xml
@@ -0,0 +1,27 @@
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/inspectionProfiles/profiles_settings.xml b/.idea/inspectionProfiles/profiles_settings.xml
new file mode 100644
index 00000000..105ce2da
--- /dev/null
+++ b/.idea/inspectionProfiles/profiles_settings.xml
@@ -0,0 +1,6 @@
+<component name="InspectionProjectProfileManager">
+  <settings>
+    <option name="USE_PROJECT_PROFILE" value="false" />
+    <version value="1.0" />
+  </settings>
+</component>
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 00000000..df3c898e
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/.idea/modules.xml b/.idea/modules.xml
new file mode 100644
index 00000000..3e9159ed
--- /dev/null
+++ b/.idea/modules.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/roboflow-python.iml" filepath="$PROJECT_DIR$/.idea/roboflow-python.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git a/.idea/roboflow-python.iml b/.idea/roboflow-python.iml
new file mode 100644
index 00000000..74d515a0
--- /dev/null
+++ b/.idea/roboflow-python.iml
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
new file mode 100644
index 00000000..94a25f7f
--- /dev/null
+++ b/.idea/vcs.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$" vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/README.md b/README.md
index bf90f096..78be5351 100644
--- a/README.md
+++ b/README.md
@@ -1,79 +1 @@
-# Roboflow Python Library
-
-This is a helper library to load your [Roboflow](https://roboflow.ai) datasets
-into your python scripts and Jupyter notebooks.
-
-## Requirements
-
-This package requires python >=3.6 and a (free)
-[Roboflow](https://roboflow.ai) account.
-
-## Installing
-
-#### With PIP
-```sh
-pip install roboflow
-```
-
-#### With Anaconda
-```sh
-conda install roboflow
-```
-
-## Setup
-
-The `roboflow` package works in conjunction with your
-[Roboflow](https://roboflow.ai) account.
-
-From your `Account` page, click `Roboflow Keys` to get your API key.
-You can then use the `roboflow` python package to manage downloading
-your datasets in various formats.
-
-```python
-import roboflow
-roboflow.auth("<>")
-info = roboflow.load("chess-sample", 1, "tfrecord")
-
-# dataset is now downloaded and unzipped in your current directory
-# and info contains the paths you need to load it into your favorite
-# machine learning libraries
-```
-
-By default the folder is named
-```
-${dataset-name}.${version-number}-${version-name}.${format}
-```
-(For example, `Chess Sample.v1-small-gray.coco`).
-
-The file hierarchy is three folders containing the `train`, `valid`, and `test`
-data you selected in the Roboflow upload flow (and the format you specified
-in `roboflow.load` above). There is also a `README.roboflow.txt` describing the preprocessing and augmentation steps and, optionally, a `README.dataset.txt`
-provided by the person who shared the dataset.
-
-
-
-## Doing Inference
-It's important to pre-process your images for inference the same way you
-pre-processed your training images. For this, get a pre-processor via the
-`roboflow.infer` method which will return a function you can use to pre-process
-your images.
-
-```python
-import roboflow
-roboflow.auth("<>")
-process = roboflow.preprocessor("chess-sample", 1)
-images = process.fromFile("example.jpg") # returns a numpy array (of 1 image, unless you used tiling)
-```
-
-## Benefits
-
-This package currently provides two main benefits over downloading and loading
-your datasets manually.
-
-1. If you have previously loaded your dataset, it will automatically use the local copy rather than re-downloading.
-2. You can dynamically choose the export format at runtime rather than export-time.
-
-## Roadmap
-
-We plan to include more features in the future to allow you (for example,
-to let you easily do inference on your trained models).
+# Roboflow Python Library
\ No newline at end of file
diff --git a/build.sh b/build.sh
deleted file mode 100644
index a82ea34a..00000000
--- a/build.sh
+++ /dev/null
@@ -1 +0,0 @@
-echo "Building"
diff --git a/img/file-layout.png b/img/file-layout.png
deleted file mode 100644
index f743eb64..00000000
Binary files a/img/file-layout.png and /dev/null differ
diff --git a/meta.yaml b/meta.yaml
index 14d1bf85..72caaade 100644
--- a/meta.yaml
+++ b/meta.yaml
@@ -11,4 +11,4 @@ requirements:
about:
license: Apache
license_file: LICENSE
- summary: Loader for Roboflow datasets.
+ summary: The official Roboflow Python Package
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..d59cf725
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+requires = [
+ "setuptools>=57",
+ "wheel"
+]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..f7ddf2d2
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,15 @@
+certifi==2021.5.30
+chardet==4.0.0
+cycler==0.10.0
+idna==2.10
+kiwisolver==1.3.1
+matplotlib==3.4.2
+numpy==1.21.0
+opencv-python==4.5.3.56
+Pillow==8.3.0
+pyparsing==2.4.7
+python-dateutil==2.8.1
+python-dotenv==0.18.0
+requests==2.25.1
+six==1.16.0
+urllib3==1.26.6
diff --git a/roboflow/__init__.py b/roboflow/__init__.py
index 54a8aede..cc494e5b 100644
--- a/roboflow/__init__.py
+++ b/roboflow/__init__.py
@@ -1,77 +1,53 @@
+import json
import time
+
import requests
-API_URL = "https://api.roboflow.ai"
-_token = None
-_token_expires = 0
+from roboflow.core.project import Project
-def token():
- global _token
- return _token
+TOKEN = None
+API_URL = "http://localhost:5000"
+TOKEN_EXPIRES = None
+USER_API_KEY = ""
-def auth(api_key):
- global _token
+def auth(api_key):
+ global TOKEN, TOKEN_EXPIRES
+ global USER_API_KEY
+ USER_API_KEY = api_key
response = requests.post(API_URL + "/token", data=({
"api_key": api_key
}))
- r = response.json();
- if "error" in r:
- raise RuntimeError(response.text)
-
- _token = r["token"]
- _token_expires = time.time() + r["expires_in"]
-
- return r
-
-def dataset(name):
- global _token
-
- if not _token:
- raise Exception("You must first auth with your API key to call this method.")
-
- response = requests.get(API_URL + "/dataset/" + name, params=({
- "access_token": _token
- }))
-
- r = response.json();
- if "error" in r:
- raise RuntimeError(response.text)
-
- return r
-
-def version(dataset_name, version_id):
- global _token
-
- if not _token:
- raise Exception("You must first auth with your API key to call this method.")
-
- response = requests.get(API_URL + "/dataset/" + dataset_name + '/' + str(version_id), params=({
- "access_token": _token
- }))
-
- r = response.json();
- if "error" in r:
+ r = response.json()
+ if "error" in r or response.status_code != 200:
raise RuntimeError(response.text)
- return r
+ TOKEN = r['token']
+ TOKEN_EXPIRES = r['expires_in']
+ return Roboflow(api_key, TOKEN, TOKEN_EXPIRES)
-def export(dataset_name, version_id, format):
- global _token
- if not _token:
- raise Exception("You must first auth with your API key to call this method.")
+class Roboflow():
+ def __init__(self, api_key, access_token, token_expires):
+ self.api_key = api_key
+ self.access_token = access_token
+ self.token_expires = token_expires
- response = requests.get(API_URL + "/dataset/" + dataset_name + '/' + str(version_id) + '/' + format, params=({
- "access_token": _token
- }))
+ def list_workspaces(self):
+ workspaces = requests.get(API_URL + '/workspaces?access_token=' + self.access_token).json()
+ print(json.dumps(workspaces, indent=2))
+ return workspaces
- r = response.json();
- if "error" in r:
- raise RuntimeError(response.text)
+ def load_workspace(self):
+ pass
- return r
+ def load(self, dataset_slug):
+ # TODO: Change endpoint once written
+        LOAD_ENDPOINT = "ENDPOINT_TO_GET_DATASET_INFO" + dataset_slug
+ response = requests.get(LOAD_ENDPOINT).json()
+ return Project(self.api_key, response['dataset_slug'], response['type'], response['exports'])
-def load(dataset, *args):
- print(f"loading {dataset} {args}")
+ def __str__(self):
+ json_value = {'api_key': self.api_key, 'auth_token': self.access_token, 'token_expires': self.token_expires}
+ return json.dumps(json_value, indent=2)
\ No newline at end of file
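
For context, the intended entry-point flow introduced above would look roughly like this. This is a sketch only: the dataset-info endpoint behind `load()` is still a TODO placeholder in this diff, and `API_URL` currently points at a local development server, so the calls will not succeed against production yet.

```python
import roboflow

# exchange the API key for a short-lived token and get a Roboflow client back
rf = roboflow.auth("YOUR_API_KEY")

# list the workspaces visible to this token (also pretty-prints them)
workspaces = rf.list_workspaces()

# expected to return a Project once the dataset-info endpoint is implemented
project = rf.load("your-dataset-slug")
```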
diff --git a/roboflow/archive/__init__.py b/roboflow/archive/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/roboflow/archive/plot.py b/roboflow/archive/plot.py
new file mode 100644
index 00000000..0181d4d3
--- /dev/null
+++ b/roboflow/archive/plot.py
@@ -0,0 +1,102 @@
+# import io
+# import os
+#
+# import numpy as np
+# import requests
+# from PIL import Image
+# import matplotlib.pyplot as plt
+# from matplotlib import patches
+#
+# from roboflow.util.image_utils import check_image_url
+# from roboflow.config import OBJECT_DETECTION_MODEL, CLASSIFICATION_MODEL
+#
+#
+# def __plot_image(image_path):
+# """
+# Helper method to plot image
+#
+# :param image_path: path of image to be plotted (can be hosted or local)
+# :return:
+# """
+# # Exception to check if image path exists
+# __exception_check(image_path_check=image_path)
+# # Try opening local image
+# try:
+# img = Image.open(image_path)
+# except OSError:
+# # Try opening Hosted image
+# response = requests.get(image_path)
+# img = Image.open(io.BytesIO(response.content))
+# # Plot image axes
+# figure, axes = plt.subplots()
+# axes.imshow(img)
+# return figure, axes
+#
+#
+# def __plot_annotation(axes, prediction=None, stroke=1):
+# """
+# Helper method to plot annotations
+#
+# :param axes:
+# :param prediction:
+# :return:
+# """
+# # Object Detection annotation
+# if prediction['prediction_type'] == OBJECT_DETECTION_MODEL:
+# # Get height, width, and center coordinates of prediction
+# if prediction is not None:
+# height = prediction['height']
+# width = prediction['width']
+# x = prediction['x']
+# y = prediction['y']
+# rect = patches.Rectangle((x - width / 2, y - height / 2), width, height,
+# linewidth=stroke, edgecolor='r', facecolor='none')
+# # Plot Rectangle
+# axes.add_patch(rect)
+# elif prediction['prediction_type'] == CLASSIFICATION_MODEL:
+# axes.set_title('Class: ' + prediction['top'] + " | Confidence: " + prediction['confidence'])
+#
+#
+# def plot_predictions(prediction=None, prediction_group=None, binary_data=None, stroke=1):
+# """
+#
+# :param image_path:
+# :param prediction:
+# :param prediction_group:
+# :return:
+# """
+# # Check if user has inputted prediction
+# if prediction is not None:
+# # Exception to check if image path exists
+# __exception_check(image_path_check=prediction['image_path'])
+# figure, axes = __plot_image(prediction['image_path'])
+#
+# __plot_annotation(axes, prediction, stroke)
+# plt.show()
+# # Check if user inputted prediction group
+# elif prediction_group is not None:
+# if len(prediction_group) > 0:
+# # Check if image path exists
+# __exception_check(image_path_check=prediction_group.base_image_path)
+# # Plot image if image path exists
+# figure, axes = __plot_image(prediction_group.base_image_path)
+# # Plot annotations in prediction group
+# for single_prediction in prediction_group:
+# __plot_annotation(axes, single_prediction, stroke)
+#
+# plt.show()
+# # Raise exception if no prediction or prediction group was specified
+# elif binary_data is not None:
+# image_stream = io.BytesIO(binary_data)
+# pil_image = Image.open(image_stream)
+# plt.imshow(np.asarray(pil_image))
+# plt.show()
+# else:
+# raise Exception("No Prediction, Prediction Group, or Binary Data Specified")
+#
+#
+# def __exception_check(image_path_check=None):
+# # Check if Image path exists exception check (for both hosted URL and local image)
+# if image_path_check is not None:
+# if not os.path.exists(image_path_check) and not check_image_url(image_path_check):
+# raise Exception("Image does not exist at " + image_path_check + "!")
diff --git a/roboflow/config.py b/roboflow/config.py
new file mode 100644
index 00000000..15811943
--- /dev/null
+++ b/roboflow/config.py
@@ -0,0 +1,3 @@
+OBJECT_DETECTION_MODEL = "ObjectDetectionModel"
+CLASSIFICATION_MODEL = "ClassificationModel"
+PREDICTION_OBJECT = "Prediction"
\ No newline at end of file
diff --git a/roboflow/core/__init__.py b/roboflow/core/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/roboflow/core/project.py b/roboflow/core/project.py
new file mode 100644
index 00000000..8534dae7
--- /dev/null
+++ b/roboflow/core/project.py
@@ -0,0 +1,92 @@
+import base64
+import io
+import os
+import pathlib
+import urllib
+
+import cv2
+import requests
+from PIL import Image
+
+from roboflow.models.classification import ClassificationModel
+from roboflow.models.object_detection import ObjectDetectionModel
+
+
+class Project():
+ def __init__(self, api_key, dataset_slug, type, exports):
+ # TODO: Write JS endpoint to get all this Project info
+ self.api_key = api_key
+ self.dataset_slug = dataset_slug
+ self.type = type
+ # List of all versions
+ self.exports = exports
+
+ def model(self, version):
+ if version not in self.exports:
+ raise RuntimeError(
+ version + " is an invalid version; please export a different version from " + str(self.exports))
+ # TODO: Write JS endpoint to get model info
+ # Check whether model exists before initializing model
+ MODEL_INFO_ENDPOINT = "" + version
+ model_info = requests.get(MODEL_INFO_ENDPOINT).json()
+ if not model_info['exists']:
+ raise RuntimeError("Model does not exist for this version (" + version + ")")
+
+ if self.type == "object-detection":
+ return ObjectDetectionModel(self.api_key, self.dataset_slug, version)
+ elif self.type == "classification":
+ return ClassificationModel(self.api_key, self.dataset_slug, version)
+
+ def upload(self, image_path, annotation_path=None, hosted_image=False, split='train'):
+ success = False
+ image_id = None
+ image_type = pathlib.Path(image_path).suffix
+ if not hosted_image:
+ self.image_upload_url = "".join([
+ "https://api.roboflow.com/dataset/", self.dataset_slug, "/upload",
+ "?api_key=", self.api_key,
+ "&name=" + os.path.basename(image_path),
+ "&split=" + split
+ ])
+            image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
+ pilImage = Image.fromarray(image)
+ # Convert to JPEG Buffer
+ buffered = io.BytesIO()
+ pilImage.save(buffered, quality=100, format="JPEG")
+
+ # Base 64 Encode
+ img_str = base64.b64encode(buffered.getvalue())
+ img_str = img_str.decode("ascii")
+
+ response = requests.post(self.image_upload_url, data=img_str, headers={
+ "Content-Type": "application/x-www-form-urlencoded"
+ })
+ success, image_id = response.json()['success'], response.json()['id']
+
+ else:
+ upload_url = "".join([
+ "https://api.roboflow.com/dataset/" + self.dataset_slug + "/upload",
+ "?api_key=" + self.api_key,
+ "&name=" + os.path.basename(image_path),
+ "&split=" + split,
+ "&image=" + urllib.parse.quote_plus(image_path)
+ ])
+
+ response = requests.post(upload_url)
+ success, image_id = response.json()['success'], response.json()['id']
+
+ # To upload annotations
+ if annotation_path is not None and image_id is not None and success:
+ annotation_string = open(annotation_path, "r").read()
+
+ self.annotation_upload_url = "".join([
+ "https://api.roboflow.com/dataset/", self.dataset_slug, "/annotate/", image_id,
+ "?api_key=", self.api_key,
+ "&name=" + os.path.basename(annotation_path)
+ ])
+
+ annotation_response = requests.post(self.annotation_upload_url, data=annotation_string, headers={
+ "Content-Type": "text/plain"
+ }).json()
+
+ success = annotation_response['success']
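
As a rough usage sketch of the Project class added above (the dataset slug, file names, and version below are hypothetical, and `model()` depends on a model-info endpoint that is still marked TODO):

```python
# assumes `rf` is the Roboflow client returned by roboflow.auth(...)
project = rf.load("your-dataset-slug")

# upload a local image plus its annotation file into the train split
project.upload("example.jpg", annotation_path="example.xml", split="train")

# fetch an inference model for an exported version; raises RuntimeError if the
# version was never exported or no trained model exists for it
model = project.model("1")
```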
diff --git a/roboflow/models/__init__.py b/roboflow/models/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/roboflow/models/classification.py b/roboflow/models/classification.py
new file mode 100644
index 00000000..c1aeac8c
--- /dev/null
+++ b/roboflow/models/classification.py
@@ -0,0 +1,87 @@
+import base64
+import io
+import os
+
+import requests
+from PIL import Image
+
+from roboflow.util.prediction import PredictionGroup
+from roboflow.config import CLASSIFICATION_MODEL
+
+
+class ClassificationModel:
+ def __init__(self, api_key, dataset_slug=None, version=None):
+ """
+
+        :param api_key: Your Roboflow API key
+        :param dataset_slug: The url-safe version of the dataset name
+        :param version: The version number of the dataset to run inference against
+ """
+ # Instantiate different API URL parameters
+ self.api_key = api_key
+ self.dataset_slug = dataset_slug
+ self.version = version
+ self.base_url = "https://classify.roboflow.com/"
+
+ if dataset_slug is not None and version is not None:
+ self.__generate_url()
+
+ def predict(self, image_path, hosted=False):
+ """
+
+        :param image_path: path to the image (local path, or hosted URL if hosted=True)
+        :param hosted: whether image_path refers to a hosted image URL
+        :return: a PredictionGroup containing the classification result
+ """
+ self.__exception_check(image_path_check=image_path)
+ if not hosted:
+ # Load Image with PIL
+ image = Image.open(image_path).convert("RGB")
+
+ # Convert to JPEG Buffer
+ buffered = io.BytesIO()
+ image.save(buffered, quality=90, format="JPEG")
+
+ # Base 64 Encode
+ img_str = base64.b64encode(buffered.getvalue())
+ img_str = img_str.decode("ascii")
+
+ # POST to the API
+ resp = requests.post(self.api_url, data=img_str, headers={
+ "Content-Type": "application/x-www-form-urlencoded"
+ })
+
+ return PredictionGroup.create_prediction_group(resp.json(),
+ image_path=image_path,
+ prediction_type=CLASSIFICATION_MODEL)
+
+ def load_model(self, dataset_slug, version):
+ """
+
+        :param dataset_slug: The url-safe version of the dataset name
+        :param version: The version number of the dataset
+        :return: None; regenerates the inference URL for the new dataset and version
+ """
+ self.dataset_slug = dataset_slug
+ self.version = version
+ self.__generate_url()
+
+ def __generate_url(self):
+ """
+
+ :return:
+ """
+ self.api_url = "".join([
+ self.base_url + self.dataset_slug + '/' + self.version,
+ "?api_key=" + self.api_key,
+ "&name=YOUR_IMAGE.jpg"])
+
+ def __exception_check(self, image_path_check=None):
+ """
+
+ :param image_path_check:
+ :return:
+ """
+ if image_path_check is not None:
+ if not os.path.exists(image_path_check):
+ raise Exception("Image does not exist at " + image_path_check + "!")
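
The classification model can also be used standalone; a minimal sketch, assuming a valid API key and an existing local image (the slug, version, and file name are placeholders):

```python
from roboflow.models.classification import ClassificationModel

model = ClassificationModel("YOUR_API_KEY", dataset_slug="your-dataset", version="1")

# classify a local image; returns a PredictionGroup built from the JSON response
predictions = model.predict("example.jpg", hosted=False)
print(predictions)
```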
diff --git a/roboflow/models/object_detection.py b/roboflow/models/object_detection.py
new file mode 100644
index 00000000..e063dba7
--- /dev/null
+++ b/roboflow/models/object_detection.py
@@ -0,0 +1,162 @@
+import base64
+import io
+import os
+
+import requests
+import urllib
+from PIL import Image
+
+from roboflow.config import OBJECT_DETECTION_MODEL
+from roboflow.util.prediction import PredictionGroup
+
+from roboflow.util.image_utils import check_image_url
+
+
+class ObjectDetectionModel:
+ def __init__(self, api_key, dataset_slug=None, version=None, local=False, classes=None, overlap=30, confidence=40,
+ stroke=1, labels=False, format="json"):
+ """
+ From Roboflow Docs:
+
+ :param api_key: Your API key (obtained via your workspace API settings page)
+ :param dataset_slug: The url-safe version of the dataset name. You can find it in the web UI by looking at
+ the URL on the main project view or by clicking the "Get curl command" button in the train results section of
+ your dataset version after training your model.
+ :param local: Boolean value dictating whether to use the local server or hosted API
+        :param version: The version number identifying the version of your dataset
+ :param classes: Restrict the predictions to only those of certain classes. Provide as a comma-separated string.
+ :param overlap: The maximum percentage (on a scale of 0-100) that bounding box predictions of the same class are
+ allowed to overlap before being combined into a single box.
+ :param confidence: A threshold for the returned predictions on a scale of 0-100. A lower number will return
+        more predictions. A higher number will return fewer high-certainty predictions.
+ :param stroke: The width (in pixels) of the bounding box displayed around predictions (only has an effect when
+ format is image)
+ :param labels: Whether or not to display text labels on the predictions (only has an effect when format is
+ image).
+ :param format: json - returns an array of JSON predictions. (See response format tab).
+ image - returns an image with annotated predictions as a binary blob with a Content-Type
+ of image/jpeg.
+ """
+ # Instantiate different API URL parameters
+ self.api_key = api_key
+ self.dataset_slug = dataset_slug
+ self.version = version
+ self.classes = classes
+ self.overlap = overlap
+ self.confidence = confidence
+ self.overlap = overlap
+ self.confidence = confidence
+ self.stroke = stroke
+ self.labels = labels
+ self.format = format
+
+ if not local:
+ self.base_url = "https://detect.roboflow.com/"
+ else:
+ self.base_url = "http://localhost:9001/"
+
+ # If dataset slug not none, instantiate API URL
+ if dataset_slug is not None and version is not None:
+ self.__generate_url()
+
+ def load_model(self, dataset_slug, version, local=None, classes=None, overlap=None, confidence=None,
+ stroke=None, labels=None, format=None):
+ """
+        Loads a model for a given dataset slug and version and regenerates the inference URL
+
+        :param dataset_slug: The url-safe version of the dataset name; the remaining parameters mirror __init__ and, when provided, override the stored values
+ """
+ self.dataset_slug = dataset_slug
+ self.version = version
+ self.__generate_url(local=local, classes=classes, overlap=overlap, confidence=confidence,
+ stroke=stroke, labels=labels, format=format)
+
+ def predict(self, image_path, hosted=False, format=None):
+ """
+ Infers detections based on image from specified model and image path
+
+ :param image_path: Path to image (can be local or hosted)
+ :param hosted: If image located on a hosted server, hosted should be True
+ :param format: output format from this method
+ :return: PredictionGroup --> a group of predictions based on Roboflow JSON response
+ """
+ self.__generate_url(format=format)
+ # Check if image exists at specified path or URL
+ self.__exception_check(image_path_check=image_path)
+
+ # If image is local image
+ if not hosted:
+ # Open Image in RGB Format
+ image = Image.open(image_path).convert("RGB")
+ # Create buffer
+ buffered = io.BytesIO()
+ image.save(buffered, quality=90, format="JPEG")
+ # Base64 encode image
+ img_str = base64.b64encode(buffered.getvalue())
+ img_str = img_str.decode("ascii")
+ # Post to API and return response
+ resp = requests.post(self.api_url, data=img_str, headers={
+ "Content-Type": "application/x-www-form-urlencoded"
+ })
+ else:
+ # Create API URL for hosted image (slightly different)
+ self.api_url += "&image=" + urllib.parse.quote_plus(image_path)
+
+ # POST to the API
+ resp = requests.post(self.api_url)
+ if resp.status_code == 403:
+ raise Exception("API Credentials not Authorized")
+ # Return a prediction group if JSON data
+ if self.format == "json":
+ return PredictionGroup.create_prediction_group(resp.json(),
+ image_path=image_path,
+ prediction_type=OBJECT_DETECTION_MODEL)
+ # Returns base64 encoded Data
+ elif self.format == "image":
+ return resp.content
+
+ def __exception_check(self, image_path_check=None):
+ # Check if Image path exists exception check (for both hosted URL and local image)
+ if image_path_check is not None:
+ if not os.path.exists(image_path_check) and not check_image_url(image_path_check):
+ raise Exception("Image does not exist at " + image_path_check + "!")
+
+ def __generate_url(self, local=None, classes=None, overlap=None, confidence=None,
+ stroke=None, labels=None, format=None):
+
+ # Reassign parameters if any parameters are changed
+ if local is not None:
+ if not local:
+ self.base_url = "https://detect.roboflow.com/"
+ else:
+ self.base_url = "http://localhost:9001/"
+ """
+ There is probably a better way to do this lol
+ TODO: Refactor this!
+ """
+ if classes is not None:
+ self.classes = classes
+ if overlap is not None:
+ self.overlap = overlap
+ if confidence is not None:
+ self.confidence = confidence
+ if stroke is not None:
+ self.stroke = stroke
+ if labels is not None:
+ self.labels = labels
+ if format is not None:
+ self.format = format
+
+ # Create the new API URL
+ self.api_url = "".join([
+ self.base_url + self.dataset_slug + '/' + self.version,
+ "?api_key=" + self.api_key,
+ "&name=YOUR_IMAGE.jpg",
+ "&overlap=" + str(self.overlap),
+ "&confidence=" + str(self.confidence),
+ "&stroke=" + str(self.stroke),
+ "&labels=" + str(self.labels).lower(),
+ "&format=" + self.format
+ ])
+ if self.classes is not None:
+ self.api_url += "&classes=" + self.classes
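
tests/test.py later in this diff exercises the local-image path; the hosted-image path differs only in passing `hosted=True` with a URL. A sketch, with placeholder credentials and a placeholder URL:

```python
from roboflow.models.object_detection import ObjectDetectionModel

model = ObjectDetectionModel("YOUR_API_KEY", dataset_slug="your-dataset", version="1",
                             confidence=50, overlap=30)

# run inference on an image hosted at a public URL; format="image" would instead
# return the annotated image as raw bytes
group = model.predict("https://example.com/image.jpg", hosted=True, format="json")
group.plot()
```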
diff --git a/roboflow/upload.py b/roboflow/upload.py
new file mode 100644
index 00000000..e69de29b
diff --git a/roboflow/util/__init__.py b/roboflow/util/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/roboflow/util/image_utils.py b/roboflow/util/image_utils.py
new file mode 100644
index 00000000..03bb0b42
--- /dev/null
+++ b/roboflow/util/image_utils.py
@@ -0,0 +1,9 @@
+import requests
+
+
+def check_image_url(url):
+    if url.startswith("http://") or url.startswith("https://"):
+ r = requests.head(url)
+ return r.status_code == requests.codes.ok
+
+ return False
diff --git a/roboflow/util/prediction.py b/roboflow/util/prediction.py
new file mode 100644
index 00000000..85acff8e
--- /dev/null
+++ b/roboflow/util/prediction.py
@@ -0,0 +1,236 @@
+import io
+import json
+import os
+import warnings
+import requests
+
+import matplotlib.pyplot as plt
+from matplotlib import patches
+from PIL import Image
+
+from roboflow.config import OBJECT_DETECTION_MODEL, PREDICTION_OBJECT, CLASSIFICATION_MODEL
+from roboflow.util.image_utils import check_image_url
+
+
+def exception_check(image_path_check=None):
+ # Check if Image path exists exception check (for both hosted URL and local image)
+ if image_path_check is not None:
+ if not os.path.exists(image_path_check) and not check_image_url(image_path_check):
+ raise Exception("Image does not exist at " + image_path_check + "!")
+
+
+def plot_image(image_path):
+ """
+ Helper method to plot image
+
+ :param image_path: path of image to be plotted (can be hosted or local)
+ :return:
+ """
+ # Exception to check if image path exists
+ exception_check(image_path_check=image_path)
+ # Try opening local image
+ try:
+ img = Image.open(image_path)
+ except OSError:
+ # Try opening Hosted image
+ response = requests.get(image_path)
+ img = Image.open(io.BytesIO(response.content))
+ # Plot image axes
+ figure, axes = plt.subplots()
+ axes.imshow(img)
+ return figure, axes
+
+
+def plot_annotation(axes, prediction=None, stroke=1):
+ """
+ Helper method to plot annotations
+
+    :param axes: matplotlib axes to draw the annotation on
+    :param prediction: the Prediction (or prediction dict) to draw
+    :param stroke: line width of the bounding box (object detection only)
+ """
+ # Object Detection annotation
+ if prediction['prediction_type'] == OBJECT_DETECTION_MODEL:
+ # Get height, width, and center coordinates of prediction
+ if prediction is not None:
+ height = prediction['height']
+ width = prediction['width']
+ x = prediction['x']
+ y = prediction['y']
+ rect = patches.Rectangle((x - width / 2, y - height / 2), width, height,
+ linewidth=stroke, edgecolor='r', facecolor='none')
+ # Plot Rectangle
+ axes.add_patch(rect)
+ elif prediction['prediction_type'] == CLASSIFICATION_MODEL:
+        axes.set_title('Class: ' + prediction['top'] + " | Confidence: " + str(prediction['confidence']))
+
+
+class Prediction:
+ def __init__(self, json_prediction, image_path, prediction_type=OBJECT_DETECTION_MODEL):
+ """
+ Generalized Prediction for both Object Detection and Classification Models
+
+ :param json_prediction:
+ :param image_path:
+ """
+ # Set image path in JSON prediction
+ json_prediction['image_path'] = image_path
+ json_prediction['prediction_type'] = prediction_type
+ self.json_prediction = json_prediction
+
+ def json(self):
+ return self.json_prediction
+
+ def plot(self, stroke=1):
+ # Exception to check if image path exists
+ exception_check(image_path_check=self['image_path'])
+ figure, axes = plot_image(self['image_path'])
+
+ plot_annotation(axes, self, stroke)
+ plt.show()
+
+ def save(self, path='predictions.jpg'):
+ if self['prediction_type'] == OBJECT_DETECTION_MODEL:
+ pass
+ elif self['prediction_type'] == CLASSIFICATION_MODEL:
+ pass
+
+ def __str__(self) -> str:
+ """
+ :return: JSON formatted string of prediction
+ """
+ # Pretty print the JSON prediction as a String
+ prediction_string = json.dumps(self.json_prediction, indent=2)
+ return prediction_string
+
+ def __getitem__(self, key):
+ """
+
+ :param key:
+ :return:
+ """
+ # Allows the prediction to be accessed like a dictionary
+ return self.json_prediction[key]
+
+ # Make representation equal to string value
+ __repr__ = __str__
+
+
+class PredictionGroup:
+ def __init__(self, *args):
+ """
+ :param args: The prediction(s) to be added to the prediction group
+ """
+ # List of predictions (core of the PredictionGroup)
+ self.predictions = []
+ # Base image path (path of image of first prediction in prediction group)
+ self.base_image_path = ''
+ # Base prediction type (prediction type of image of first prediction in prediction group)
+ self.base_prediction_type = ''
+ # Iterate through the arguments
+ for index, prediction in enumerate(args):
+ # Set base image path based on first prediction
+ if index == 0:
+ self.base_image_path = prediction['image_path']
+ self.base_prediction_type = prediction['prediction_type']
+ # If not a Prediction object then do not allow into the prediction group
+ self.__exception_check(is_prediction_check=prediction)
+ # Add prediction to prediction group otherwise
+ self.predictions.append(prediction)
+
+ def add_prediction(self, prediction=None):
+ """
+
+ :param prediction: Prediction to add to the prediction group
+ """
+ # If not a Prediction object then do not allow into the prediction group
+ # Also checks if prediction types are the same (i.e. object detection predictions in object detection groups)
+ self.__exception_check(is_prediction_check=prediction, prediction_type_check=prediction['prediction_type'])
+ # If there is more than one prediction and the prediction image path is
+ # not the group image path then warn user
+ if self.__len__() > 0:
+ self.__exception_check(image_path_check=prediction['image_path'])
+        # If the prediction group is empty, set the base image path to that of the prediction
+ elif self.__len__() == 0:
+ self.base_image_path = prediction['image_path']
+ # Append prediction to group
+ self.predictions.append(prediction)
+
+ def plot(self, stroke=1):
+ if len(self) > 0:
+ # Check if image path exists
+ exception_check(image_path_check=self.base_image_path)
+ # Plot image if image path exists
+ figure, axes = plot_image(self.base_image_path)
+ # Plot annotations in prediction group
+ for single_prediction in self:
+ plot_annotation(axes, single_prediction, stroke)
+
+ plt.show()
+
+ def save(self, path="predictions.jpg"):
+ # TODO: Implement save to save prediction as a image
+        if self.base_prediction_type == OBJECT_DETECTION_MODEL:
+ pass
+        elif self.base_prediction_type == CLASSIFICATION_MODEL:
+ pass
+
+ def __str__(self):
+ """
+
+        :return: each prediction in the group pretty-printed as JSON, separated by blank lines
+ """
+ # final string to be returned for the prediction group
+ prediction_group_string = ""
+ # Iterate through the predictions and convert each prediction into a string format
+ for prediction in self.predictions:
+ prediction_group_string += str(prediction) + "\n\n"
+ # return the prediction group string
+ return prediction_group_string
+
+ def __getitem__(self, index):
+ # Allows prediction group to be accessed via an index
+ return self.predictions[index]
+
+ def __len__(self):
+        # Length of the prediction group, based on the number of predictions
+ return len(self.predictions)
+
+ def __exception_check(self, is_prediction_check=None, image_path_check=None, prediction_type_check=None):
+ # Ensures only predictions can be added to a prediction group
+ if is_prediction_check is not None:
+            if type(is_prediction_check).__name__ != PREDICTION_OBJECT:
+ raise Exception("Cannot add type " + type(is_prediction_check).__name__ + " to PredictionGroup")
+
+ # Warns user if predictions have different prediction types
+ if prediction_type_check is not None:
+ if self.__len__() > 0 and prediction_type_check != self.base_prediction_type:
+ warnings.warn(
+ "This prediction is a different type (" + prediction_type_check +
+ ") than the prediction group base type (" + self.base_prediction_type +
+ ")")
+
+ # Gives user warning that base path is not equal to image path
+ if image_path_check is not None:
+ if self.base_image_path != image_path_check:
+ warnings.warn(
+ "This prediction has a different image path (" + image_path_check +
+ ") than the prediction group base image path (" + self.base_image_path +
+ ")")
+
+ @staticmethod
+ def create_prediction_group(json_response, image_path, prediction_type):
+ """
+        Create a PredictionGroup from a Roboflow inference API response
+
+        :param prediction_type: prediction type (object detection or classification)
+        :param json_response: JSON response from the Roboflow Inference API
+        :param image_path: path of the image the predictions were made on
+        :return: a PredictionGroup built from the individual predictions
+ """
+ prediction_list = []
+ for prediction in json_response['predictions']:
+ prediction = Prediction(prediction, image_path, prediction_type=prediction_type)
+ prediction_list.append(prediction)
+
+ return PredictionGroup(*prediction_list)
\ No newline at end of file
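
Prediction groups are normally built from an API response via `create_prediction_group`, but they can also be assembled by hand, which is handy for trying the plotting helpers. A sketch with made-up values (plotting requires the referenced image to exist on disk, here the test image added in this diff):

```python
from roboflow.config import OBJECT_DETECTION_MODEL
from roboflow.util.prediction import Prediction, PredictionGroup

pred = Prediction({"x": 50, "y": 60, "width": 20, "height": 30,
                   "class": "rabbit", "confidence": 0.9},
                  "tests/rabbit2.jpg", prediction_type=OBJECT_DETECTION_MODEL)

group = PredictionGroup(pred)
print(group)   # pretty-printed JSON for each prediction
group.plot()   # draws the bounding box over the base image
```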
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 00000000..7637e4c9
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,18 @@
+[metadata]
+name = roboflow
+version = 0.0.3
+author = Roboflow
+author_email = help@roboflow.com
+description = The Official Python Package for Roboflow
+long_description = file: README.md
+long_description_content_type = text/markdown
+url = https://github.com/SamratSahoo/roboflow-python
+classifiers =
+ Programming Language :: Python :: 3
+    License :: OSI Approved :: Apache Software License
+ Operating System :: OS Independent
+
+[options]
+packages = find:
+python_requires = >=3.7
+include_package_data = True
\ No newline at end of file
diff --git a/setup.py b/setup.py
deleted file mode 100644
index e2dc4779..00000000
--- a/setup.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import setuptools
-
-with open("README.md", "r") as fh:
- long_description = fh.read()
-
-setuptools.setup(
- name="roboflow", # Replace with your own username
- version="0.0.2",
- author="Roboflow",
- author_email="devs@roboflow.ai",
- description="Loader for Roboflow datasets.",
- long_description=long_description,
- long_description_content_type="text/markdown",
- url="https://github.com/roboflow-ai/roboflow-python",
- packages=setuptools.find_packages(),
- classifiers=[
- "Development Status :: 1 - Planning",
- "Programming Language :: Python :: 3",
- "License :: OSI Approved :: Apache Software License",
- "Operating System :: OS Independent",
- "Intended Audience :: Developers",
- "Intended Audience :: Science/Research",
- "Topic :: Scientific/Engineering :: Image Recognition"
- ],
- keywords="roboflow datasets dataset download convert annotation annotations computer vision object detection classification",
- python_requires='>=3.6',
- install_requires=[
- "requests>=2.23"
- ]
-)
diff --git a/tests/environment.py b/tests/environment.py
new file mode 100644
index 00000000..e25c2a73
--- /dev/null
+++ b/tests/environment.py
@@ -0,0 +1,3 @@
+from dotenv import load_dotenv
+
+load_dotenv() # take environment variables from .env.
diff --git a/tests/rabbit.JPG b/tests/rabbit.JPG
new file mode 100644
index 00000000..ba88559b
Binary files /dev/null and b/tests/rabbit.JPG differ
diff --git a/tests/rabbit2.jpg b/tests/rabbit2.jpg
new file mode 100644
index 00000000..4076cbb0
Binary files /dev/null and b/tests/rabbit2.jpg differ
diff --git a/tests/test.py b/tests/test.py
new file mode 100644
index 00000000..78272258
--- /dev/null
+++ b/tests/test.py
@@ -0,0 +1,15 @@
+import os
+import roboflow
+from roboflow.models.object_detection import ObjectDetectionModel
+
+if __name__ == '__main__':
+ # Create Model with API Key + Model Endpoint
+ model = ObjectDetectionModel(api_key=os.getenv('ROBOFLOW_API_KEY'), dataset_slug=os.getenv('ROBOFLOW_MODEL'),
+ version=os.getenv('DATASET_VERSION'), stroke=3)
+ # Get prediction via an image
+ prediction_group = model.predict("rabbit2.jpg", hosted=False, format="json")
+
+ # Plot predictions using matplotlib
+ prediction_group.plot()
+ # prediction_group.add_prediction(Prediction(
+ # {'x': 1, 'y': 1, 'width': 1, 'height': 1, 'class': 'hi', 'confidence': 0.1}, 'dskfj.jpg'))