diff --git a/README.md b/README.md
index 33401e8..4d3452e 100644
--- a/README.md
+++ b/README.md
@@ -71,7 +71,7 @@ To run the API, go to the API's directory and run the following:
 #### Using Linux based docker:
 
 ```sh
-sudo NV_GPU=0 nvidia-docker run -itv $(pwd)/models:/models -p <docker_host_port>:1234 yolov4_inference_api_gpu
+sudo NV_GPU=0 nvidia-docker run -itv $(pwd)/models:/models -v $(pwd)/models_hash:/models_hash -p <docker_host_port>:1234 yolov4_inference_api_gpu
 ```
 
 The <docker_host_port> can be any unique port of your choice.
diff --git a/src/main/model_hash.json b/models_hash/model_hash.json
similarity index 100%
rename from src/main/model_hash.json
rename to models_hash/model_hash.json
diff --git a/src/main/deep_learning_service.py b/src/main/deep_learning_service.py
index 3f0c1ea..05bfb40 100644
--- a/src/main/deep_learning_service.py
+++ b/src/main/deep_learning_service.py
@@ -17,7 +17,7 @@ def __init__(self):
         # dictionary to hold the model instances (model_name: string -> model_instance: AbstractInferenceEngine)
         self.models_dict = {}
         # read from json file and append to dict
-        file_name = 'model_hash.json'
+        file_name = '/models_hash/model_hash.json'
         file_exists = os.path.exists(file_name)
         if file_exists:
             try:
@@ -26,7 +26,7 @@ def __init__(self):
             except:
                 self.models_hash_dict = {}
         else:
-            with open('model_hash.json', 'w'):
+            with open('/models_hash/model_hash.json', 'w'):
                 self.models_hash_dict = {}
         self.labels_hash_dict = {}
         self.base_models_dir = '/models'
@@ -62,7 +62,7 @@ def load_all_models(self):
             if key not in models:
                 del self.models_hash_dict[key]
         # append to json file
-        with open('model_hash.json', "w") as fp:
+        with open('/models_hash/model_hash.json', "w") as fp:
             json.dump(self.models_hash_dict, fp)
         return self.models_hash_dict
 
diff --git a/src/main/result.jpg b/src/main/result.jpg
deleted file mode 100644
index f4d888d..0000000
Binary files a/src/main/result.jpg and /dev/null differ