From d761acc883166e62a42e40c27f66a45b6d023dc3 Mon Sep 17 00:00:00 2001 From: Philip Krauss <35487337+philkra@users.noreply.github.com> Date: Sun, 25 Feb 2024 15:32:44 +0100 Subject: [PATCH] Cleanup doc blocks (#194) * linting + comments * refactor to reduce complexity * escape foward slash * fresh code gen * linting cosmetics --- codegen/generator.py | 134 ++++++++++-------- .../files_transormations_test.py | 13 +- .../helpers_bulkprocessor_test.py | 2 - xata/api/migrations.py | 4 +- xata/api/records.py | 2 +- xata/api/search_and_filter.py | 10 +- xata/api_response.py | 2 +- 7 files changed, 92 insertions(+), 75 deletions(-) diff --git a/codegen/generator.py b/codegen/generator.py index 129a546..b3f1525 100644 --- a/codegen/generator.py +++ b/codegen/generator.py @@ -26,7 +26,6 @@ import json import logging import re -import textwrap from typing import Any, Dict import coloredlogs @@ -35,7 +34,7 @@ from xata.helpers import to_rfc339 -VERSION = "2.0.0" +VERSION = "2.1.0" coloredlogs.install(level="INFO") logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s") @@ -186,7 +185,8 @@ def generate_endpoint(path: str, method: str, endpoint: dict, parameters: list, else: endpoint_params = get_endpoint_params(path, endpoint, parameters, references) if "description" in endpoint: - desc = endpoint["description"].strip() + # poor man's escape of "\" as linter does not like it + desc = endpoint["description"].replace("\\", "\\\\").strip() else: logging.info("missing description for %s.%s - using summary." % (path, endpoint["operationId"])) desc = endpoint["summary"].strip() @@ -259,56 +259,13 @@ def get_endpoint_params(path: str, endpoint: dict, parameters: dict, references: "response_content_types": [], } if len(parameters) > 0: - # Check for convience param swaps - curated_param_list = [] - for r in parameters: - if "$ref" in r and r["$ref"] == REF_DB_BRANCH_NAME_PARAM: - logging.debug("adding smart value for %s" % "#/components/parameters/DBBranchNameParam") - # push two new params to cover for string creation - curated_param_list.append(OPTIONAL_CURATED_PARAM_DB_NAME) - curated_param_list.append(OPTIONAL_CURATED_PARAM_BRANCH_NAME) - skel["smart_db_branch_name"] = True - elif "$ref" in r and r["$ref"] == REF_WORKSPACE_ID_PARAM: - # and endpoint['operationId'] not in REF_WORKSPACE_ID_PARAM_EXCLUSIONS: - logging.debug("adding smart value for %s" % "#/components/parameters/WorkspaceIdParam") - curated_param_list.append(OPTIONAL_CURATED_PARAM_WORKSPACE_ID) - skel["smart_workspace_id"] = True - else: - curated_param_list.append(r) + body = _generate_enpoint_param_list(parameters) + curated_param_list = body["params"] + skel["smart_db_branch_name"] = body["smart_db_branch_name"] + skel["smart_workspace_id"] = body["smart_workspace_id"] for r in curated_param_list: - p = None - # if not in ref: endpoint specific params - if "$ref" in r and r["$ref"] in references: - p = references[r["$ref"]] - if "$ref" in p["schema"]: - p["type"] = type_replacement(references[p["schema"]["$ref"]]["type"]) - elif "type" in p["schema"]: - p["type"] = type_replacement(p["schema"]["type"]) - else: - logging.error("could resolve type of '%s' in the lookup." % r["$ref"]) - exit(11) - # else if name not in r: method specific params - elif "name" in r: - p = r - p["type"] = type_replacement(r["schema"]["type"]) - # else fail with code: 11 - else: - logging.error("could resolve reference %s in the lookup." 
% r["$ref"]) - exit(11) - - if "required" not in p: - p["required"] = False - if "description" not in p: - p["description"] = "" - - p["name"] = p["name"].strip() - p["nameParam"] = get_param_name(p["name"]) - p["description"] = p["description"].strip() - p["trueType"] = p["type"] - if not p["required"]: - p["type"] += " = None" - + p = _prepare_endpoint_param(r, references) skel["list"].append(p) if p["in"] == "path": @@ -358,17 +315,78 @@ def get_endpoint_params(path: str, endpoint: dict, parameters: dict, references: } ) + skel["list"] = _sanitize_enpoint_list(skel["list"], skel["has_optional_params"]) + return skel + + +def _prepare_endpoint_param(r, references: list) -> list: + p = None + # if not in ref: endpoint specific params + if "$ref" in r and r["$ref"] in references: + p = references[r["$ref"]] + if "$ref" in p["schema"]: + p["type"] = type_replacement(references[p["schema"]["$ref"]]["type"]) + elif "type" in p["schema"]: + p["type"] = type_replacement(p["schema"]["type"]) + else: + logging.error("could resolve type of '%s' in the lookup." % r["$ref"]) + exit(11) + # else if name not in r: method specific params + elif "name" in r: + p = r + p["type"] = type_replacement(r["schema"]["type"]) + # else fail with code: 11 + else: + logging.error("could resolve reference %s in the lookup." % r["$ref"]) + exit(11) + + if "required" not in p: + p["required"] = False + if "description" not in p: + p["description"] = "" + p["name"] = p["name"].strip() + p["nameParam"] = get_param_name(p["name"]) + p["description"] = p["description"].strip() + p["trueType"] = p["type"] + if not p["required"]: + p["type"] += " = None" + + return p + + +def _generate_enpoint_param_list(parameters: list) -> list: + resp = { + "params": [], + "smart_db_branch_name": False, + "smart_workspace_id": False, + } + for r in parameters: + if "$ref" in r and r["$ref"] == REF_DB_BRANCH_NAME_PARAM: + logging.debug("adding smart value for %s" % "#/components/parameters/DBBranchNameParam") + # push two new params to cover for string creation + resp["params"].append(OPTIONAL_CURATED_PARAM_DB_NAME) + resp["params"].append(OPTIONAL_CURATED_PARAM_BRANCH_NAME) + resp["smart_db_branch_name"] = True + elif "$ref" in r and r["$ref"] == REF_WORKSPACE_ID_PARAM: + # and endpoint['operationId'] not in REF_WORKSPACE_ID_PARAM_EXCLUSIONS: + logging.debug("adding smart value for %s" % "#/components/parameters/WorkspaceIdParam") + resp["params"].append(OPTIONAL_CURATED_PARAM_WORKSPACE_ID) + resp["smart_workspace_id"] = True + else: + resp["params"].append(r) + return resp + + +def _sanitize_enpoint_list(lst: list, has_optional_params: bool) -> list: + # reorder for optional params to be last + if has_optional_params: + lst = [e for e in lst if e["required"]] + [e for e in lst if not e["required"]] # Remove duplicates tmp = {} - for p in skel["list"]: + for p in lst: if p["name"].lower() not in tmp: tmp[p["name"].lower()] = p - skel["list"] = tmp.values() - - # reorder for optional params to be last - if skel["has_optional_params"]: - skel["list"] = [e for e in skel["list"] if e["required"]] + [e for e in skel["list"] if not e["required"]] - return skel + return tmp.values() def resolve_references(spec: dict) -> dict: diff --git a/tests/integration-tests/files_transormations_test.py b/tests/integration-tests/files_transormations_test.py index 9075715..c5485bf 100644 --- a/tests/integration-tests/files_transormations_test.py +++ b/tests/integration-tests/files_transormations_test.py @@ -23,10 +23,11 @@ import pytest import utils from 
faker import Faker -from PIL import Image, ImageChops from xata.client import XataClient +# from PIL import Image, ImageChops + class TestFilesTransformations(object): def setup_class(self): @@ -90,7 +91,7 @@ def test_with_nested_operations(self): upload = self.client.records().insert("Attachments", payload, columns=["one_file.url"]) assert upload.is_success() - img = utils.get_file_content(utils.get_file_name("images/03.png")) + # img = utils.get_file_content(utils.get_file_name("images/03.png")) self.client.files().transform( upload["one_file"]["url"], {"rotate": 180, "blur": 50, "trim": {"top": 20, "right": 30, "bottom": 20, "left": 0}}, @@ -127,18 +128,18 @@ def test_unknown_operations(self): upload = self.client.records().insert("Attachments", payload, columns=["one_file.url"]) assert upload.is_success() - with pytest.raises(Exception) as e: + with pytest.raises(Exception): self.client.files().transform(upload["one_file"]["url"], {}) - with pytest.raises(Exception) as e: + with pytest.raises(Exception): self.client.files().transform(upload["one_file"]["url"], {"donkey": "kong"}) def test_unknown_image_id(self): # must fail with a 403 - with pytest.raises(Exception) as e: + with pytest.raises(Exception): self.client.files().transform("https://us-east-1.storage.xata.sh/lalala", {"rotate": 90}) def test_invalid_url(self): # must fail with a 403 - with pytest.raises(Exception) as e: + with pytest.raises(Exception): self.client.files().transform("https:/xata.sh/oh-hello", {"rotate": 90}) diff --git a/tests/integration-tests/helpers_bulkprocessor_test.py b/tests/integration-tests/helpers_bulkprocessor_test.py index 4d18d2f..98b5c72 100644 --- a/tests/integration-tests/helpers_bulkprocessor_test.py +++ b/tests/integration-tests/helpers_bulkprocessor_test.py @@ -17,8 +17,6 @@ # under the License. # -import time - import pytest import utils from faker import Faker diff --git a/xata/api/migrations.py b/xata/api/migrations.py index 6af12d9..b14657a 100644 --- a/xata/api/migrations.py +++ b/xata/api/migrations.py @@ -165,7 +165,7 @@ def compare_branch_with_user_schema( headers = {"content-type": "application/json"} return self.request("POST", url_path, headers, payload) - def compare_schemas(self, payload: dict, db_name: str = None, branch_name: str = None) -> ApiResponse: + def compare_schemas(self, branch_name: str, payload: dict, db_name: str = None) -> ApiResponse: """ Compare branch schemas. @@ -180,9 +180,9 @@ def compare_schemas(self, payload: dict, db_name: str = None, branch_name: str = - 5XX: Unexpected Error - default: Unexpected Error + :param branch_name: str The Database Name :param payload: dict content :param db_name: str = None The name of the database to query. Default: database name from the client. - :param branch_name: str = None The name of the branch to query. Default: branch name from the client. :returns ApiResponse """ diff --git a/xata/api/records.py b/xata/api/records.py index 7048e1f..0043376 100644 --- a/xata/api/records.py +++ b/xata/api/records.py @@ -134,7 +134,7 @@ def insert_with_id( if_version: int = None, ) -> ApiResponse: """ - By default, IDs are auto-generated when data is insterted into Xata. Sending a request to this endpoint allows us to insert a record with a pre-existing ID, bypassing the default automatic ID generation. + By default, IDs are auto-generated when data is inserted into Xata. Sending a request to this endpoint allows us to insert a record with a pre-existing ID, bypassing the default automatic ID generation. 
Reference: https://xata.io/docs/api-reference/db/db_branch_name/tables/table_name/data/record_id#insert-record-with-id Path: /db/{db_branch_name}/tables/{table_name}/data/{record_id} diff --git a/xata/api/search_and_filter.py b/xata/api/search_and_filter.py index cdaeccb..62ef766 100644 --- a/xata/api/search_and_filter.py +++ b/xata/api/search_and_filter.py @@ -57,7 +57,7 @@ def query(self, table_name: str, payload: dict = None, db_name: str = None, bran } ``` - For usage, see also the [API Guide](https://xata.io/docs/api-guide/get). + For usage, see also the [Xata SDK documentation](https://xata.io/docs/sdk/get). ### Column selection @@ -459,7 +459,7 @@ def query(self, table_name: str, payload: dict = None, db_name: str = None, bran * `*` matches zero or more characters * `?` matches exactly one character - If you want to match a string that contains a wildcard character, you can escape them using a backslash (`\`). You can escape a backslash by usign another backslash. + If you want to match a string that contains a wildcard character, you can escape them using a backslash (`\\`). You can escape a backslash by usign another backslash. You can also use the `$endsWith` and `$startsWith` operators: @@ -888,7 +888,7 @@ def search_table(self, table_name: str, payload: dict, db_name: str = None, bran """ Run a free text search operation in a particular table. - The endpoint accepts a `query` parameter that is used for the free text search and a set of structured filters (via the `filter` parameter) that are applied before the search. The `filter` parameter uses the same syntax as the [query endpoint](/api-reference/db/db_branch_name/tables/table_name/) with the following exceptions: + The endpoint accepts a `query` parameter that is used for the free text search and a set of structured filters (via the `filter` parameter) that are applied before the search. The `filter` parameter uses the same syntax as the [query endpoint](/docs/api-reference/db/db_branch_name/tables/table_name/query#filtering) with the following exceptions: * filters `$contains`, `$startsWith`, `$endsWith` don't work on columns of type `text` * filtering on columns of type `multiple` is currently unsupported @@ -1137,10 +1137,10 @@ def aggregate(self, table_name: str, payload: dict, db_name: str = None, branch_ While the summary endpoint is served from a transactional store and the results are strongly consistent, the aggregate endpoint is served from our columnar store and the results are only eventually consistent. On the other hand, the aggregate endpoint uses a - store that is more appropiate for analytics, makes use of approximative algorithms + store that is more appropriate for analytics, makes use of approximation algorithms (e.g for cardinality), and is generally faster and can do more complex aggregations. - For usage, see the [API Guide](https://xata.io/docs/api-guide/aggregate). + For usage, see the [Aggregation documentation](https://xata.io/docs/sdk/aggregate). Reference: https://xata.io/docs/api-reference/db/db_branch_name/tables/table_name/aggregate#run-aggregations-over-a-table Path: /db/{db_branch_name}/tables/{table_name}/aggregate diff --git a/xata/api_response.py b/xata/api_response.py index 621b436..a324d1f 100644 --- a/xata/api_response.py +++ b/xata/api_response.py @@ -95,7 +95,7 @@ def status_code(self) -> int: :returns int """ return self.response.status_code - + @property def error_message(self) -> Union[str, None]: """
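Editor's note (not part of the patch): a minimal usage sketch of the two caller-facing changes above — the reordered `compare_schemas()` signature, whose target branch is now the first, required argument, and the clarified wildcard/backslash escaping for filter patterns. The client accessors (`migrations()`, `data()`), the branch and table names, the `$pattern` operator, and the schema payload shape are assumptions made for illustration only; treat this as a sketch, not the SDK's documented surface.

```python
from xata.client import XataClient

# Assumes XATA_API_KEY and XATA_DB_URL are provided via the environment.
client = XataClient()

# compare_schemas() now takes the branch to compare as its first, required
# argument; db_name stays an optional keyword. The payload shape below is a
# placeholder -- consult the Xata API reference for the exact schema document.
diff = client.migrations().compare_schemas("feature-branch", {"schema": {"tables": []}})
print(diff.is_success(), diff.status_code)

# Per the updated query docstring: "*" matches zero or more characters and
# "?" matches exactly one; to match a wildcard literally, escape it with a
# backslash (written "\\" inside a Python/JSON string).
posts = client.data().query(
    "Posts",
    {"filter": {"title": {"$pattern": "release-?*"}}},
)
print(posts.is_success())
```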