Skip to content

Commit

Permalink
[wip] job results representation negotiation
Browse files Browse the repository at this point in the history
  • Loading branch information
fmigneault committed Sep 19, 2024
1 parent c54f113 commit b25385c
Show file tree
Hide file tree
Showing 12 changed files with 351 additions and 231 deletions.
4 changes: 4 additions & 0 deletions docs/source/processes.rst
Original file line number Diff line number Diff line change
Expand Up @@ -2007,6 +2007,10 @@ format is employed according to the chosen location.
.. seealso::
For the :term:`WPS` endpoint, refer to :ref:`conf_settings`.

.. _proc_op_job_status:
.. fixme: add example
.. fixme: describe minimum fields and extra fields
.. _proc_op_result:

Obtaining Job Outputs, Results, Logs or Errors
Expand Down
36 changes: 28 additions & 8 deletions tests/functional/test_wps_package.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
import json
import logging
import os
import re
import shutil
import tempfile
from inspect import cleandoc
Expand Down Expand Up @@ -124,10 +123,6 @@ def setUpClass(cls) -> None:
def setUp(self) -> None:
self.process_store.clear_processes()

@classmethod
def request(cls, method, url, *args, **kwargs):
raise NotImplementedError # not used

def test_deploy_cwl_label_as_process_title(self):
title = "This process title comes from the CWL label"
cwl = {
Expand Down Expand Up @@ -3521,6 +3516,31 @@ def test_execute_cwl_enum_schema_combined_type_single_array_from_wps(self, mock_

assert results


@pytest.mark.functional
class WpsPackageAppTestResultResponses(WpsConfigBase, ResourcesUtil):
"""
Tests to evaluate the various combinations of results response representations.
.. seealso::
- :ref:`proc_exec_results`
- :ref:`proc_op_job_results`
"""
@classmethod
def setUpClass(cls) -> None:
    # Minimal WPS/REST configuration needed by the result-response tests.
    test_config = {
        "weaver.wps": True,
        "weaver.wps_path": "/ows/wps",
        "weaver.wps_restapi_path": "/",
        "weaver.wps_output_path": "/wpsoutputs",
        "weaver.wps_output_url": "http://localhost/wpsoutputs",
        "weaver.wps_output_dir": "/tmp/weaver-test/wps-outputs",  # nosec: B108 # don't care hardcoded for test
    }
    cls.settings = test_config
    super(WpsPackageAppTestResultResponses, cls).setUpClass()

def setUp(self) -> None:
    # Start every test from an empty process store so process registrations
    # from one test cannot leak into another.
    self.process_store.clear_processes()

def test_execute_single_output_prefer_header_return_representation_literal(self):
proc = "EchoResultsTester"
p_id = self.fully_qualified_test_process_name(proc)
Expand Down Expand Up @@ -3781,7 +3801,7 @@ def test_execute_single_output_response_raw_value_complex(self):
assert outputs.json["outputs"] == {
"output_json": {
"value": output_json,
"type": ContentType.APP_JSON,
"mediaType": ContentType.APP_JSON,
},
}

Expand Down Expand Up @@ -4362,7 +4382,7 @@ def test_execute_multi_output_prefer_header_return_minimal_override_transmission
},
"output_json": {
"value": output_json,
"type": ContentType.APP_JSON,
"mediaType": ContentType.APP_JSON,
},
"output_text": {
"href": f"{out_url}/{job_id}/output_text/output.txt",
Expand Down Expand Up @@ -4492,7 +4512,7 @@ def test_execute_multi_output_response_document_mixed(self):
},
"output_json": {
"value": output_json,
"type": ContentType.APP_JSON,
"mediaType": ContentType.APP_JSON,
},
"output_text": {
"href": f"{out_url}/{job_id}/output_text/output.txt",
Expand Down
12 changes: 9 additions & 3 deletions tests/functional/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@
AnyUUID,
CWL,
ExecutionResults,
JobStatusResponse,
JSON,
ProcessDeployment,
ProcessDescription,
Expand Down Expand Up @@ -433,7 +434,7 @@ def deploy_process(cls,
return info # type: ignore

def _try_get_logs(self, status_url):
_resp = self.app.get(f"{status_url}/logs", headers=self.json_headers)
_resp = self.app.get(f"{status_url}/logs", headers=dict(self.json_headers))
if _resp.status_code == 200:
_text = "\n".join(_resp.json)
return f"Error logs:\n{_text}"
Expand All @@ -445,14 +446,19 @@ def fully_qualified_test_process_name(self, name=""):
test_name = f"{class_name}.{self._testMethodName}{extra_name}".replace(".", "-")
return test_name

@overload
def monitor_job(self, status_url, return_status=False, **__):
    # type: (str, Literal[True], **Any) -> JobStatusResponse
    # NOTE(review): this overload advertises 'Literal[True]' for 'return_status' but keeps a
    #   'False' default, which type checkers will flag as incompatible — confirm intent.
    # NOTE(review): only the 'return_status=True' variant is overloaded; a companion
    #   'Literal[False] -> ExecutionResults' overload would be needed for a complete set,
    #   otherwise a single '@overload' before the implementation is itself a typing error.
    ...

def monitor_job(self,
status_url, # type: str
timeout=None, # type: Optional[int]
interval=None, # type: Optional[int]
return_status=False, # type: bool
wait_for_status=None, # type: Optional[str]
expect_failed=False, # type: bool
): # type: (...) -> ExecutionResults
): # type: (...) -> Union[ExecutionResults, JobStatusResponse]
"""
Job polling of status URL until completion or timeout.
Expand Down Expand Up @@ -505,7 +511,7 @@ def check_job_status(_resp, running=False):
return resp.json

def get_outputs(self, status_url):
resp = self.app.get(f"{status_url}/outputs", headers=self.json_headers)
resp = self.app.get(f"{status_url}/outputs", headers=dict(self.json_headers))
body = resp.json
pretty = json.dumps(body, indent=2, ensure_ascii=False)
assert resp.status_code == 200, f"Get outputs failed:\n{pretty}\n{self._try_get_logs(status_url)}"
Expand Down
92 changes: 92 additions & 0 deletions tests/test_execute.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,92 @@
import itertools

import pytest
from pyramid.httpexceptions import HTTPBadRequest

from weaver.execute import ExecuteControlOption, ExecuteMode, ExecuteReturnPreference, parse_prefer_header_execute_mode


@pytest.mark.parametrize(
    ["headers", "support", "expected", "extra_prefer"],
    [
        # both modes supported (sync attempted up to max/specified wait time, unless async requested explicitly)
        ({}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC], (ExecuteMode.SYNC, 10, {}), ""),
        # only supported async (enforced) - original behaviour
        ({}, [ExecuteControlOption.ASYNC], (ExecuteMode.ASYNC, None, {}), ""),
    ] +
    [
        (_headers, _support, _expected, _extra)
        for (_headers, _support, _expected), _extra
        in itertools.product(
            [
                # both modes supported (sync attempted up to max/specified wait time, unless async requested explicitly)
                ({"Prefer": ""}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC],
                 (ExecuteMode.SYNC, 10, {})),
                ({"Prefer": "respond-async"}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC],
                 (ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
                ({"Prefer": "respond-async, wait=4"}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC],
                 (ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
                ({"Prefer": "wait=4"}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC],
                 (ExecuteMode.SYNC, 4, {"Preference-Applied": "wait=4"})),
                ({"Prefer": "wait=20"}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC],
                 (ExecuteMode.ASYNC, None, {})),  # larger than max time
                # only supported async (enforced) - original behaviour
                ({"Prefer": ""}, [ExecuteControlOption.ASYNC],
                 (ExecuteMode.ASYNC, None, {})),
                ({"Prefer": "respond-async"}, [ExecuteControlOption.ASYNC],
                 (ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
                ({"Prefer": "respond-async, wait=4"}, [ExecuteControlOption.ASYNC],
                 (ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
                ({"Prefer": "wait=4"}, [ExecuteControlOption.ASYNC],
                 (ExecuteMode.ASYNC, None, {})),
            ],
            [
                "",
                f"return={ExecuteReturnPreference.MINIMAL}",
                f"return={ExecuteReturnPreference.REPRESENTATION}",
                # FIXME:
                #   Support with added ``Prefer: handling=strict`` or ``Prefer: handling=lenient``
                #   https://github.com/crim-ca/weaver/issues/701
            ]
        )
    ]
)
def test_prefer_header_execute_mode(headers, support, expected, extra_prefer):
    """
    Validate that the ``Prefer`` header resolves into the expected execution mode, wait time and applied preferences.
    """
    # Work on a copy of the parametrized mapping: 'itertools.product' pairs the SAME
    # header dict objects with each 'extra_prefer' variant, so mutating them in place
    # would leak the appended preference into later parameter sets and corrupt them.
    headers = dict(headers)
    if extra_prefer and "Prefer" in headers:
        headers["Prefer"] += f", {extra_prefer}" if headers["Prefer"] else extra_prefer
    result = parse_prefer_header_execute_mode(headers, support)
    assert result == expected


@pytest.mark.parametrize(
    ["headers", "expected"],
    [
        # 1st variant is considered as 1 Prefer header with all values supplied simultaneously
        # 2nd variant is considered as 2 Prefer headers, each with their respective value
        # (this is because urllib, under the hood, concatenates the list of header-values using ';' separator)
        ({"Prefer": "respond-async, wait=4"}, (ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
        ({"Prefer": "respond-async; wait=4"}, (ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
    ]
)
def test_parse_prefer_header_execute_mode_flexible(headers, expected):
    """
    Ensure that the ``Prefer`` header supplied multiple times (allowed by :rfc:`7240`) is handled correctly.
    """
    supported_modes = [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC]
    assert parse_prefer_header_execute_mode(headers, supported_modes) == expected


@pytest.mark.parametrize("prefer_header", [
    "wait=10s",
    "wait=3.1416",
    "wait=yes",
    "wait=1,2,3",  # technically, gets parsed as 'wait=1' (valid) and other '2', '3' parameters on their own
    "wait=1;2;3",
    "wait=1, wait=2",
    "wait=1; wait=2",
])
def test_parse_prefer_header_execute_mode_invalid(prefer_header):
    """
    Ensure that malformed ``wait`` preferences in the ``Prefer`` header are rejected with a client error.
    """
    with pytest.raises(HTTPBadRequest):
        parse_prefer_header_execute_mode({"Prefer": prefer_header}, [ExecuteControlOption.ASYNC])
89 changes: 0 additions & 89 deletions tests/test_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@
from beaker.cache import cache_region
from mypy_boto3_s3.literals import RegionName
from pyramid.httpexceptions import (
HTTPBadRequest,
HTTPConflict,
HTTPCreated,
HTTPError as PyramidHTTPError,
Expand All @@ -48,7 +47,6 @@
setup_test_file_hierarchy
)
from weaver import xml_util
from weaver.execute import ExecuteControlOption, ExecuteMode, ExecuteReturnPreference
from weaver.formats import ContentEncoding, ContentType, repr_json
from weaver.status import JOB_STATUS_CATEGORIES, STATUS_PYWPS_IDS, STATUS_PYWPS_MAP, Status, StatusCompliant, map_status
from weaver.utils import (
Expand Down Expand Up @@ -83,7 +81,6 @@
null,
parse_kvp,
parse_number_with_unit,
parse_prefer_header_execute_mode,
pass_http_error,
request_extra,
resolve_s3_from_http,
Expand Down Expand Up @@ -2122,92 +2119,6 @@ def test_parse_kvp(query, params, expected):
assert result == expected


@pytest.mark.parametrize(
["headers", "support", "expected", "extra_prefer"],
[
# both modes supported (sync attempted upto max/specified wait time, unless async requested explicitly)
({}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC], (ExecuteMode.SYNC, 10, {}), ""),
# only supported async (enforced) - original behaviour
({}, [ExecuteControlOption.ASYNC], (ExecuteMode.ASYNC, None, {}), ""),
] +
[
(_headers, _support, _expected, _extra)
for (_headers, _support, _expected), _extra
in itertools.product(
[
# both modes supported (sync attempted upto max/specified wait time, unless async requested explicitly)
({"Prefer": ""}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC],
(ExecuteMode.SYNC, 10, {})),
({"Prefer": "respond-async"}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC],
(ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
({"Prefer": "respond-async, wait=4"}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC],
(ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
({"Prefer": "wait=4"}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC],
(ExecuteMode.SYNC, 4, {"Preference-Applied": "wait=4"})),
({"Prefer": "wait=20"}, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC],
(ExecuteMode.ASYNC, None, {})), # larger than max time
# only supported async (enforced) - original behaviour
({"Prefer": ""}, [ExecuteControlOption.ASYNC],
(ExecuteMode.ASYNC, None, {})),
({"Prefer": "respond-async"}, [ExecuteControlOption.ASYNC],
(ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
({"Prefer": "respond-async, wait=4"}, [ExecuteControlOption.ASYNC],
(ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
({"Prefer": "wait=4"}, [ExecuteControlOption.ASYNC],
(ExecuteMode.ASYNC, None, {})),
],
[
"",
f"return={ExecuteReturnPreference.MINIMAL}",
f"return={ExecuteReturnPreference.REPRESENTATION}"
# FIXME:
# Support with added ``Prefer: handling=strict`` or ``Prefer: handling=lenient``
# https://github.com/crim-ca/weaver/issues/701
]
)
]
)
def test_prefer_header_execute_mode(headers, support, expected, extra_prefer):
if extra_prefer and "Prefer" in headers:
headers["Prefer"] += f", {extra_prefer}" if headers["Prefer"] else extra_prefer
result = parse_prefer_header_execute_mode(headers, support)
assert result == expected


@pytest.mark.parametrize(
["headers", "expected"],
[
# 1st variant is considered as 1 Prefer header with all values supplied simultaneously
# 2nd variant is considered as 2 Prefer headers, each with their respective value
# (this is because urllib, under the hood, concatenates the list of header-values using ';' separator)
({"Prefer": "respond-async, wait=4"}, (ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
({"Prefer": "respond-async; wait=4"}, (ExecuteMode.ASYNC, None, {"Preference-Applied": "respond-async"})),
]
)
def test_parse_prefer_header_execute_mode_flexible(headers, expected):
"""
Ensure that the ``Prefer`` header supplied multiple times (allowed by :rfc:`7240`) is handled correctly.
"""
result = parse_prefer_header_execute_mode(headers, [ExecuteControlOption.ASYNC, ExecuteControlOption.SYNC])
assert result == expected


@pytest.mark.parametrize("prefer_header", [
"wait=10s",
"wait=3.1416",
"wait=yes",
"wait=1,2,3", # technically, gets parsed as 'wait=1' (valid) and other '2', '3' parameters on their own
"wait=1;2;3",
"wait=1, wait=2",
"wait=1; wait=2",
])
def test_parse_prefer_header_execute_mode_invalid(prefer_header):
headers = {"Prefer": prefer_header}
with pytest.raises(HTTPBadRequest):
parse_prefer_header_execute_mode(headers, [ExecuteControlOption.ASYNC])


@pytest.mark.parametrize(["number", "binary", "unit", "expect"], [
(1.234, False, "B", "1.234 B"),
(10_000_000, False, "B", "10.000 MB"),
Expand Down
Loading

0 comments on commit b25385c

Please sign in to comment.