Commit 731a01ef authored by fabian serin

Merge branch 'fserin/BUG_duplicated_operation_ids' into 'master'

Fix duplicate operation ids in OpenAPI generation; unit test to detect duplicates included

See merge request !158
parents 0baf8e13 1461eef2
Pipeline #51616 passed with stages in 11 minutes and 54 seconds
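
For context, a minimal reproduction sketch of the underlying issue (illustrative only, not code from this merge request; the router name, paths and prefixes are made up): when the same router is mounted under several prefixes, FastAPI reuses an explicitly set operation_id for every mounted copy, so the generated document ends up with duplicate operationIds.

```python
# Minimal reproduction sketch (illustrative, not repository code): mounting one router
# under two prefixes duplicates an explicitly set operation_id in the generated spec.
from fastapi import APIRouter, FastAPI

router = APIRouter()


@router.post("/{record_id}/data", operation_id="write_record_data")
async def post_data(record_id: str):
    return {"record_id": record_id}


app = FastAPI()
app.include_router(router, prefix="/ddms/v2/logs")
app.include_router(router, prefix="/ddms/v3/wellboretrajectories")

spec = app.openapi()
ids = [op["operationId"] for path in spec["paths"].values() for op in path.values()]
assert len(ids) != len(set(ids))  # both mounted paths share "write_record_data"
```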
......@@ -47,7 +47,8 @@ router_bulk = APIRouter() # router dedicated to bulk APIs
BULK_URN_PREFIX_VERSION = "wdms-1"
BULK_URI_FIELD = "bulkURI"
OPERATION_IDS = {"record_data": "write_record_data",
"chunk_data": "post_chunk_data"}
def _check_df_columns_type(df: pd.DataFrame):
if any((type(t) is not str for t in df.columns)):
......@@ -193,7 +194,7 @@ async def set_bulk_field_and_send_record(ctx: Context, bulk_id, record):
)
@OpenApiHandler.set(operation_id="post_data", request_body=REQUEST_DATA_BODY_SCHEMA)
@OpenApiHandler.set(operation_id=OPERATION_IDS["record_data"], request_body=REQUEST_DATA_BODY_SCHEMA)
@router_bulk.post(
'/{record_id}/data',
summary='Writes data as a whole bulk, creates a new version.',
......@@ -204,7 +205,7 @@ any previous bulk. Previous bulk versions are accessible via the get bulk data v
Support JSON and Parquet format ('Content_Type' must be set accordingly).
In case of JSON the orient must be set accordingly. Support http chunked encoding transfer.
""" + REQUIRED_ROLES_WRITE,
operation_id="write_record_data",
operation_id=OPERATION_IDS["record_data"],
responses={
404: {},
200: {}
......@@ -228,7 +229,7 @@ async def post_data(record_id: str,
return await set_bulk_field_and_send_record(ctx=ctx, bulk_id=bulk_id, record=record)
@OpenApiHandler.set(operation_id="post_chunk_data", request_body=REQUEST_DATA_BODY_SCHEMA)
@OpenApiHandler.set(operation_id=OPERATION_IDS["chunk_data"], request_body=REQUEST_DATA_BODY_SCHEMA)
@router_bulk.post(
"/{record_id}/sessions/{session_id}/data",
summary="Send a data chunk. Session must be complete/commit once all chunks are sent.",
......@@ -237,7 +238,7 @@ async def post_data(record_id: str,
"Support JSON and Parquet format ('Content_Type' must be set accordingly). "
"In case of JSON the orient must be set accordingly. Support http chunked encoding."
+ REQUIRED_ROLES_WRITE,
operation_id="post_chunk_data",
operation_id=OPERATION_IDS["chunk_data"],
responses={400: {"error": "Record not found"}}
)
async def post_chunk_data(record_id: str,
......
......@@ -142,6 +142,22 @@ async def shutdown_event():
await get_http_client_session().close()
def update_operation_ids():
# Ensure all operation_ids are unique
from fastapi.routing import APIRoute
operation_ids = set()
for route in wdms_app.routes:
if isinstance(route, APIRoute):
if route.operation_id in operation_ids:
# duplicate detected
new_operation_id = route.unique_id
if route.operation_id in OpenApiHandler._handlers:
OpenApiHandler._handlers[new_operation_id] = OpenApiHandler._handlers[route.operation_id]
route.operation_id = new_operation_id
else:
operation_ids.add(route.operation_id)
DDMS_V2_PATH = '/ddms/v2'
DDMS_V3_PATH = '/ddms/v3'
ALPHA_APIS_PREFIX = '/alpha'
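
As a side note on where route.unique_id comes from, here is a small sketch. It assumes the installed FastAPI release still ships fastapi.utils.generate_operation_id_for_path (newer releases expose generate_unique_id instead): the id is built from the endpoint name, the full mounted path and the HTTP method, which is why the regenerated spec further down contains ids such as post_data_alpha_ddms_v2_logs__record_id__data_post.

```python
# Sketch (assumes fastapi.utils.generate_operation_id_for_path exists in the installed
# FastAPI release). APIRoute.unique_id is derived from the endpoint name, the full
# mounted path and the HTTP method, so it is unique per mounted route.
from fastapi.utils import generate_operation_id_for_path

unique_id = generate_operation_id_for_path(
    name="post_data",
    path="/alpha/ddms/v2/logs/{record_id}/data",
    method="post",
)
print(unique_id)  # post_data_alpha_ddms_v2_logs__record_id__data_post
```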
......@@ -245,6 +261,10 @@ wdms_app.include_router(
prefix=ALPHA_APIS_PREFIX + DDMS_V2_PATH + log_ddms_v2.LOGS_API_BASE_PATH,
tags=tags, dependencies=dependencies)
# The multiple inclusions of the bulk_utils router create duplicate operation_ids
update_operation_ids()
# ------------- add alpha feature: ONLY MOUNTED IN DEV AND DA ENVs
def enable_alpha_feature():
""" must be called to enable and activate alpha feature"""
......
......@@ -7,6 +7,7 @@ httpx
numpy
pandas
pyarrow
openapi-spec-validator
# Note since 3.8 includes Mock 4.0+.
mock>=4.0
......
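
The new openapi-spec-validator dependency is what test_open_api_validity below relies on; a minimal usage sketch (the file path reuses OPENAPI_PATH from the unit test):

```python
# Minimal usage sketch for the new test dependency: validate the generated document
# against the OpenAPI schema. validate_spec raises an exception on an invalid spec.
import json

from openapi_spec_validator import validate_spec

with open("spec/generated/openapi.json") as specfile:
    validate_spec(json.load(specfile))
```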
......@@ -8453,7 +8453,7 @@
},
"post": {
"description": "\nWrites data to the associated record. It creates a new version.\nPayload is expected to contain the entire bulk which will replace as latest version\nany previous bulk. Previous bulk versions are accessible via the get bulk data version API.\nSupport JSON and Parquet format ('Content_Type' must be set accordingly).\nIn case of JSON the orient must be set accordingly. Support http chunked encoding transfer.\n<p>Required roles: 'users.datalake.editors' or 'users.datalake.admins'.</p>",
"operationId": "write_record_data",
"operationId": "post_data_alpha_ddms_v2_logs__record_id__data_post",
"parameters": [
{
"in": "path",
......@@ -8492,6 +8492,23 @@
}
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"example": "{\n \"columns\":[\n \"Ref\",\n \"col_1\",\n \"col_2\"\n ],\n \"index\":[\n 0,\n 1,\n 2,\n 3,\n 4\n ],\n \"data\":[\n [\n 0.0,\n 1111.1,\n 2222.1\n ],\n [\n 0.5,\n 1111.2,\n 2222.2\n ],\n [\n 1.0,\n 1111.3,\n 2222.3\n ],\n [\n 1.5,\n 1111.4,\n 2222.4\n ],\n [\n 2.0,\n 1111.5,\n 2222.5\n ]\n ]\n}"
}
},
"application/x-parquet": {
"schema": {
"format": "binary",
"type": "string"
}
}
},
"description": "\nContains the data corresponding to the dataframe. The header \"Content-Type\" must be set accordingly to the format sent:\n<br/>&nbsp;**Parquet** format(*application/x-parquet*): see [Apache parquet website](https://parquet.apache.org/).\n<br/>&nbsp;**JSON** format (*application/json*): see [Pandas.Dataframe JSON format](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html).\n In that case 'orient' parameter must be provided \n.\n Examples in JSON for data with 5 rows and 3 columns with different _orient_: \n* split: <br/>`{\"columns\":[\"Ref\",\"col_1\",\"col_2\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,1111.1,2222.1],[0.5,1111.2,2222.2],[1.0,1111.3,2222.3],[1.5,1111.4,2222.4],[2.0,1111.5,2222.5]]}`<br/>&nbsp;\n* index: <br/>`{\"0\":{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},\"1\":{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},\"2\":{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},\"3\":{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},\"4\":{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}}`<br/>&nbsp;\n* columns: <br/>`{\"Ref\":{\"0\":0.0,\"1\":0.5,\"2\":1.0,\"3\":1.5,\"4\":2.0},\"col_1\":{\"0\":1111.1,\"1\":1111.2,\"2\":1111.3,\"3\":1111.4,\"4\":1111.5},\"col_2\":{\"0\":2222.1,\"1\":2222.2,\"2\":2222.3,\"3\":2222.4,\"4\":2222.5}}`<br/>&nbsp;\n* records: <br/>`[{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}]`<br/>&nbsp;",
"required": true
},
"responses": {
"200": {
"content": {
......@@ -8803,7 +8820,7 @@
"/alpha/ddms/v2/logs/{record_id}/sessions/{session_id}/data": {
"post": {
"description": "Send a data chunk. Session must be complete/commit once all chunks are sent. This will create a new and single version aggregating all and previous bulk.Support JSON and Parquet format ('Content_Type' must be set accordingly). In case of JSON the orient must be set accordingly. Support http chunked encoding.<p>Required roles: 'users.datalake.editors' or 'users.datalake.admins'.</p>",
"operationId": "post_chunk_data",
"operationId": "post_chunk_data_alpha_ddms_v2_logs__record_id__sessions__session_id__data_post",
"parameters": [
{
"in": "path",
......@@ -8851,6 +8868,23 @@
}
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"example": "{\n \"columns\":[\n \"Ref\",\n \"col_1\",\n \"col_2\"\n ],\n \"index\":[\n 0,\n 1,\n 2,\n 3,\n 4\n ],\n \"data\":[\n [\n 0.0,\n 1111.1,\n 2222.1\n ],\n [\n 0.5,\n 1111.2,\n 2222.2\n ],\n [\n 1.0,\n 1111.3,\n 2222.3\n ],\n [\n 1.5,\n 1111.4,\n 2222.4\n ],\n [\n 2.0,\n 1111.5,\n 2222.5\n ]\n ]\n}"
}
},
"application/x-parquet": {
"schema": {
"format": "binary",
"type": "string"
}
}
},
"description": "\nContains the data corresponding to the dataframe. The header \"Content-Type\" must be set accordingly to the format sent:\n<br/>&nbsp;**Parquet** format(*application/x-parquet*): see [Apache parquet website](https://parquet.apache.org/).\n<br/>&nbsp;**JSON** format (*application/json*): see [Pandas.Dataframe JSON format](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html).\n In that case 'orient' parameter must be provided \n.\n Examples in JSON for data with 5 rows and 3 columns with different _orient_: \n* split: <br/>`{\"columns\":[\"Ref\",\"col_1\",\"col_2\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,1111.1,2222.1],[0.5,1111.2,2222.2],[1.0,1111.3,2222.3],[1.5,1111.4,2222.4],[2.0,1111.5,2222.5]]}`<br/>&nbsp;\n* index: <br/>`{\"0\":{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},\"1\":{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},\"2\":{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},\"3\":{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},\"4\":{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}}`<br/>&nbsp;\n* columns: <br/>`{\"Ref\":{\"0\":0.0,\"1\":0.5,\"2\":1.0,\"3\":1.5,\"4\":2.0},\"col_1\":{\"0\":1111.1,\"1\":1111.2,\"2\":1111.3,\"3\":1111.4,\"4\":1111.5},\"col_2\":{\"0\":2222.1,\"1\":2222.2,\"2\":2222.3,\"3\":2222.4,\"4\":2222.5}}`<br/>&nbsp;\n* records: <br/>`[{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}]`<br/>&nbsp;",
"required": true
},
"responses": {
"200": {
"content": {
......@@ -9123,7 +9157,7 @@
},
"post": {
"description": "\nWrites data to the associated record. It creates a new version.\nPayload is expected to contain the entire bulk which will replace as latest version\nany previous bulk. Previous bulk versions are accessible via the get bulk data version API.\nSupport JSON and Parquet format ('Content_Type' must be set accordingly).\nIn case of JSON the orient must be set accordingly. Support http chunked encoding transfer.\n<p>Required roles: 'users.datalake.editors' or 'users.datalake.admins'.</p>",
"operationId": "write_record_data",
"operationId": "post_data_alpha_ddms_v3_wellboretrajectories__record_id__data_post",
"parameters": [
{
"in": "path",
......@@ -9162,6 +9196,23 @@
}
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"example": "{\n \"columns\":[\n \"Ref\",\n \"col_1\",\n \"col_2\"\n ],\n \"index\":[\n 0,\n 1,\n 2,\n 3,\n 4\n ],\n \"data\":[\n [\n 0.0,\n 1111.1,\n 2222.1\n ],\n [\n 0.5,\n 1111.2,\n 2222.2\n ],\n [\n 1.0,\n 1111.3,\n 2222.3\n ],\n [\n 1.5,\n 1111.4,\n 2222.4\n ],\n [\n 2.0,\n 1111.5,\n 2222.5\n ]\n ]\n}"
}
},
"application/x-parquet": {
"schema": {
"format": "binary",
"type": "string"
}
}
},
"description": "\nContains the data corresponding to the dataframe. The header \"Content-Type\" must be set accordingly to the format sent:\n<br/>&nbsp;**Parquet** format(*application/x-parquet*): see [Apache parquet website](https://parquet.apache.org/).\n<br/>&nbsp;**JSON** format (*application/json*): see [Pandas.Dataframe JSON format](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html).\n In that case 'orient' parameter must be provided \n.\n Examples in JSON for data with 5 rows and 3 columns with different _orient_: \n* split: <br/>`{\"columns\":[\"Ref\",\"col_1\",\"col_2\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,1111.1,2222.1],[0.5,1111.2,2222.2],[1.0,1111.3,2222.3],[1.5,1111.4,2222.4],[2.0,1111.5,2222.5]]}`<br/>&nbsp;\n* index: <br/>`{\"0\":{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},\"1\":{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},\"2\":{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},\"3\":{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},\"4\":{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}}`<br/>&nbsp;\n* columns: <br/>`{\"Ref\":{\"0\":0.0,\"1\":0.5,\"2\":1.0,\"3\":1.5,\"4\":2.0},\"col_1\":{\"0\":1111.1,\"1\":1111.2,\"2\":1111.3,\"3\":1111.4,\"4\":1111.5},\"col_2\":{\"0\":2222.1,\"1\":2222.2,\"2\":2222.3,\"3\":2222.4,\"4\":2222.5}}`<br/>&nbsp;\n* records: <br/>`[{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}]`<br/>&nbsp;",
"required": true
},
"responses": {
"200": {
"content": {
......@@ -9473,7 +9524,7 @@
"/alpha/ddms/v3/wellboretrajectories/{record_id}/sessions/{session_id}/data": {
"post": {
"description": "Send a data chunk. Session must be complete/commit once all chunks are sent. This will create a new and single version aggregating all and previous bulk.Support JSON and Parquet format ('Content_Type' must be set accordingly). In case of JSON the orient must be set accordingly. Support http chunked encoding.<p>Required roles: 'users.datalake.editors' or 'users.datalake.admins'.</p>",
"operationId": "post_chunk_data",
"operationId": "post_chunk_data_alpha_ddms_v3_wellboretrajectories__record_id__sessions__session_id__data_post",
"parameters": [
{
"in": "path",
......@@ -9521,6 +9572,23 @@
}
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"example": "{\n \"columns\":[\n \"Ref\",\n \"col_1\",\n \"col_2\"\n ],\n \"index\":[\n 0,\n 1,\n 2,\n 3,\n 4\n ],\n \"data\":[\n [\n 0.0,\n 1111.1,\n 2222.1\n ],\n [\n 0.5,\n 1111.2,\n 2222.2\n ],\n [\n 1.0,\n 1111.3,\n 2222.3\n ],\n [\n 1.5,\n 1111.4,\n 2222.4\n ],\n [\n 2.0,\n 1111.5,\n 2222.5\n ]\n ]\n}"
}
},
"application/x-parquet": {
"schema": {
"format": "binary",
"type": "string"
}
}
},
"description": "\nContains the data corresponding to the dataframe. The header \"Content-Type\" must be set accordingly to the format sent:\n<br/>&nbsp;**Parquet** format(*application/x-parquet*): see [Apache parquet website](https://parquet.apache.org/).\n<br/>&nbsp;**JSON** format (*application/json*): see [Pandas.Dataframe JSON format](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html).\n In that case 'orient' parameter must be provided \n.\n Examples in JSON for data with 5 rows and 3 columns with different _orient_: \n* split: <br/>`{\"columns\":[\"Ref\",\"col_1\",\"col_2\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,1111.1,2222.1],[0.5,1111.2,2222.2],[1.0,1111.3,2222.3],[1.5,1111.4,2222.4],[2.0,1111.5,2222.5]]}`<br/>&nbsp;\n* index: <br/>`{\"0\":{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},\"1\":{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},\"2\":{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},\"3\":{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},\"4\":{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}}`<br/>&nbsp;\n* columns: <br/>`{\"Ref\":{\"0\":0.0,\"1\":0.5,\"2\":1.0,\"3\":1.5,\"4\":2.0},\"col_1\":{\"0\":1111.1,\"1\":1111.2,\"2\":1111.3,\"3\":1111.4,\"4\":1111.5},\"col_2\":{\"0\":2222.1,\"1\":2222.2,\"2\":2222.3,\"3\":2222.4,\"4\":2222.5}}`<br/>&nbsp;\n* records: <br/>`[{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}]`<br/>&nbsp;",
"required": true
},
"responses": {
"200": {
"content": {
......@@ -9832,6 +9900,23 @@
}
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"example": "{\n \"columns\":[\n \"Ref\",\n \"col_1\",\n \"col_2\"\n ],\n \"index\":[\n 0,\n 1,\n 2,\n 3,\n 4\n ],\n \"data\":[\n [\n 0.0,\n 1111.1,\n 2222.1\n ],\n [\n 0.5,\n 1111.2,\n 2222.2\n ],\n [\n 1.0,\n 1111.3,\n 2222.3\n ],\n [\n 1.5,\n 1111.4,\n 2222.4\n ],\n [\n 2.0,\n 1111.5,\n 2222.5\n ]\n ]\n}"
}
},
"application/x-parquet": {
"schema": {
"format": "binary",
"type": "string"
}
}
},
"description": "\nContains the data corresponding to the dataframe. The header \"Content-Type\" must be set accordingly to the format sent:\n<br/>&nbsp;**Parquet** format(*application/x-parquet*): see [Apache parquet website](https://parquet.apache.org/).\n<br/>&nbsp;**JSON** format (*application/json*): see [Pandas.Dataframe JSON format](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html).\n In that case 'orient' parameter must be provided \n.\n Examples in JSON for data with 5 rows and 3 columns with different _orient_: \n* split: <br/>`{\"columns\":[\"Ref\",\"col_1\",\"col_2\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,1111.1,2222.1],[0.5,1111.2,2222.2],[1.0,1111.3,2222.3],[1.5,1111.4,2222.4],[2.0,1111.5,2222.5]]}`<br/>&nbsp;\n* index: <br/>`{\"0\":{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},\"1\":{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},\"2\":{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},\"3\":{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},\"4\":{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}}`<br/>&nbsp;\n* columns: <br/>`{\"Ref\":{\"0\":0.0,\"1\":0.5,\"2\":1.0,\"3\":1.5,\"4\":2.0},\"col_1\":{\"0\":1111.1,\"1\":1111.2,\"2\":1111.3,\"3\":1111.4,\"4\":1111.5},\"col_2\":{\"0\":2222.1,\"1\":2222.2,\"2\":2222.3,\"3\":2222.4,\"4\":2222.5}}`<br/>&nbsp;\n* records: <br/>`[{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}]`<br/>&nbsp;",
"required": true
},
"responses": {
"200": {
"content": {
......
......@@ -20,13 +20,14 @@ The updated spec file must then be committed with the latest changes.
OPENAPI_PATH = 'spec/generated/openapi.json'
from fastapi.testclient import TestClient
import pytest
from app.wdms_app import wdms_app
from app.helper import traces
import rapidjson as json
from tests.unit.test_utils import ctx_fixture
from fastapi.testclient import TestClient
from openapi_spec_validator import validate_spec
import rapidjson as json
from app.helper import traces
from app.wdms_app import wdms_app
# Initialize traces exporter in app, like it is in app's startup decorator
wdms_app.trace_exporter = traces.CombinedExporter(service_name='tested-ddms')
......@@ -38,11 +39,15 @@ def client(ctx_fixture):
wdms_app.dependency_overrides = {}
def test_api_spec(client):
@pytest.fixture
def openapi_json(client):
# get the openapi spec
response = client.get("/openapi.json")
assert response.status_code == 200
openapi_json = response.json()
yield response.json()
def test_api_spec(openapi_json):
openapi_text = json.dumps(openapi_json, sort_keys=True, indent=2)
# get the saved spec
with open(OPENAPI_PATH, 'r') as specfile:
......@@ -55,3 +60,26 @@ def test_api_spec(client):
specfile.write(openapi_text)
# assert error
assert False, f"{OPENAPI_PATH} has changed, commit the updated file"
def test_api_spec_for_duplicates(openapi_json):
    # Check that operationIds for all paths are different
# structure is
# root
# + paths
# + url for instance "/alpha/ddms/v2/logs/{record_id}/data"
# + method for instance get, post, ...
# + operationId
path_dict = openapi_json.get("paths", None)
operation_id_set = set()
assert path_dict is not None
for url, url_dict in path_dict.items():
for method, method_dict in url_dict.items():
operation_id = method_dict.get("operationId", None)
assert operation_id not in operation_id_set, f"{method}:{url} {operation_id} already defined"
operation_id_set.add(operation_id)
@pytest.mark.skip(reason="Generated OpenAPI is not valid due to an exclusiveMinimum FastAPI OpenAPI bug")
def test_open_api_validity(openapi_json):
validate_spec(openapi_json)
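
The duplicate check added above can also be expressed as a small standalone helper; an illustrative sketch (not part of the merge request):

```python
# Illustrative helper (not part of this merge request): list operationIds that appear
# more than once across all paths/methods of a spec dict, mirroring the unit test above.
from collections import Counter


def find_duplicate_operation_ids(spec: dict) -> dict:
    counts = Counter(
        operation.get("operationId")
        for methods in spec.get("paths", {}).values()
        for operation in methods.values()
        if isinstance(operation, dict) and "operationId" in operation
    )
    return {op_id: n for op_id, n in counts.items() if n > 1}
```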