Commit 1461eef2 authored by fabian serin

Improvement from Yannick to solve missing description

parent db4178a2
Pipeline #51486 passed with stages in 14 minutes and 18 seconds
@@ -145,13 +145,17 @@ async def shutdown_event():
 def update_operation_ids():
     # Ensure all operation_id are uniques
     from fastapi.routing import APIRoute
+    operation_ids = set()
     for route in wdms_app.routes:
         if isinstance(route, APIRoute):
-            if route.operation_id in bulk_utils.OPERATION_IDS.values(): # All route with possible duplicate
+            if route.operation_id in operation_ids:
+                # duplicate detected
                 new_operation_id = route.unique_id
                 if route.operation_id in OpenApiHandler._handlers:
-                    OpenApiHandler._handlers[new_operation_id] = OpenApiHandler._handlers.pop(route.operation_id)
-                route.operation_id = route.unique_id
+                    OpenApiHandler._handlers[new_operation_id] = OpenApiHandler._handlers[route.operation_id]
+                route.operation_id = new_operation_id
+            else:
+                operation_ids.add(route.operation_id)

 DDMS_V2_PATH = '/ddms/v2'
......
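The change above only renames genuinely duplicated operation ids and copies, rather than pops, the handler registered under the original id, so a description registered against the friendly id (for example write_record_data) stays resolvable. A minimal, self-contained sketch of that behaviour, using toy stand-ins for APIRoute and the OpenApiHandler._handlers registry (the Route class, the handlers dict and the second unique_id value are hypothetical, not the project's code):

# Toy stand-ins, not the project's APIRoute or OpenApiHandler.
class Route:
    def __init__(self, operation_id, unique_id):
        self.operation_id = operation_id
        self.unique_id = unique_id

# Description registered under the friendly operation id.
handlers = {"write_record_data": "custom OpenAPI description"}

routes = [
    Route("write_record_data", "post_data_alpha_ddms_v3_welllogs__record_id__data_post"),
    # hypothetical second route reusing the same friendly operation id
    Route("write_record_data", "post_data_alpha_ddms_v3_wellboretrajectories__record_id__data_post"),
]

seen = set()
for route in routes:
    if route.operation_id in seen:
        # Duplicate: rename it, but copy (do not pop) the handler so the
        # first route's friendly id still resolves to its description.
        new_id = route.unique_id
        if route.operation_id in handlers:
            handlers[new_id] = handlers[route.operation_id]
        route.operation_id = new_id
    else:
        seen.add(route.operation_id)

assert routes[0].operation_id == "write_record_data"    # friendly id kept
assert "write_record_data" in handlers                   # description still found
assert routes[1].operation_id.startswith("post_data_")   # only the duplicate renamed

With the previous code, every route whose operation_id appeared in bulk_utils.OPERATION_IDS was renamed and its handler popped, so the friendly id no longer matched any registered handler and the generated spec lost the description; the spec changes below restore it.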
@@ -8492,6 +8492,23 @@
}
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"example": "{\n \"columns\":[\n \"Ref\",\n \"col_1\",\n \"col_2\"\n ],\n \"index\":[\n 0,\n 1,\n 2,\n 3,\n 4\n ],\n \"data\":[\n [\n 0.0,\n 1111.1,\n 2222.1\n ],\n [\n 0.5,\n 1111.2,\n 2222.2\n ],\n [\n 1.0,\n 1111.3,\n 2222.3\n ],\n [\n 1.5,\n 1111.4,\n 2222.4\n ],\n [\n 2.0,\n 1111.5,\n 2222.5\n ]\n ]\n}"
}
},
"application/x-parquet": {
"schema": {
"format": "binary",
"type": "string"
}
}
},
"description": "\nContains the data corresponding to the dataframe. The header \"Content-Type\" must be set accordingly to the format sent:\n<br/>&nbsp;**Parquet** format(*application/x-parquet*): see [Apache parquet website](https://parquet.apache.org/).\n<br/>&nbsp;**JSON** format (*application/json*): see [Pandas.Dataframe JSON format](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html).\n In that case 'orient' parameter must be provided \n.\n Examples in JSON for data with 5 rows and 3 columns with different _orient_: \n* split: <br/>`{\"columns\":[\"Ref\",\"col_1\",\"col_2\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,1111.1,2222.1],[0.5,1111.2,2222.2],[1.0,1111.3,2222.3],[1.5,1111.4,2222.4],[2.0,1111.5,2222.5]]}`<br/>&nbsp;\n* index: <br/>`{\"0\":{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},\"1\":{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},\"2\":{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},\"3\":{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},\"4\":{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}}`<br/>&nbsp;\n* columns: <br/>`{\"Ref\":{\"0\":0.0,\"1\":0.5,\"2\":1.0,\"3\":1.5,\"4\":2.0},\"col_1\":{\"0\":1111.1,\"1\":1111.2,\"2\":1111.3,\"3\":1111.4,\"4\":1111.5},\"col_2\":{\"0\":2222.1,\"1\":2222.2,\"2\":2222.3,\"3\":2222.4,\"4\":2222.5}}`<br/>&nbsp;\n* records: <br/>`[{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}]`<br/>&nbsp;",
"required": true
},
"responses": {
"200": {
"content": {
@@ -8851,6 +8868,23 @@
}
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"example": "{\n \"columns\":[\n \"Ref\",\n \"col_1\",\n \"col_2\"\n ],\n \"index\":[\n 0,\n 1,\n 2,\n 3,\n 4\n ],\n \"data\":[\n [\n 0.0,\n 1111.1,\n 2222.1\n ],\n [\n 0.5,\n 1111.2,\n 2222.2\n ],\n [\n 1.0,\n 1111.3,\n 2222.3\n ],\n [\n 1.5,\n 1111.4,\n 2222.4\n ],\n [\n 2.0,\n 1111.5,\n 2222.5\n ]\n ]\n}"
}
},
"application/x-parquet": {
"schema": {
"format": "binary",
"type": "string"
}
}
},
"description": "\nContains the data corresponding to the dataframe. The header \"Content-Type\" must be set accordingly to the format sent:\n<br/>&nbsp;**Parquet** format(*application/x-parquet*): see [Apache parquet website](https://parquet.apache.org/).\n<br/>&nbsp;**JSON** format (*application/json*): see [Pandas.Dataframe JSON format](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html).\n In that case 'orient' parameter must be provided \n.\n Examples in JSON for data with 5 rows and 3 columns with different _orient_: \n* split: <br/>`{\"columns\":[\"Ref\",\"col_1\",\"col_2\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,1111.1,2222.1],[0.5,1111.2,2222.2],[1.0,1111.3,2222.3],[1.5,1111.4,2222.4],[2.0,1111.5,2222.5]]}`<br/>&nbsp;\n* index: <br/>`{\"0\":{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},\"1\":{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},\"2\":{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},\"3\":{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},\"4\":{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}}`<br/>&nbsp;\n* columns: <br/>`{\"Ref\":{\"0\":0.0,\"1\":0.5,\"2\":1.0,\"3\":1.5,\"4\":2.0},\"col_1\":{\"0\":1111.1,\"1\":1111.2,\"2\":1111.3,\"3\":1111.4,\"4\":1111.5},\"col_2\":{\"0\":2222.1,\"1\":2222.2,\"2\":2222.3,\"3\":2222.4,\"4\":2222.5}}`<br/>&nbsp;\n* records: <br/>`[{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}]`<br/>&nbsp;",
"required": true
},
"responses": {
"200": {
"content": {
@@ -9162,6 +9196,23 @@
}
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"example": "{\n \"columns\":[\n \"Ref\",\n \"col_1\",\n \"col_2\"\n ],\n \"index\":[\n 0,\n 1,\n 2,\n 3,\n 4\n ],\n \"data\":[\n [\n 0.0,\n 1111.1,\n 2222.1\n ],\n [\n 0.5,\n 1111.2,\n 2222.2\n ],\n [\n 1.0,\n 1111.3,\n 2222.3\n ],\n [\n 1.5,\n 1111.4,\n 2222.4\n ],\n [\n 2.0,\n 1111.5,\n 2222.5\n ]\n ]\n}"
}
},
"application/x-parquet": {
"schema": {
"format": "binary",
"type": "string"
}
}
},
"description": "\nContains the data corresponding to the dataframe. The header \"Content-Type\" must be set accordingly to the format sent:\n<br/>&nbsp;**Parquet** format(*application/x-parquet*): see [Apache parquet website](https://parquet.apache.org/).\n<br/>&nbsp;**JSON** format (*application/json*): see [Pandas.Dataframe JSON format](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html).\n In that case 'orient' parameter must be provided \n.\n Examples in JSON for data with 5 rows and 3 columns with different _orient_: \n* split: <br/>`{\"columns\":[\"Ref\",\"col_1\",\"col_2\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,1111.1,2222.1],[0.5,1111.2,2222.2],[1.0,1111.3,2222.3],[1.5,1111.4,2222.4],[2.0,1111.5,2222.5]]}`<br/>&nbsp;\n* index: <br/>`{\"0\":{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},\"1\":{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},\"2\":{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},\"3\":{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},\"4\":{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}}`<br/>&nbsp;\n* columns: <br/>`{\"Ref\":{\"0\":0.0,\"1\":0.5,\"2\":1.0,\"3\":1.5,\"4\":2.0},\"col_1\":{\"0\":1111.1,\"1\":1111.2,\"2\":1111.3,\"3\":1111.4,\"4\":1111.5},\"col_2\":{\"0\":2222.1,\"1\":2222.2,\"2\":2222.3,\"3\":2222.4,\"4\":2222.5}}`<br/>&nbsp;\n* records: <br/>`[{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}]`<br/>&nbsp;",
"required": true
},
"responses": {
"200": {
"content": {
@@ -9521,6 +9572,23 @@
}
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"example": "{\n \"columns\":[\n \"Ref\",\n \"col_1\",\n \"col_2\"\n ],\n \"index\":[\n 0,\n 1,\n 2,\n 3,\n 4\n ],\n \"data\":[\n [\n 0.0,\n 1111.1,\n 2222.1\n ],\n [\n 0.5,\n 1111.2,\n 2222.2\n ],\n [\n 1.0,\n 1111.3,\n 2222.3\n ],\n [\n 1.5,\n 1111.4,\n 2222.4\n ],\n [\n 2.0,\n 1111.5,\n 2222.5\n ]\n ]\n}"
}
},
"application/x-parquet": {
"schema": {
"format": "binary",
"type": "string"
}
}
},
"description": "\nContains the data corresponding to the dataframe. The header \"Content-Type\" must be set accordingly to the format sent:\n<br/>&nbsp;**Parquet** format(*application/x-parquet*): see [Apache parquet website](https://parquet.apache.org/).\n<br/>&nbsp;**JSON** format (*application/json*): see [Pandas.Dataframe JSON format](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_json.html).\n In that case 'orient' parameter must be provided \n.\n Examples in JSON for data with 5 rows and 3 columns with different _orient_: \n* split: <br/>`{\"columns\":[\"Ref\",\"col_1\",\"col_2\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,1111.1,2222.1],[0.5,1111.2,2222.2],[1.0,1111.3,2222.3],[1.5,1111.4,2222.4],[2.0,1111.5,2222.5]]}`<br/>&nbsp;\n* index: <br/>`{\"0\":{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},\"1\":{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},\"2\":{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},\"3\":{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},\"4\":{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}}`<br/>&nbsp;\n* columns: <br/>`{\"Ref\":{\"0\":0.0,\"1\":0.5,\"2\":1.0,\"3\":1.5,\"4\":2.0},\"col_1\":{\"0\":1111.1,\"1\":1111.2,\"2\":1111.3,\"3\":1111.4,\"4\":1111.5},\"col_2\":{\"0\":2222.1,\"1\":2222.2,\"2\":2222.3,\"3\":2222.4,\"4\":2222.5}}`<br/>&nbsp;\n* records: <br/>`[{\"Ref\":0.0,\"col_1\":1111.1,\"col_2\":2222.1},{\"Ref\":0.5,\"col_1\":1111.2,\"col_2\":2222.2},{\"Ref\":1.0,\"col_1\":1111.3,\"col_2\":2222.3},{\"Ref\":1.5,\"col_1\":1111.4,\"col_2\":2222.4},{\"Ref\":2.0,\"col_1\":1111.5,\"col_2\":2222.5}]`<br/>&nbsp;",
"required": true
},
"responses": {
"200": {
"content": {
@@ -9793,7 +9861,7 @@
},
"post": {
"description": "\nWrites data to the associated record. It creates a new version.\nPayload is expected to contain the entire bulk which will replace as latest version\nany previous bulk. Previous bulk versions are accessible via the get bulk data version API.\nSupport JSON and Parquet format ('Content_Type' must be set accordingly).\nIn case of JSON the orient must be set accordingly. Support http chunked encoding transfer.\n<p>Required roles: 'users.datalake.editors' or 'users.datalake.admins'.</p>",
"operationId": "post_data_alpha_ddms_v3_welllogs__record_id__data_post",
"operationId": "write_record_data",
"parameters": [
{
"in": "path",
@@ -10160,7 +10228,7 @@
"/alpha/ddms/v3/welllogs/{record_id}/sessions/{session_id}/data": {
"post": {
"description": "Send a data chunk. Session must be complete/commit once all chunks are sent. This will create a new and single version aggregating all and previous bulk.Support JSON and Parquet format ('Content_Type' must be set accordingly). In case of JSON the orient must be set accordingly. Support http chunked encoding.<p>Required roles: 'users.datalake.editors' or 'users.datalake.admins'.</p>",
"operationId": "post_chunk_data_alpha_ddms_v3_welllogs__record_id__sessions__session_id__data_post",
"operationId": "post_chunk_data",
"parameters": [
{
"in": "path",
......
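The requestBody blocks added above document the bulk write endpoints: the payload is either a Parquet dataframe or pandas-style JSON, with the Content-Type header matching the format and, for JSON, the orient indicated. Below is a hedged client-side sketch of such a call against the welllogs write-data path implied by the operationId shown in this diff; the host, token, data-partition-id header and passing orient as a query parameter are illustrative assumptions, not taken from the spec excerpt:

import pandas as pd
import requests

BASE_URL = "https://wdms.example.com"                        # hypothetical host
RECORD_ID = "opendes:work-product-component--WellLog:123"    # hypothetical record id
TOKEN = "<access token>"

# Dataframe matching the 5-row, 3-column example in the description.
df = pd.DataFrame({
    "Ref":   [0.0, 0.5, 1.0, 1.5, 2.0],
    "col_1": [1111.1, 1111.2, 1111.3, 1111.4, 1111.5],
    "col_2": [2222.1, 2222.2, 2222.3, 2222.4, 2222.5],
})

resp = requests.post(
    f"{BASE_URL}/alpha/ddms/v3/welllogs/{RECORD_ID}/data",
    params={"orient": "split"},               # assumed to be a query parameter
    data=df.to_json(orient="split"),          # matches the 'split' example above
    headers={
        "Content-Type": "application/json",   # must match the payload format
        "Authorization": f"Bearer {TOKEN}",
        "data-partition-id": "opendes",       # assumed OSDU partition header
    },
)
resp.raise_for_status()

Sending Parquet instead would mean posting the serialized bytes with Content-Type set to application/x-parquet, as described in the same requestBody.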