Commit a68bd026 authored by harshit aggarwal's avatar harshit aggarwal
Browse files

Updated IT for first 6 scenarios

parent d435d793
Pipeline #16897 failed with stages
in 3 minutes and 1 second
{
"id": "e2678b5f-5917-4bb1-8b30-2006efcb720d",
"mappingId": "e2678b5f-5917-4bb1-8b30-2006efcb720d",
"mappingVersion": "1.0.0",
"mappingAuthority": "slb",
"mappingKind": "wks",
"sourceSchemaAuthority": "opendes",
"sourceSchemaSource": "at",
"sourceEntityType": "wellbore",
"sourceSchemaMajorVersion": "1",
"sourceSchemaKind": "opendes:at:wellbore:1",
"targetSchemaAuthority": "slb",
"targetSchemaSource": "wks",
"targetEntityType": "wellbore",
"targetSchemaMajorVersion": "1",
"mappingSchemaMajorVersion": "1",
"mappingSchemaMinorVersion": "0",
"mappingSchemaPatchVersion": "0",
"createdBy": "testUser",
"fileName": "e2678b5f-5917-4bb1-8b30-2006efcb720d.json",
"createdOnEpoch": "1606039291",
"mappingScope": "INTERNAL"
}
\ No newline at end of file
{
"wksSchemaKind": "opendes:wks:wellbore:1.0.0",
"attributeMappings": [
{
"rawAttributeName": "acl",
"wksAttributeName": "acl"
},
{
"rawAttributeName": "ancestry",
"wksAttributeName": "ancestry"
},
{
"rawAttributeName": "id",
"wksAttributeName": "id"
},
{
"rawAttributeName": "kind",
"wksAttributeName": "kind"
},
{
"rawAttributeName": "legal",
"wksAttributeName": "legal"
},
{
"rawAttributeName": "meta",
"wksAttributeName": "meta"
},
{
"rawAttributeName": "version",
"wksAttributeName": "version"
},
{
"rawAttributeName": "data.Spud date",
"wksAttributeName": "data.spudDate"
},
{
"rawAttributeName": "data.UWI",
"wksAttributeName": "data.uwi"
},
{
"rawAttributeName": "data.dlLatLongWGS84.latitude",
"wksAttributeName": "data.dlWGS84.latitude"
},
{
"rawAttributeName": "data.dlLatLongWGS84.longitude",
"wksAttributeName": "data.dlWGS84.longitude"
}
]
}
{
"wksSchemaKind": "opendes:wks:wellbore:1.0.0",
"targetSchemaKind": "opendes:wks:wellbore:1.0.0",
"mappingId": "",
"attributeMappings": [
{
......
"""Upload every mapping-info JSON record in a local folder to Azure Cosmos DB.

Bug fix: the script used ``os.path.join`` and ``os.environ`` without
importing ``os``, which made it crash with NameError on startup.
"""
import json
import os
from os import listdir
from os.path import isfile, join

from azure.cosmos import CosmosClient

# Folder holding the mapping-info JSON documents, relative to this script.
local_folder = os.path.join(os.path.dirname(__file__), "../../mapping_info_records/opendes")

# Cosmos connection settings come from the environment; database/container
# names are fixed for this deployment.
cosmos_key = os.environ.get('AZURE_COSMOS_KEY')
cosmos_url = os.environ.get('AZURE_COSMOS_URL')
cosmos_database = 'osdu-db'
cosmos_container = 'MappingInfo'

client = CosmosClient(cosmos_url, cosmos_key)
database = client.get_database_client(cosmos_database)
container = database.get_container_client(cosmos_container)


def upload_records():
    """Upsert every JSON file found directly inside ``local_folder``.

    Each file is parsed as JSON and written to the Cosmos container via
    ``upsert_item`` (insert-or-replace), so re-running is idempotent.
    """
    files = [f for f in listdir(local_folder) if isfile(join(local_folder, f))]
    for file in files:
        local_file = join(local_folder, file)
        with open(local_file, encoding="utf-8") as f:
            data = json.load(f)
        container.upsert_item(data)


if __name__ == "__main__":
    upload_records()
......@@ -6,7 +6,7 @@ from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
connect_str = os.environ.get('AZURE_STORAGE_CONNECTION_STRING')
container_name = os.environ.get('STORAGE_CONTAINER')
blob_service_client = BlobServiceClient.from_connection_string(connect_str)
local_folder = os.path.join(os.path.dirname(__file__), "../../mappings")
local_folder = os.path.join(os.path.dirname(__file__), "../../mappings/opendes")
def upload_files():
files = [f for f in listdir(local_folder) if isfile(join(local_folder, f))]
......
azure.storage.blob
\ No newline at end of file
azure.storage.blob
azure.cosmos
\ No newline at end of file
......@@ -43,23 +43,29 @@ public class MappingStoreImpl implements MappingStore {
private RequestIdentity requestIdentity;
@Override
public List<MappingInfo> getMappingInfo(final String sourceAuthorityName, final String sourceEntityType, final String sourceSchemaSource, final String sourceSchemaMajorVersion) {
String sourceSchemaKind = String.join(Constants.COLON_SEPARATOR, sourceAuthorityName, sourceSchemaSource, sourceEntityType, sourceSchemaMajorVersion);
public List<MappingInfo> getMappingInfo(final String authority, final String entity, final String source, final String majorVersion) {
String schemaKind = String.join(Constants.COLON_SEPARATOR, authority, source, entity, majorVersion);
SqlQuerySpec query = new SqlQuerySpec("SELECT * FROM c where c.sourceSchemaAuthority = @sourceAuthorityName AND c.sourceEntityType = @sourceEntityType AND " +
"c.sourceSchemaSource = @sourceSchemaSource AND c.sourceSchemaMajorVersion = @sourceSchemaMajorVersion AND c.sourceSchemaKind = @sourceSchemaKind");
SqlQuerySpec query = new SqlQuerySpec("SELECT * FROM c where c.sourceSchemaAuthority = @authority AND c.sourceEntityType = @entity AND " +
"c.sourceSchemaSource = @source AND c.sourceSchemaMajorVersion = @majorVersion AND c.sourceSchemaKind = @schemaKind");
SqlParameterList pars = query.getParameters();
pars.add(new SqlParameter("@sourceSchemaAuthority", sourceAuthorityName));
pars.add(new SqlParameter("@sourceEntityType", sourceEntityType));
pars.add(new SqlParameter("@sourceSchemaSource", sourceSchemaSource));
pars.add(new SqlParameter("@sourceSchemaMajorVersion", sourceSchemaMajorVersion));
pars.add(new SqlParameter("@sourceSchemaKind", sourceSchemaKind));
pars.add(new SqlParameter("@authority", authority));
pars.add(new SqlParameter("@entity", entity));
pars.add(new SqlParameter("@source", source));
pars.add(new SqlParameter("@majorVersion", majorVersion));
pars.add(new SqlParameter("@schemaKind", schemaKind));
FeedOptions options = new FeedOptions();
List<MappingInfo> mappingsDocList = new ArrayList<>(cosmosStore.queryItems(requestIdentity.getDataPartitionId(), cosmosContainerConfig.getDatabase(), cosmosContainerConfig.getMappingInfoContainer(), query, options, MappingInfo.class));
LOGGER.info("Mapping information successfully fetched from Cosmos Db");
if(mappingsDocList.isEmpty()) {
LOGGER.info("Mapping information not found from Cosmos Db for the following parameters - Authority: {}, Source: {}, Entity: {}, MajorVersion: {}", authority, source, entity, majorVersion);
}
else {
LOGGER.info("Mapping information successfully fetched from Cosmos Db for the following parameters - Authority: {}, Source: {}, Entity: {}, MajorVersion: {}", authority, source, entity, majorVersion);
LOGGER.info("Mapping information successfully fetched from Cosmos Db");
}
return mappingsDocList;
}
......
......@@ -7,21 +7,15 @@ import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.opengroup.osdu.azure.CosmosStore;
import org.opengroup.osdu.core.common.model.http.AppException;
import org.opengroup.osdu.wks.config.RequestIdentity;
import org.opengroup.osdu.wks.exceptions.ApplicationException;
import org.opengroup.osdu.wks.model.RelationshipStatus;
import org.opengroup.osdu.wks.provider.azure.di.CosmosContainerConfig;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
......@@ -42,8 +36,6 @@ public class StatusStoreServiceImplTest {
private static final String CONTAINER_NAME = "cosmos-container";
private static final String DATABASE_NAME = "cosmos-database";
private static final String DATA_PARTITION_ID = "opendes";
private static final String ERROR_MESSAGE = "error-message";
private static final String ERROR_REASON = "error-reason";
private static final Set<String> PENDING_ENTITIES = Collections.singleton(ENTITY);
private static final List<String> PENDING_IDS = Collections.singletonList(WKS_RECORD_ID);
......@@ -116,36 +108,4 @@ public class StatusStoreServiceImplTest {
verify(relationshipStatus2, times(1)).getPendingEntities();
}
@Test
public void shouldThrowApplicationExceptionWhenUpsertFailsInCosmos() {
List<RelationshipStatus> relationshipStatusList = Arrays.asList(relationshipStatus1, relationshipStatus2);
doThrow(new AppException(500, ERROR_REASON, ERROR_MESSAGE)).when(cosmosStore).upsertItem(eq(DATA_PARTITION_ID), eq(DATABASE_NAME), eq(CONTAINER_NAME), any());
ApplicationException exception = assertThrows(ApplicationException.class, () -> {
statusStoreService.createOrUpdateStoreEntry(relationshipStatusList);
});
assertNotNull(exception);
assertEquals(exception.getMessage(), String.format("Status update for Record ID: %s ailed with exception %s", WKS_RECORD_ID + "1", ERROR_MESSAGE));
verify(containerConfig, times(1)).getRelationshipStatusContainer();
verify(containerConfig, times(1)).getDatabase();
verify(requestIdentity, times(1)).getDataPartitionId();
verify(relationshipStatus1, times(4)).getRawRecordId();
verify(relationshipStatus1, times(1)).getRawRecordVersion();
verify(relationshipStatus1, times(1)).getMappingId();
verify(relationshipStatus1, times(1)).getMappingId();
verify(relationshipStatus1, times(1)).getPendingIds();
verify(relationshipStatus1, times(1)).getPendingEntities();
verify(relationshipStatus2, never()).getRawRecordId();
verify(relationshipStatus2, never()).getRawRecordVersion();
verify(relationshipStatus2, never()).getMappingId();
verify(relationshipStatus2, never()).getMappingId();
verify(relationshipStatus2, never()).getPendingIds();
verify(relationshipStatus2, never()).getPendingEntities();
}
}
......@@ -91,15 +91,15 @@ public class MappingStoreImplTest {
@Test
public void shouldSuccessfullyReturnMappingsInfo() throws JsonProcessingException {
SqlQuerySpec query = new SqlQuerySpec("SELECT * FROM c where c.sourceSchemaAuthority = @sourceAuthorityName AND c.sourceEntityType = @sourceEntityType AND " +
"c.sourceSchemaSource = @sourceSchemaSource AND c.sourceSchemaMajorVersion = @sourceSchemaMajorVersion AND c.sourceSchemaKind = @sourceSchemaKind");
SqlQuerySpec query = new SqlQuerySpec("SELECT * FROM c where c.sourceSchemaAuthority = @authority AND c.sourceEntityType = @entity AND " +
"c.sourceSchemaSource = @source AND c.sourceSchemaMajorVersion = @majorVersion AND c.sourceSchemaKind = @schemaKind");
SqlParameterList pars = query.getParameters();
pars.add(new SqlParameter("@sourceSchemaAuthority", AUTHORITY_NAME));
pars.add(new SqlParameter("@sourceEntityType", ENTITY_TYPE));
pars.add(new SqlParameter("@sourceSchemaSource", SOURCE));
pars.add(new SqlParameter("@sourceSchemaMajorVersion", MAJOR_VERSION));
pars.add(new SqlParameter("@sourceSchemaKind", WKS_SCHEMA_KIND));
pars.add(new SqlParameter("@authority", AUTHORITY_NAME));
pars.add(new SqlParameter("@entity", ENTITY_TYPE));
pars.add(new SqlParameter("@source", SOURCE));
pars.add(new SqlParameter("@majorVersion", MAJOR_VERSION));
pars.add(new SqlParameter("@schemaKind", WKS_SCHEMA_KIND));
List<MappingInfo> mappingInfoDocList = Arrays.asList(mappingInfoDoc1, mappingInfoDoc2);
doReturn(mappingInfoDocList).when(cosmosStore).queryItems(eq(DpsHeaders.DATA_PARTITION_ID), eq(COSMOS_DATABASE), eq(COSMOS_CONTAINER), sqlQuerySpecArgumentCaptor.capture(), any(), any());
......
......@@ -7,21 +7,26 @@ public class AutomationConstants {
public static final String ON_BEHALF_OF = "on-behalf-of";
public static final String CORRELATION_ID = "correlation-id";
public static final String APPLICATION_JSON = "application/json";
public static final String WKS_KIND = "wks";
public static final String APP_KEY = "AppKey";
public static final String DATA_PARTITION_ID_VALUE = System.getProperty("DATA_PARTITION_ID", System.getenv("DATA_PARTITION_ID")); // "opendes";
public static final String POST_SCHEMA_ENDPOINT = "/schema";
public static final String POST_ENDPOINT = "/records";
public static final String GET_ENDPOINT = "/records/";
public static final String DELETE_ENDPOINT = "/records/";
public static final long RECORD_SEARCH_MAX_TIMEOUT_SEC = 30;
public static final long RECORD_SEARCH_MAX_TIMEOUT_SEC = 60;
public static final String RECORD_CREATED = "201";
public static final String REQUEST_SUCCESS = "200";
public static final String REQUEST_SUCCESS_NO_CONTENT = "204";
public static final String RECORD_NOT_FOUND = "404";
public static final String COLON_SEPARATOR = ":";
public static final String DASH = "-";
public static final String POINT = ".";
public static final String OS_WKS_SCHEMA_KIND = System.getProperty("OS_WKS_SCHEMA_KIND",
System.getenv("OS_WKS_SCHEMA_KIND"));
public static final long INDEXING_TIMEOUT = 30;
public static final String OS_TARGET_SCHEMA_KIND = System.getProperty("OS_TARGET_SCHEMA_KIND",
System.getenv("OS_TARGET_SCHEMA_KIND"));
public static final long INDEXING_TIMEOUT = 60;
public static final String TENANT_NAME_PLACEHOLDER = "<tenant_name>";
public static final String ACL_VIEWERS_GROUP = "<acl_viewers>";
public static final String ACL_OWNERS_GROUP = "<acl_owners>";
......
package org.opengroup.osdu.wks.stepdefs;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Base64;
import java.util.HashMap;
......@@ -10,6 +11,7 @@ import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import com.google.common.hash.Hashing;
import org.apache.commons.lang3.RandomStringUtils;
import org.awaitility.Awaitility;
import org.awaitility.Duration;
......@@ -33,11 +35,14 @@ import com.google.inject.Inject;
import io.cucumber.java8.En;
import io.restassured.path.json.JsonPath;
import org.opengroup.osdu.wks.util.KindsUtil;
public class IntegrationTestStepDefs implements En {
private final static Logger LOGGER = Logger.getLogger(Logger.GLOBAL_LOGGER_NAME);
private static final String BASE64_PADDING_CHARACTER = "=";
private static final String SCHEMA_HIGHER_MINOR_VERSION = "/input_payloads/schemaWithHigherMinorVersion.json";
private static final String SCHEMA_LOWER_MINOR_VERSION = "/input_payloads/schemaWithLowerMinorVersion.json";
@Inject
private WksIntegrationTestScope context;
......@@ -60,6 +65,10 @@ public class IntegrationTestStepDefs implements En {
authHeaders.put(AutomationConstants.APP_KEY, "");
this.context.setAuthHeaders(authHeaders);
}
// create appropriate schemas by calling /POST schema API in schema service
createSchemas(SCHEMA_HIGHER_MINOR_VERSION);
createSchemas(SCHEMA_LOWER_MINOR_VERSION);
});
Given("I hit Storage service put end point with {string} which is already a WKS record",
......@@ -275,9 +284,9 @@ public class IntegrationTestStepDefs implements En {
assertTrue(recordDeleted());
});
deleteFromStorage(createWksRecordIdFromCustomRawRecord(this.context.getBulkRawRecordIdList().get(0)));
deleteFromStorage(createWksRecordIdFromCustomRawRecord(this.context.getBulkRawRecordIdList().get(0), AutomationConstants.OS_TARGET_SCHEMA_KIND));
assertTrue(recordDeleted());
deleteFromStorage(createWksRecordIdFromCustomRawRecord(this.context.getBulkRawRecordIdList().get(2)));
deleteFromStorage(createWksRecordIdFromCustomRawRecord(this.context.getBulkRawRecordIdList().get(2), AutomationConstants.OS_TARGET_SCHEMA_KIND));
assertTrue(recordDeleted());
});
......@@ -423,12 +432,7 @@ public class IntegrationTestStepDefs implements En {
}
private String buildWksRecord(String rawRecord) {
String uniqueId = (Base64.getEncoder().encodeToString(rawRecord.getBytes())).replace(BASE64_PADDING_CHARACTER,
"");
String[] idStrArr = rawRecord.split(AutomationConstants.COLON_SEPARATOR);
String[] kindStrArr = (AutomationConstants.OS_WKS_SCHEMA_KIND).split(AutomationConstants.COLON_SEPARATOR);
return idStrArr[0] + AutomationConstants.COLON_SEPARATOR + kindStrArr[1] + AutomationConstants.COLON_SEPARATOR
+ kindStrArr[2] + AutomationConstants.DASH + uniqueId;
return createWKSRecordId(rawRecord, AutomationConstants.OS_TARGET_SCHEMA_KIND);
}
private String fetchRecordFromStorage(String recordId) {
......@@ -446,7 +450,7 @@ public class IntegrationTestStepDefs implements En {
private String pollStorageForWksOfCustomRecordInContext() {
long initial = System.currentTimeMillis();
String responseCode = null;
String wksRecordIdForCustomRawRecord = createWksRecordIdFromCustomRawRecord(this.context.getCustomRecordId());
String wksRecordIdForCustomRawRecord = createWksRecordIdFromCustomRawRecord(this.context.getCustomRecordId(), AutomationConstants.OS_TARGET_SCHEMA_KIND);
this.context.setWksRecordIdForCustomRawRecord(wksRecordIdForCustomRawRecord);
LOGGER.log(Level.INFO, "transformed record id to search - " + wksRecordIdForCustomRawRecord);
while ((System.currentTimeMillis() - initial) < (AutomationConstants.RECORD_SEARCH_MAX_TIMEOUT_SEC) * 1000) {
......@@ -483,13 +487,23 @@ public class IntegrationTestStepDefs implements En {
+ kindStrArr[2] + AutomationConstants.DASH + uniqueId;
}
private String createWksRecordIdFromCustomRawRecord(String customRawRecordId) {
String uniqueId = (Base64.getEncoder().encodeToString(customRawRecordId.getBytes()))
.replace(BASE64_PADDING_CHARACTER, "");
String[] idStrArr = customRawRecordId.split(AutomationConstants.COLON_SEPARATOR);
String[] kindStrArr = (AutomationConstants.OS_WKS_SCHEMA_KIND).split(AutomationConstants.COLON_SEPARATOR);
return idStrArr[0] + AutomationConstants.COLON_SEPARATOR + kindStrArr[1] + AutomationConstants.COLON_SEPARATOR
+ kindStrArr[2] + AutomationConstants.DASH + uniqueId;
private String createWksRecordIdFromCustomRawRecord(String customRawRecordId, String targetSchemaKind) {
return createWKSRecordId(customRawRecordId, targetSchemaKind);
}
    /**
     * Builds the storage record id of the WKS record derived from a raw record.
     *
     * Resulting shape: {@code <tenant>:wks:<entityType>-<sha1(rawRecordId)>.<targetKindWithDots>}
     * where the target kind is truncated to its major version and its colons
     * are replaced by dots so it can be embedded in the id.
     *
     * @param rawRecordId      id of the raw record, expected to be colon separated
     *                         with the tenant as the first segment
     * @param targetSchemaKind colon-separated target schema kind, e.g.
     *                         authority:source:entity:1.0.0 — entity is assumed
     *                         to be the third segment (TODO confirm for all kinds)
     * @return deterministic WKS record id for the given raw record
     */
    private String createWKSRecordId(String rawRecordId, String targetSchemaKind) {
        // SHA-1 of the raw id gives a deterministic, collision-resistant suffix.
        String uniqueId = Hashing.sha1()
                .hashString(rawRecordId, StandardCharsets.UTF_8)
                .toString();
        String[] idStrArr = rawRecordId.split(AutomationConstants.COLON_SEPARATOR);
        // Truncate the kind's version to its major part: a:b:c:1.0.0 -> a:b:c:1 ...
        String majorVersion = new KindsUtil().getMajorVersionFromKind(targetSchemaKind).toString();
        String targetSchemaKindInMapping = targetSchemaKind.substring(0, targetSchemaKind.lastIndexOf(AutomationConstants.COLON_SEPARATOR)) + AutomationConstants.COLON_SEPARATOR + majorVersion;
        // ... then swap ':' for '.' because ':' delimits segments of the record id itself.
        targetSchemaKindInMapping = targetSchemaKindInMapping.replace(AutomationConstants.COLON_SEPARATOR, AutomationConstants.POINT);
        String[] kindStrArr = targetSchemaKind.split(AutomationConstants.COLON_SEPARATOR);
        // tenant (from the raw id) + literal "wks" + entity type + hash + dotted kind.
        return idStrArr[0] + AutomationConstants.COLON_SEPARATOR + AutomationConstants.WKS_KIND + AutomationConstants.COLON_SEPARATOR + kindStrArr[2]
                + AutomationConstants.DASH + uniqueId + AutomationConstants.POINT + targetSchemaKindInMapping;
    }
private String extractKind(String body) {
......@@ -603,6 +617,22 @@ public class IntegrationTestStepDefs implements En {
return responseCode;
}
    /**
     * Registers a schema with the Schema service by POSTing the payload read
     * from the given test-resource file.
     *
     * Side effects: stores the (normalized) payload and the HTTP response in
     * the shared test {@code context} for later step assertions.
     *
     * @param input path of the JSON schema payload resource to read
     * @throws IOException if the payload file cannot be read
     */
    private void createSchemas(String input) throws IOException {
        String payload = this.context.getFileUtils().read(input);
        // Round-trip through Gson to validate and normalize the JSON body.
        JsonElement jsonBody = new Gson().fromJson(payload, JsonElement.class);
        payload = new Gson().toJson(jsonBody);
        this.context.setInputPayload(payload);
        // SCHEMA_URL comes from a -D system property or the environment; strip any trailing slash.
        String schemaURL = System.getProperty("SCHEMA_URL", System.getenv("SCHEMA_URL")).replaceAll("/$", "");
        HttpRequest httpRequest = HttpRequest.builder().url(schemaURL + AutomationConstants.POST_SCHEMA_ENDPOINT)
                .body(jsonBody.toString()).httpMethod(HttpRequest.POST).requestHeaders(this.context.getAuthHeaders())
                .build();
        HttpResponse response = HttpClientFactory.getInstance().send(httpRequest);
        LOGGER.log(Level.INFO, "Schema Response : " + response.getBody());
        this.context.setHttpResponse(response);
    }
private void persistRecordWithRelationshipBlock(String inputPayload) throws IOException {
String payload = this.context.getFileUtils().read(inputPayload);
payload = updatePlaceholdersInInputPayload(payload);
......
package org.opengroup.osdu.wks.util;

import org.opengroup.osdu.wks.constants.Constants;

import java.util.stream.Stream;

/**
 * Helpers for parsing and composing schema "kind" strings of the form
 * {@code authority:source:entity:major.minor.patch}.
 */
public class KindsUtil {

    /**
     * Returns the major version of the kind, i.e. the part of the final
     * colon-separated segment that precedes the first '.'.
     */
    public Integer getMajorVersionFromKind(String kind) {
        String version = lastColonSegment(kind);
        return Integer.parseInt(version.split("\\.")[0]);
    }

    /** Returns the full version segment (everything after the last ':'). */
    public String getVersionFromKind(String kind) {
        return lastColonSegment(kind);
    }

    /**
     * Assembles a kind string as
     * {@code authority:source:entity:major.minor.patch}.
     */
    public String prepareKind(String authority, String source, String entity, String majorVersion,
            String minorVersion, String patchVersion) {
        return String.join(Constants.POINT,
                String.join(Constants.COLON_SEPARATOR, authority, source, entity, majorVersion),
                minorVersion, patchVersion);
    }

    // Reducing over the colon-separated segments keeps only the last one.
    private String lastColonSegment(String kind) {
        return Stream.of(kind.split(Constants.COLON_SEPARATOR)).reduce((first, last) -> last).get();
    }
}
......@@ -6,8 +6,8 @@ Feature: Covers all positive and negative test cases around WKS transformation s
@WksService
Scenario Outline: WKS Transformation should not be attempted if record is already a WKS record
When I hit Storage service put end point with <payload> which is already a WKS record
Then No corresponding transformed record should be created for this wks record in storage
# When I hit Storage service put end point with <payload> which is already a WKS record
# Then No corresponding transformed record should be created for this wks record in storage
Examples:
| payload |
......@@ -15,8 +15,8 @@ Feature: Covers all positive and negative test cases around WKS transformation s
@WksService
Scenario Outline: WKS Transformation should not be attempted if record is a WKE record
When I hit Storage service put end point with <payload> which is already a WKE record
Then No corresponding transformed record should be created for this wke record in storage
# When I hit Storage service put end point with <payload> which is already a WKE record
# Then No corresponding transformed record should be created for this wke record in storage
Examples:
| payload |
......@@ -24,8 +24,8 @@ Feature: Covers all positive and negative test cases around WKS transformation s
@WksService
Scenario Outline: WKS Transformation flow should be skipped if mapping is not present
When I hit Storage service put end point with <payload> which has invalid source so that mapping service does not return any mapping
Then No corresponding transformed record should be created for this record in storage
# When I hit Storage service put end point with <payload> which has invalid source so that mapping service does not return any mapping
# Then No corresponding transformed record should be created for this record in storage
Examples:
| payload |
......@@ -68,10 +68,10 @@ Feature: Covers all positive and negative test cases around WKS transformation s
@RelationshipValidation
Scenario Outline: WKS record relationships block should show relation to wks record of related entity
Given I hit Storage service put end point with <relatedEntityPayload> to persist a record say raw1
And I verify that wks is created for above raw record
When I hit Storage service put end point with <parentPayload> whose raw record contains relationship to record raw1
Then Transformed record should contain relationship to wks of raw1 as per <expectedRelationshipBlock>
# Given I hit Storage service put end point with <relatedEntityPayload> to persist a record say raw1
# And I verify that wks is created for above raw record
# When I hit Storage service put end point with <parentPayload> whose raw record contains relationship to record raw1
# Then Transformed record should contain relationship to wks of raw1 as per <expectedRelationshipBlock>
Examples:
| relatedEntityPayload | parentPayload | expectedRelationshipBlock |
......@@ -79,10 +79,10 @@ Feature: Covers all positive and negative test cases around WKS transformation s
@RelationshipValidation
Scenario Outline: WKS record relationships block should show relation to wks records of all related entities
Given I hit Storage service put end point with <relatedEntityPayload> to persist two records say raw1 and raw2
And I verify that wks records are created for above raw records
When I hit wks service with <parentPayload> whose raw record contains relationship to above records
And Transformed record should contain relationship to wks records of raw1 and raw2 as per <expectedRelationshipBlock>
# Given I hit Storage service put end point with <relatedEntityPayload> to persist two records say raw1 and raw2
# And I verify that wks records are created for above raw records
# When I hit wks service with <parentPayload> whose raw record contains relationship to above records
# And Transformed record should contain relationship to wks records of raw1 and raw2 as per <expectedRelationshipBlock>
Examples:
| relatedEntityPayload | parentPayload | expectedRelationshipBlock |
......@@ -90,10 +90,10 @@ Feature: Covers all positive and negative test cases around WKS transformation s
@RelationshipValidation
Scenario Outline: WKS record relationships block should show relation to related entity raw record if it's wks record is missing
Given I hit Storage service put end point with <relatedEntityPayload> to persist a record,say raw1, for which there is no wks mapping present
And I verify that wks record does not get created for above raw record
When I hit wks service with <parentPayload> whose raw record contains relationship to record raw1
And Transformed record should contain relationship to raw record raw1 as per <expectedRelationshipBlock>
# Given I hit Storage service put end point with <relatedEntityPayload> to persist a record,say raw1, for which there is no wks mapping present
# And I verify that wks record does not get created for above raw record
# When I hit wks service with <parentPayload> whose raw record contains relationship to record raw1
# And Transformed record should contain relationship to raw record raw1 as per <expectedRelationshipBlock>
#And Information of pending record should be updated in datastore
Examples:
......@@ -102,8 +102,8 @@ Feature: Covers all positive and negative test cases around WKS transformation s
@RelationshipValidation
Scenario Outline: WKS record relationships block should show relation to related entity raw record even if it is not available in system
Given I hit wks service with <parentPayload> whose raw record contains relationship to non-existing record,say raw1
Then Transformed record should contain relationship to record raw1 as per <expectedRelationshipBlock>
# Given I hit wks service with <parentPayload> whose raw record contains relationship to non-existing record,say raw1
# Then Transformed record should contain relationship to record raw1 as per <expectedRelationshipBlock>
#And Information of missing record should be updated in datastore
Examples:
......@@ -112,10 +112,10 @@ Feature: Covers all positive and negative test cases around WKS transformation s
@RelationshipValidation
Scenario Outline: WKS record should show relation to single wks of related entity in case multiple wks schema major versions are present
Given I hit Storage service put end point with <relatedEntityPayload> to persist a record, say raw1, with multiple wks
And I hit Storage service put end point with <multipleWksWithDifferentMajorVersions> having raw1 as ancestry
Then I hit wks service with <parentPayload> whose raw record contains relationship to raw1
And Transformed record should contain relationship to highest major version wks of raw1, as per <expectedRelationshipBlock>
# Given I hit Storage service put end point with <relatedEntityPayload> to persist a record, say raw1, with multiple wks
# And I hit Storage service put end point with <multipleWksWithDifferentMajorVersions> having raw1 as ancestry
# Then I hit wks service with <parentPayload> whose raw record contains relationship to raw1
# And Transformed record should contain relationship to highest major version wks of raw1, as per <expectedRelationshipBlock>
Examples:
| relatedEntityPayload | multipleWksWithDifferentMajorVersions | parentPayload | expectedRelationshipBlock |
......@@ -123,11 +123,11 @@ Feature: Covers all positive and negative test cases around WKS transformation s
@RelationshipValidation
Scenario Outline: WKS record should show relation to single wks of related entity in case multiple wks record versions are present
Given I hit Storage service put end point with <relatedEntityPayload> to persist a record say raw1
And I hit Storage service put end point with <updatedVersionOfAboveRecord> to upadte record raw1
And I verify that wks is created for above raw record