diff --git a/testing/.gitignore b/testing/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..c43cba27a5527052f729609094b8675838df5fe1
--- /dev/null
+++ b/testing/.gitignore
@@ -0,0 +1,4 @@
+# Output
+target/*
+*.pyc
+/target/
diff --git a/testing/indexer-test-core/pom.xml b/testing/indexer-test-core/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..3e3821f726bd738cf0f6475c1c8f04da14512b3d
--- /dev/null
+++ b/testing/indexer-test-core/pom.xml
@@ -0,0 +1,119 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>org.opengroup.osdu.indexer</groupId>
+    <artifactId>indexer-test-core</artifactId>
+    <version>0.0.1</version>
+
+    <properties>
+        <maven.compiler.target>1.8</maven.compiler.target>
+        <maven.compiler.source>1.8</maven.compiler.source>
+        <cucumber.version>1.2.5</cucumber.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.google.api-client</groupId>
+            <artifactId>google-api-client</artifactId>
+            <version>1.28.0</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>com.fasterxml.jackson.core</groupId>
+                    <artifactId>jackson-core</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
+        <!-- Cucumber -->
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.12</version>
+        </dependency>
+        <dependency>
+            <groupId>info.cukes</groupId>
+            <artifactId>cucumber-java</artifactId>
+            <version>${cucumber.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>info.cukes</groupId>
+            <artifactId>cucumber-junit</artifactId>
+            <version>${cucumber.version}</version>
+        </dependency>
+
+        <!-- Gson: Java to Json conversion -->
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+            <version>2.8.5</version>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.jaxrs</groupId>
+            <artifactId>jackson-jaxrs-json-provider</artifactId>
+            <version>2.9.9</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.glassfish</groupId>
+            <artifactId>javax.json</artifactId>
+            <version>1.1.4</version>
+        </dependency>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-client</artifactId>
+            <version>1.19.4</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <version>1.18.2</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+            <version>2.6</version>
+        </dependency>
+
+        <!--Elasticsearch-->
+        <dependency>
+            <groupId>org.elasticsearch</groupId>
+            <artifactId>elasticsearch</artifactId>
+            <version>6.6.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.elasticsearch.client</groupId>
+            <artifactId>elasticsearch-rest-client</artifactId>
+            <version>6.6.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.elasticsearch.client</groupId>
+            <artifactId>elasticsearch-rest-high-level-client</artifactId>
+            <version>6.6.2</version>
+        </dependency>
+
+        <!--Logging-->
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-to-slf4j</artifactId>
+            <version>2.11.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-jdk14</artifactId>
+            <version>1.8.0-beta4</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>27.1-jre</version>
+        </dependency>
+    </dependencies>
+</project>
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/DeleteSchemaSteps.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/DeleteSchemaSteps.java
new file mode 100644
index 0000000000000000000000000000000000000000..a6a9404c68bf35b90955d43ea05f376a98ebf79f
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/DeleteSchemaSteps.java
@@ -0,0 +1,103 @@
+package org.opengroup.osdu.common;
+
+import com.sun.jersey.api.client.ClientResponse;
+
+import cucumber.api.DataTable;
+
+import org.opengroup.osdu.models.Setup;
+import org.opengroup.osdu.models.TestIndex;
+import org.opengroup.osdu.response.ErrorResponseMock;
+import org.opengroup.osdu.util.Config;
+import org.opengroup.osdu.util.HTTPClient;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.*;
+
+
+public class DeleteSchemaSteps extends TestsBase {
+
+    private static String timeStamp = String.valueOf(System.currentTimeMillis());
+
+    private Map<String, String> headers = httpClient.getCommonHeader();
+
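+    // one-time setup guard: flipped after the first scenario provisions the test indexes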
+    private static boolean dunit = false;
+
+    private String deleteUrl;
+
+    private Map<String, TestIndex> inputRecordMap = new HashMap<>();
+
+    public DeleteSchemaSteps(HTTPClient httpClient) {
+        super(httpClient);
+    }
+
+    /******************One time cleanup for whole feature**************/
+    public void tearDown() {
+        for (String kind : inputRecordMap.keySet()) {
+            TestIndex testIndex = inputRecordMap.get(kind);
+            testIndex.cleanupIndex();
+        }
+    }
+
+    public void the_elastic_search_is_initialized_with_the_following_data(DataTable dataTable) throws Throwable {
+        List<Setup> inputList = dataTable.asList(Setup.class);
+        for (Setup input : inputList) {
+            TestIndex testIndex = new TestIndex();
+            testIndex.setHttpClient(httpClient);
+            testIndex.setIndex(generateActualName(input.getIndex(), timeStamp));
+            testIndex.setKind(generateActualName(input.getKind(), timeStamp));
+            testIndex.setMappingFile(input.getMappingFile());
+            testIndex.setRecordFile(input.getRecordFile());
+            inputRecordMap.put(testIndex.getKind(), testIndex);
+        }
+        /******************One time setup for whole feature**************/
+        if (!dunit) {
+            Runtime.getRuntime().addShutdownHook(new Thread() {
+                public void run() {
+                    tearDown();
+                }
+            });
+            dunit = true;
+            for (String kind : inputRecordMap.keySet()) {
+                TestIndex testIndex = inputRecordMap.get(kind);
+                testIndex.setupIndex();
+            }
+        }
+    }
+
+    public void i_send_a_delete_request_with(String kind) throws Throwable {
+        String actualKind = generateActualName(kind, timeStamp);
+        deleteUrl = String.format(this.getApi(), actualKind);
+    }
+
+    public void the_index_should_get_delete_and_I_should_get_response(int code) throws Throwable {
+        ClientResponse clientResponse = executeGetRequest(deleteUrl, headers, httpClient.getAccessToken());
+        assertEquals(code, clientResponse.getStatus());
+    }
+
+    public void i_should_get_response_with_reason_message_and_errors(List<Integer> codes, String type, String msg,
+                                                                     String error) throws Throwable {
+        ErrorResponseMock response = executeQuery(deleteUrl, null, headers, httpClient.getAccessToken(), ErrorResponseMock.class);
+        assertTrue(codes.contains(response.getResponseCode()));
+        if (response.getErrors() != null) {
+            assertEquals(generateActualName(error, timeStamp), response.getErrors().get(0));
+        }
+        assertNotNull(response.getMessage());
+        assertNotNull(response.getReason());
+        assertEquals(type.toLowerCase(), response.getReason().toLowerCase());
+        assertEquals(generateActualName(msg, timeStamp), response.getMessage());
+    }
+
+    @Override
+    protected String getHttpMethod() {
+        return "DELETE";
+    }
+
+    @Override
+    protected String getApi() {
+        return Config.getSearchBaseURL() + "index/%s";
+    }
+}
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/GetSchemaSteps.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/GetSchemaSteps.java
new file mode 100644
index 0000000000000000000000000000000000000000..5031153ebd8200657ce8929521cac995979ff1cb
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/GetSchemaSteps.java
@@ -0,0 +1,109 @@
+package org.opengroup.osdu.common;
+
+import com.google.gson.JsonObject;
+import com.google.gson.JsonParser;
+
+import org.opengroup.osdu.models.Setup;
+import org.opengroup.osdu.models.TestIndex;
+import org.opengroup.osdu.response.ErrorResponseMock;
+import org.opengroup.osdu.util.Config;
+import org.opengroup.osdu.util.HTTPClient;
+
+import com.sun.jersey.api.client.ClientResponse;
+import cucumber.api.DataTable;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+public class GetSchemaSteps extends TestsBase {
+
+    private static String timeStamp = String.valueOf(System.currentTimeMillis());
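+    // one-time setup guard: index setup runs only once for the whole feature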
+    private static boolean dunit = false;
+    private Map<String, String> headers = httpClient.getCommonHeader();
+    private Map<String, TestIndex> inputRecordMap = new HashMap<>();
+    private String schemaUrl;
+
+    public GetSchemaSteps(HTTPClient httpClient) {
+        super(httpClient);
+    }
+
+    /******************One time cleanup for whole feature**************/
+    public void tearDown() {
+        for (String kind : inputRecordMap.keySet()) {
+            TestIndex testIndex = inputRecordMap.get(kind);
+            testIndex.cleanupIndex();
+        }
+    }
+
+    public void the_elastic_search_is_initialized_with_the_following_data(DataTable dataTable) throws Throwable {
+        List<Setup> inputList = dataTable.asList(Setup.class);
+        for (Setup input : inputList) {
+            TestIndex testIndex = new TestIndex();
+            testIndex.setHttpClient(httpClient);
+            testIndex.setIndex(generateActualName(input.getIndex(), timeStamp));
+            testIndex.setKind(generateActualName(input.getKind(), timeStamp));
+            testIndex.setMappingFile(input.getMappingFile());
+            inputRecordMap.put(testIndex.getKind(), testIndex);
+        }
+        /******************One time setup for whole feature**************/
+        if (!dunit) {
+            Runtime.getRuntime().addShutdownHook(new Thread() {
+                public void run() {
+                    tearDown();
+                }
+            });
+            dunit = true;
+            for (String kind : inputRecordMap.keySet()) {
+                TestIndex testIndex = inputRecordMap.get(kind);
+                testIndex.addIndex();
+            }
+        }
+    }
+
+    public void i_send_get_schema_request_with(String kind) throws Throwable {
+        String actualKind = generateActualName(kind, timeStamp);
+        schemaUrl = String.format(this.getApi(), actualKind);
+    }
+
+    public void i_send_request_to_tenant(String tenant) throws Throwable {
+        headers = HTTPClient.overrideHeader(headers, getTenantMapping(tenant));
+    }
+
+    public void i_should_get_response_with_reason_message_and_errors(int responseCode, String type, String msg,
+                                                                     String error) throws Throwable {
+
+        ErrorResponseMock response = executeQuery(schemaUrl, null, headers, httpClient.getAccessToken(), ErrorResponseMock.class);
+        assertEquals(responseCode, response.getResponseCode());
+        if (response.getErrors() != null) {
+            assertEquals(generateActualName(error, timeStamp), response.getErrors().get(0));
+        }
+        assertEquals(type, response.getReason());
+        assertEquals(generateActualName(msg, timeStamp), response.getMessage());
+    }
+
+    public void i_should_get_status_with_response(int statusCode, String response) throws Throwable {
+
+        ClientResponse schemaResponse = executeGetRequest(schemaUrl, headers, httpClient.getAccessToken());
+        assertEquals(statusCode, schemaResponse.getStatus());
+        String expectedResponse = generateActualName(response, timeStamp);
+        JsonObject expectedJson = new JsonParser().parse(expectedResponse).getAsJsonObject();
+        JsonObject actualJson = new JsonParser().parse(schemaResponse.getEntity(String.class)).getAsJsonObject();
+        assertEquals(expectedJson, actualJson);
+    }
+
+    @Override
+    protected String getApi() {
+        return Config.getSearchBaseURL() + "index/schema/%s";
+    }
+
+    @Override
+    protected String getHttpMethod() {
+        return "GET";
+    }
+
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/MappingSteps.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/MappingSteps.java
new file mode 100644
index 0000000000000000000000000000000000000000..47e6fdbb215673724f557695f957a3981de170ec
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/MappingSteps.java
@@ -0,0 +1,114 @@
+package org.opengroup.osdu.common;
+
+import com.google.api.client.http.HttpMethods;
+import com.google.gson.Gson;
+import com.sun.jersey.api.client.ClientResponse;
+import cucumber.api.DataTable;
+import lombok.extern.java.Log;
+
+import java.util.*;
+
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.opengroup.osdu.models.Setup;
+import org.opengroup.osdu.request.Query;
+import org.opengroup.osdu.response.ResponseMock;
+import org.opengroup.osdu.util.Config;
+import org.opengroup.osdu.util.ElasticUtils;
+import org.opengroup.osdu.util.HTTPClient;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+@Log
+public class MappingSteps extends TestsBase {
+    private Map<String, String> headers;
+    private String fieldName;
+    private String timeStamp;
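+    // one-time setup guard: the Elasticsearch test data is initialized only once per feature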
+    private static boolean dunit = false;
+    private Query requestQuery = new Query();
+    private ElasticUtils elasticUtils = new ElasticUtils();
+    private Map<String, Object> requestPayload = new HashMap<>();
+
+    private static String updateIndexMappingUrl = Config.getIndexerBaseURL() + "kinds";
+    private static String searchQueryURL = Config.getSearchBaseURL() + "query";
+
+
+    public MappingSteps(HTTPClient httpClient) {
+        super(httpClient);
+        headers = httpClient.getCommonHeader();
+        fieldName="";
+        timeStamp = String.valueOf(System.currentTimeMillis());
+    }
+
+    public void the_elastic_search_is_initialized_with_the_following_data(DataTable dataTable) {
+        if (!dunit) {
+            List<Setup> inputList = dataTable.asList(Setup.class);
+            setUp(inputList, timeStamp);
+            dunit = true;
+        }
+    }
+
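+    // builds the PUT payload ({"indices": [...], "operator": "keyword"}) that i_should_get_response sends to the indexer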
+    public void i_update_in_to_enable_multifield_indexing(String fieldNameVal, String index1, String index2) throws Throwable {
+        Set<String> indices = new HashSet<>();
+        System.out.println("Indices "+ index1 + index2);
+        indices.add(generateActualName(index1,timeStamp));
+        indices.add(generateActualName(index2,timeStamp));
+        fieldName=fieldNameVal;
+        requestPayload.put("indices", indices);
+        requestPayload.put("operator", "keyword");
+    }
+
+    public void i_send_request_to_tenant(String tenant) {
+        headers = HTTPClient.overrideHeader(headers, getTenantMapping(tenant));
+    }
+
+    public void i_should_get_response(int responseCode) throws Throwable {
+        String payload = new Gson().toJson(requestPayload);
+        ClientResponse clientResponse = httpClient.send(HttpMethods.PUT, this.getApi() + "/" + this.fieldName, payload, headers, httpClient.getAccessToken());
+        assertEquals(responseCode, clientResponse.getStatus());
+    }
+
+    public void i_send_with(String query, String kind) {
+        requestQuery.setQuery(query);
+        requestQuery.setKind(generateActualName(kind, timeStamp));
+    }
+
+    public void i_send_None_with(String kind) {
+        requestQuery.setKind(generateActualName(kind, timeStamp));
+    }
+
+    public void i_aggregate_by(String aggField) throws Throwable {
+        requestQuery.setAggregateBy(aggField + ".keyword");
+    }
+
+    public void i_should_get_in_response_records(int resultCount) {
+        String payload = requestQuery.toString();
+        ResponseMock response = executeQuery(searchQueryURL, payload, this.headers, httpClient.getAccessToken(), ResponseMock.class);
+        assertEquals(200, response.getResponseCode());
+        assertEquals(resultCount, response.getResults().size());
+    }
+
+    public void i_want_to_validate_mapping(String indexOne, String indexTwo, String fieldName, String type) throws Throwable {
+        ImmutableOpenMap<String, MappingMetaData> elasticMapping = elasticUtils.getMapping(generateActualName(indexOne, timeStamp));
+        assertNotNull(elasticMapping);
+        MappingMetaData typeMapping = elasticMapping.get(type);
+        Map<String, Object> mapping = typeMapping.sourceAsMap();
+        assertNotNull(mapping);
+        String mappingJson = new Gson().toJson(mapping);
+        log.info(mappingJson);
+        assertTrue(mappingJson.contains(fieldName));
+        assertTrue(mappingJson.contains("raw"));
+    }
+
+    @Override
+    protected String getApi() {
+        return updateIndexMappingUrl;
+    }
+
+    @Override
+    protected String getHttpMethod() {
+        return "POST";
+    }
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/RecordSteps.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/RecordSteps.java
new file mode 100644
index 0000000000000000000000000000000000000000..9dbfc60604e9f28e711b1c55c4949633bc91d603
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/RecordSteps.java
@@ -0,0 +1,187 @@
+package org.opengroup.osdu.common;
+
+import com.google.common.collect.MapDifference;
+import com.google.common.collect.Maps;
+import com.google.gson.Gson;
+import com.google.gson.reflect.TypeToken;
+import com.sun.jersey.api.client.ClientResponse;
+import cucumber.api.DataTable;
+import lombok.extern.java.Log;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.opengroup.osdu.models.Acl;
+import org.opengroup.osdu.models.Setup;
+import org.opengroup.osdu.models.TestIndex;
+import org.opengroup.osdu.util.ElasticUtils;
+import org.opengroup.osdu.util.FileHandler;
+import org.opengroup.osdu.util.HTTPClient;
+
+import javax.ws.rs.HttpMethod;
+import java.io.File;
+import java.io.IOException;
+import java.lang.reflect.Type;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.*;
+import static org.opengroup.osdu.util.Config.getEntitlementsDomain;
+import static org.opengroup.osdu.util.Config.getStorageBaseURL;
+
+@Log
+public class RecordSteps extends TestsBase {
+    private Map<String, TestIndex> inputIndexMap = new HashMap<>();
+    private boolean shutDownHookAdded = false;
+
+    private String timeStamp;
+    private List<Map<String, Object>> records;
+    private Map<String, String> headers;
+
+    public RecordSteps(HTTPClient httpClient) {
+        super(httpClient);
+        timeStamp = String.valueOf(System.currentTimeMillis());
+        headers = httpClient.getCommonHeader();
+    }
+
+    /******************One time cleanup for whole feature**************/
+    public void tearDown() {
+        for (String kind : inputIndexMap.keySet()) {
+            TestIndex testIndex = inputIndexMap.get(kind);
+            testIndex.cleanupIndex();
+            testIndex.deleteSchema(kind);
+        }
+        if (records != null && !records.isEmpty()) {
+            for (Map<String, Object> testRecord : records) {
+                String id = testRecord.get("id").toString();
+                httpClient.send(HttpMethod.DELETE, getStorageBaseURL() + "records/" + id, null, headers, httpClient.getAccessToken());
+                log.info("Deleted record: " + id);
+            }
+        }
+    }
+
+    public void the_schema_is_created_with_the_following_kind(DataTable dataTable) {
+
+        List<Setup> inputList = dataTable.asList(Setup.class);
+        for (Setup input : inputList) {
+            TestIndex testIndex = new TestIndex();
+            testIndex.setHttpClient(httpClient);
+            testIndex.setIndex(generateActualName(input.getIndex(), timeStamp));
+            testIndex.setKind(generateActualName(input.getKind(), timeStamp));
+            testIndex.setSchemaFile(input.getSchemaFile());
+            inputIndexMap.put(testIndex.getKind(), testIndex);
+        }
+
+        /******************One time setup for whole feature**************/
+        if (!shutDownHookAdded) {
+            Runtime.getRuntime().addShutdownHook(new Thread() {
+                public void run() {
+                    tearDown();
+                }
+            });
+            shutDownHookAdded = true;
+            for (String kind : inputIndexMap.keySet()) {
+                TestIndex testIndex = inputIndexMap.get(kind);
+                testIndex.setupSchema();
+            }
+        }
+    }
+
+    public void i_ingest_records_with_the_for_a_given(String record, String dataGroup, String kind) {
+
+        String actualKind = generateActualName(kind, timeStamp);
+        try {
+            String fileContent = FileHandler.readFile(String.format("%s.%s", record, "json"));
+            records = new Gson().fromJson(fileContent, new TypeToken<List<Map<String, Object>>>() {}.getType());
+
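+            // stamp each record with a unique id and kind, plus the test legal tag and ACL groups, before ingesting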
+            for (Map<String, Object> testRecord : records) {
+                testRecord.put("id", generateActualName(testRecord.get("id").toString(), timeStamp));
+                testRecord.put("kind", actualKind);
+                testRecord.put("legal", generateLegalTag());
+                String[] aclGroups = {generateActualName(dataGroup, timeStamp) + "." + getEntitlementsDomain()};
+                Acl acl = Acl.builder().viewers(aclGroups).owners(aclGroups).build();
+                testRecord.put("acl", acl);
+            }
+            String payLoad = new Gson().toJson(records);
+            ClientResponse clientResponse = httpClient.send(HttpMethod.PUT, getStorageBaseURL() + "records", payLoad, headers, httpClient.getAccessToken());
+            assertEquals(201, clientResponse.getStatus());
+        } catch (Exception ex) {
+            throw new AssertionError(ex.getMessage());
+        }
+    }
+
+    public void i_should_get_the_documents_for_the_in_the_Elastic_Search(int expectedCount, String index) throws Throwable {
+        index = generateActualName(index, timeStamp);
+        long numOfIndexedDocuments = waitForIndexedDocuments(index);
+        assertEquals(expectedCount, numOfIndexedDocuments);
+    }
+
+    public void i_should_get_the_elastic_for_the_tenant_testindex_timestamp_well_in_the_Elastic_Search(String expectedMapping, String type, String index) throws Throwable {
+        index = generateActualName(index, timeStamp);
+        ElasticUtils elasticUtils = new ElasticUtils();
+        ImmutableOpenMap<String, MappingMetaData> elasticMapping = elasticUtils.getMapping(index);
+        assertNotNull(elasticMapping);
+
+        MappingMetaData typeMapping = elasticMapping.get(type);
+        Map<String, Object> mapping = typeMapping.sourceAsMap();
+        assertNotNull(mapping);
+        assertTrue(areJsonEqual(expectedMapping, mapping.toString()));
+    }
+
+    public void iShouldGetTheNumberDocumentsForTheIndexInTheElasticSearchWithOutSkippedAttribute(int expectedCount, String index, String skippedAttributes) throws Throwable {
+        index = generateActualName(index, timeStamp);
+        ElasticUtils elasticUtils = new ElasticUtils();
+        waitForIndexedDocuments(index);
+        long documentCountByQuery = elasticUtils.fetchRecordsByExistQuery(index, skippedAttributes);
+        assertEquals(expectedCount, documentCountByQuery);
+    }
+
+    // polls Elasticsearch until the index reports indexed documents, refreshing periodically; fails after ~20 attempts
+    private long waitForIndexedDocuments(String index) throws InterruptedException, IOException {
+        ElasticUtils elasticUtils = new ElasticUtils();
+        long numOfIndexedDocuments = 0;
+        int iterator;
+
+        // index.refresh_interval is set to default 30s, wait for 40s initially
+        Thread.sleep(40000);
+
+        for (iterator = 0; iterator < 20; iterator++) {
+
+            numOfIndexedDocuments = elasticUtils.fetchRecords(index);
+            if (numOfIndexedDocuments > 0) {
+                log.info(String.format("index: %s | attempts: %s | documents acknowledged by elastic: %s", index, iterator, numOfIndexedDocuments));
+                break;
+            } else {
+                log.info(String.format("index: %s | documents acknowledged by elastic: %s", index, numOfIndexedDocuments));
+                Thread.sleep(5000);
+            }
+
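+            // every fifth attempt, force a refresh in case the index auto-refresh has not run yet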
+            if ((iterator + 1) % 5 == 0) elasticUtils.refreshIndex(index);
+        }
+        if (iterator >= 20) {
+            fail(String.format("index not created after waiting for %s seconds", ((40000 + iterator * 5000) / 1000)));
+        }
+        return numOfIndexedDocuments;
+    }
+
+    private boolean areJsonEqual(String firstJson, String secondJson) {
+        Gson gson = new Gson();
+        Type mapType = new TypeToken<Map<String, Object>>() {}.getType();
+        Map<String, Object> firstMap = gson.fromJson(firstJson, mapType);
+        Map<String, Object> secondMap = gson.fromJson(secondJson, mapType);
+
+        // Maps.difference never returns null, so only the differing-entries check is needed
+        MapDifference<String, Object> result = Maps.difference(firstMap, secondMap);
+        if (result.entriesDiffering().isEmpty()) return true;
+        log.info(String.format("difference: %s", result.entriesDiffering()));
+        return false;
+    }
+
+    @Override
+    protected String getApi() {
+        return null;
+    }
+
+    @Override
+    protected String getHttpMethod() {
+        return null;
+    }
+
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/TestsBase.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/TestsBase.java
new file mode 100644
index 0000000000000000000000000000000000000000..89fbb2a05f445d4075f93c81883620bc98c1acf5
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/TestsBase.java
@@ -0,0 +1,160 @@
+package org.opengroup.osdu.common;
+
+import com.google.gson.Gson;
+
+import org.opengroup.osdu.models.Legal;
+import org.opengroup.osdu.models.Setup;
+import org.opengroup.osdu.models.TestIndex;
+import org.opengroup.osdu.response.ResponseBase;
+import org.opengroup.osdu.util.HTTPClient;
+
+import com.sun.jersey.api.client.ClientResponse;
+import cucumber.api.Scenario;
+import lombok.extern.java.Log;
+
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.MultivaluedMap;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.opengroup.osdu.util.Config.*;
+
+@Log
+public abstract class TestsBase {
+    protected HTTPClient httpClient;
+    protected Scenario scenario;
+    protected Map<String, String> tenantMap = new HashMap<>();
+    protected Map<String, TestIndex> inputRecordMap = new HashMap<>();
+
+    public TestsBase(HTTPClient httpClient) {
+        this.httpClient = httpClient;
+        tenantMap.put("tenant1", getDataPartitionIdTenant1());
+        tenantMap.put("tenant2", getDataPartitionIdTenant2());
+        tenantMap.put("common", "common");
+    }
+
+    protected void setUp(List<Setup> inputList, String timeStamp) {
+        for (Setup input : inputList) {
+            TestIndex testIndex = new TestIndex();
+            testIndex.setHttpClient(httpClient);
+            testIndex.setIndex(generateActualName(input.getIndex(), timeStamp));
+            testIndex.setKind(generateActualName(input.getKind(), timeStamp));
+            testIndex.setMappingFile(input.getMappingFile());
+            testIndex.setRecordFile(input.getRecordFile());
+            List<String> dataGroup = new ArrayList<>();
+            String[] viewerGroup = input.getViewerGroup().split(",");
+            for (int i = 0; i < viewerGroup.length; i++) {
+                viewerGroup[i] = generateActualName(viewerGroup[i], timeStamp) + "." + getEntitlementsDomain();
+                dataGroup.add(viewerGroup[i]);
+            }
+            String[] ownerGroup = input.getOwnerGroup().split(",");
+            for (int i = 0; i < ownerGroup.length; i ++) {
+                ownerGroup[i] = generateActualName(ownerGroup[i], timeStamp) + "." + getEntitlementsDomain();
+                if (dataGroup.indexOf(ownerGroup[i]) > 0) {
+                    dataGroup.add(ownerGroup[i]);
+                }
+            }
+            testIndex.setViewerGroup(viewerGroup);
+            testIndex.setOwnerGroup(ownerGroup);
+            testIndex.setDataGroup(dataGroup.toArray(new String[dataGroup.size()]));
+            inputRecordMap.put(testIndex.getKind(), testIndex);
+        }
+        /******************One time setup for whole feature**************/
+        Runtime.getRuntime().addShutdownHook(new Thread() {
+            public void run() {
+                tearDown();
+            }
+        });
+        for (String kind : inputRecordMap.keySet()) {
+            TestIndex testIndex = inputRecordMap.get(kind);
+            testIndex.setupIndex();
+        }
+
+    }
+
+    /******************One time cleanup for whole feature**************/
+    public void tearDown() {
+        for (String kind : inputRecordMap.keySet()) {
+            TestIndex testIndex = inputRecordMap.get(kind);
+            testIndex.cleanupIndex();
+        }
+    }
+
+    protected abstract String getApi();
+
+    protected abstract String getHttpMethod();
+
+    protected String executeQuery(String api, String payLoad, Map<String, String> headers, String token) {
+        ClientResponse clientResponse = httpClient.send(this.getHttpMethod(), api, payLoad, headers, token);
+        logCorrelationIdWithFunctionName(clientResponse.getHeaders());
+        log.info(String.format("Response status: %s, type: %s", clientResponse.getStatus(), clientResponse.getType().toString()));
+        assertEquals(MediaType.APPLICATION_JSON, clientResponse.getType().toString());
+        return clientResponse.getEntity(String.class);
+    }
+
+    protected <T extends ResponseBase> T executeQuery(String api, String payLoad, Map<String, String> headers, String token, Class<T> typeParameterClass) {
+        ClientResponse clientResponse = httpClient.send(this.getHttpMethod(), api, payLoad, headers, token);
+        logCorrelationIdWithFunctionName(clientResponse.getHeaders());
+        return getResponse(clientResponse, typeParameterClass);
+    }
+
+    protected <T extends ResponseBase> T executeQuery(String payLoad, Map<String, String> headers, String token, Class<T> typeParameterClass) {
+        ClientResponse clientResponse = httpClient.send(this.getHttpMethod(), this.getApi(), payLoad, headers, token);
+        logCorrelationIdWithFunctionName(clientResponse.getHeaders());
+        return getResponse(clientResponse, typeParameterClass);
+    }
+
+    private <T extends ResponseBase> T getResponse(ClientResponse clientResponse, Class<T> typeParameterClass) {
+        log.info(String.format("Response status: %s, type: %s", clientResponse.getStatus(), clientResponse.getType().toString()));
+        assertEquals(MediaType.APPLICATION_JSON, clientResponse.getType().toString());
+        String responseEntity = clientResponse.getEntity(String.class);
+
+        T response = new Gson().fromJson(responseEntity, typeParameterClass);
+        response.setHeaders(clientResponse.getHeaders());
+        response.setResponseCode(clientResponse.getStatus());
+        return response;
+    }
+
+    protected ClientResponse executeGetRequest(String api, Map<String, String> headers, String token) {
+        return executeRequest(this.getHttpMethod(), api, headers, token);
+    }
+
+    protected ClientResponse executeRequest(String method, String api, Map<String, String> headers, String token) {
+        ClientResponse clientResponse = httpClient.send(method, api, null, headers, token);
+        if (clientResponse.getType() != null) {
+            log.info(String.format("Response status: %s, type: %s", clientResponse.getStatus(), clientResponse.getType().toString()));
+        }
+        logCorrelationIdWithFunctionName(clientResponse.getHeaders());
+        return clientResponse;
+    }
+
+    private void logCorrelationIdWithFunctionName(MultivaluedMap<String, String> headers) {
+        log.info(String.format("Scenario Name: %s, Correlation-Id: %s", scenario.getId(), headers.get("correlation-id")));
+    }
+
+    protected String getTenantMapping(String tenant) {
+        if (tenantMap.containsKey(tenant)) {
+            return tenantMap.get(tenant);
+        }
+        return null;
+    }
+
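+    // replaces tenant aliases (tenant1, tenant2, common) with the configured partition ids
+    // and the <timestamp> placeholder with the run timestamp so test resources get unique names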
+    protected String generateActualName(String rawName, String timeStamp) {
+        for (String tenant : tenantMap.keySet()) {
+            rawName = rawName.replaceAll(tenant, getTenantMapping(tenant));
+        }
+        return rawName.replaceAll("<timestamp>", timeStamp);
+    }
+
+    protected Legal generateLegalTag() {
+        Legal legal = new Legal();
+        String[] legalTags = {getLegalTag()};
+        legal.setLegaltags(legalTags);
+        String[] otherRelevantCountries = {getOtherRelevantDataCountries()};
+        legal.setOtherRelevantDataCountries(otherRelevantCountries);
+        return legal;
+    }
+}
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/Acl.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/Acl.java
new file mode 100644
index 0000000000000000000000000000000000000000..a4c1f737f3223edd65c41b320dcfa2cbb3a2d19c
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/Acl.java
@@ -0,0 +1,14 @@
+package org.opengroup.osdu.models;
+
+import lombok.Builder;
+import lombok.Data;
+
+@Data
+@Builder
+public class Acl {
+
+    private String[] viewers;
+
+    private String[] owners;
+
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/FilterInfo.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/FilterInfo.java
new file mode 100644
index 0000000000000000000000000000000000000000..7981ad10f7ea50ee7f8c3d34e7277ee4d35e1ab5
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/FilterInfo.java
@@ -0,0 +1,6 @@
+package org.opengroup.osdu.models;
+
+public class FilterInfo {
+    public String name;
+    public String description;
+}
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/Legal.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/Legal.java
new file mode 100644
index 0000000000000000000000000000000000000000..57595ffeadede472b86d9ffa3ee5dd37d769d165
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/Legal.java
@@ -0,0 +1,20 @@
+package org.opengroup.osdu.models;
+
+import lombok.AllArgsConstructor;
+import lombok.Builder;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+@Data
+@Builder
+@NoArgsConstructor
+@AllArgsConstructor
+public class Legal {
+
+    private String[] legaltags;
+
+    private String[] otherRelevantDataCountries;
+
+    @Builder.Default
+    private String status = "compliant";
+}
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/Setup.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/Setup.java
new file mode 100644
index 0000000000000000000000000000000000000000..c0b3f14daad584b69afbb4432a26e5a8f09023c7
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/Setup.java
@@ -0,0 +1,23 @@
+package org.opengroup.osdu.models;
+
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+import java.util.Map;
+
+import org.opengroup.osdu.util.HTTPClient;
+
+@Data
+@NoArgsConstructor
+public class Setup {
+    private String tenantId;
+    private String kind;
+    private String index;
+    private String viewerGroup;
+    private String ownerGroup;
+    private String mappingFile;
+    private String recordFile;
+    private String schemaFile;
+    private HTTPClient httpClient;
+    private Map<String, String> headers;
+}
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/TestIndex.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/TestIndex.java
new file mode 100644
index 0000000000000000000000000000000000000000..00ac7b1c9bd2fe9780c5d2f40baf761e8aa429c5
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/models/TestIndex.java
@@ -0,0 +1,134 @@
+package org.opengroup.osdu.models;
+
+import com.google.gson.Gson;
+import com.google.gson.JsonElement;
+import com.google.gson.reflect.TypeToken;
+import com.sun.jersey.api.client.ClientResponse;
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+import javax.ws.rs.HttpMethod;
+
+import org.opengroup.osdu.util.ElasticUtils;
+import org.opengroup.osdu.util.FileHandler;
+import org.opengroup.osdu.util.HTTPClient;
+
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Logger;
+
+import static org.junit.Assert.assertEquals;
+import static org.opengroup.osdu.util.Config.*;
+
+@Data
+@NoArgsConstructor
+public class TestIndex {
+    private static final Logger LOGGER = Logger.getLogger(TestIndex.class.getName());
+    private String kind;
+    private String index;
+    private String mappingFile;
+    private String recordFile;
+    private int recordCount;
+    private String schemaFile;
+    private String[] dataGroup;
+    private String[] viewerGroup;
+    private String[] ownerGroup;
+    private HTTPClient httpClient;
+    private Map<String, String> headers;
+    private ElasticUtils elasticUtils = new ElasticUtils();
+    private Gson gson = new Gson();
+
+    public void setHttpClient(HTTPClient httpClient) {
+        this.httpClient = httpClient;
+        headers = httpClient.getCommonHeader();
+    }
+
+    public void setupIndex() {
+        this.addIndex();
+        List<Map<String, Object>> records = getRecordsFromTestFile();
+        this.recordCount = this.elasticUtils.indexRecords(this.index, this.kind, records);
+    }
+
+    public void setupSchema() {
+        ClientResponse clientResponse = this.httpClient.send(HttpMethod.POST, getStorageBaseURL() + "schemas", this.getStorageSchemaFromJson(), headers, httpClient.getAccessToken());
+        if (clientResponse.getType() != null)
+            LOGGER.info(String.format("Response status: %s, type: %s", clientResponse.getStatus(), clientResponse.getType().toString()));
+    }
+
+    public void deleteSchema(String kind) {
+        ClientResponse clientResponse = this.httpClient.send(HttpMethod.DELETE, getStorageBaseURL() + "schemas/" + kind, null, headers, httpClient.getAccessToken());
+        assertEquals(204, clientResponse.getStatus());
+        if (clientResponse.getType() != null)
+            LOGGER.info(String.format("Response status: %s, type: %s", clientResponse.getStatus(), clientResponse.getType().toString()));
+    }
+
+    public void addIndex() {
+        this.elasticUtils.createIndex(this.index, this.getIndexMappingFromJson());
+    }
+
+    public void cleanupIndex() {
+        this.elasticUtils.deleteIndex(index);
+    }
+
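+    // defining these accessors by hand keeps Lombok's @Data from generating public getters with
+    // the same names; each returns the configured base name with its file extension appended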
+    private String getRecordFile() {
+        return String.format("%s.json", this.recordFile);
+    }
+
+    private String getMappingFile() {
+        return String.format("%s.mapping", this.mappingFile);
+    }
+
+    private String getSchemaFile() {
+        return String.format("%s.schema", this.schemaFile);
+    }
+
+    private List<Map<String, Object>> getRecordsFromTestFile() {
+        try {
+            String fileContent = FileHandler.readFile(getRecordFile());
+            List<Map<String, Object>> records = new Gson().fromJson(
+                    fileContent, new TypeToken<List<Map<String,Object>>>() {}.getType());
+
+            for (Map<String, Object> testRecord : records) {
+                testRecord.put("kind", this.kind);
+                testRecord.put("legal", generateLegalTag());
+                testRecord.put("x-acl", dataGroup);
+                Acl acl = Acl.builder().viewers(viewerGroup).owners(ownerGroup).build();
+                testRecord.put("acl", acl);
+            }
+            return records;
+        } catch (Exception ex) {
+            throw new AssertionError(ex.getMessage());
+        }
+    }
+
+    private String getIndexMappingFromJson() {
+        try {
+            String fileContent = FileHandler.readFile(getMappingFile());
+            JsonElement json = gson.fromJson(fileContent, JsonElement.class);
+            return gson.toJson(json);
+        } catch (Exception e) {
+            throw new AssertionError(e.getMessage());
+        }
+    }
+
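+    // loads the storage schema template and substitutes the KIND_VAL placeholder with the generated kind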
+    private String getStorageSchemaFromJson() {
+        try {
+            String fileContent = FileHandler.readFile(getSchemaFile());
+            fileContent = fileContent.replaceAll("KIND_VAL", this.kind);
+            JsonElement json = gson.fromJson(fileContent, JsonElement.class);
+            return gson.toJson(json);
+        } catch (Exception e) {
+            throw new AssertionError(e.getMessage());
+        }
+    }
+
+    private Legal generateLegalTag() {
+        Legal legal = new Legal();
+        String[] legalTags = {getLegalTag()};
+        legal.setLegaltags(legalTags);
+        String[] otherRelevantCountries = {getOtherRelevantDataCountries()};
+        legal.setOtherRelevantDataCountries(otherRelevantCountries);
+        return legal;
+    }
+
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/CursorQuery.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/CursorQuery.java
new file mode 100644
index 0000000000000000000000000000000000000000..52cb63fa9a2c25b20c4b07428ab70a1067636530
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/CursorQuery.java
@@ -0,0 +1,24 @@
+package org.opengroup.osdu.request;
+
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+import java.util.List;
+
+@Data
+@NoArgsConstructor
+public class CursorQuery {
+    private String cursor;
+    private String kind;
+    private int limit;
+    private String query;
+    private List<String> returnedFields;
+    private SortQuery sort;
+    private Boolean queryAsOwner;
+    private SpatialFilter spatialFilter;
+
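+    // disableHtmlEscaping keeps characters such as '<' and '>' in the query from being serialized as unicode escapes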
+    @Override
+    public String toString() {
+        return new com.google.gson.GsonBuilder().disableHtmlEscaping().create().toJson(this);
+    }
+}
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/Query.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/Query.java
new file mode 100644
index 0000000000000000000000000000000000000000..65962ce54833e905c98df16ae1a941f72379d999
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/Query.java
@@ -0,0 +1,26 @@
+package org.opengroup.osdu.request;
+
+import lombok.Data;
+import lombok.NoArgsConstructor;
+
+import java.util.List;
+
+@Data
+@NoArgsConstructor
+public class Query {
+
+    private String kind;
+    private Integer offset;
+    private Integer limit;
+    private String query;
+    private SortQuery sort;
+    private Boolean queryAsOwner;
+    private String aggregateBy;
+    private List<String> returnedFields;
+    private SpatialFilter spatialFilter;
+
+    @Override
+    public String toString() {
+        return new com.google.gson.Gson().toJson(this);
+    }
+}
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/SortQuery.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/SortQuery.java
new file mode 100644
index 0000000000000000000000000000000000000000..32672ca9fb9884a53b5ba1e9637671e0916d72fe
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/SortQuery.java
@@ -0,0 +1,14 @@
+package org.opengroup.osdu.request;
+
+import lombok.Data;
+import lombok.NoArgsConstructor;
+import org.elasticsearch.search.sort.SortOrder;
+
+import java.util.List;
+
+@Data
+@NoArgsConstructor
+public class SortQuery {
+    private List<String> field;
+    private List<SortOrder> order;
+}
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/SpatialFilter.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/SpatialFilter.java
new file mode 100644
index 0000000000000000000000000000000000000000..c8d3eb68c7f4ba4e49bc70a34af00a1406135fa3
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/request/SpatialFilter.java
@@ -0,0 +1,40 @@
+package org.opengroup.osdu.request;
+
+import lombok.*;
+
+import java.util.List;
+
+@Data
+@Builder
+@NoArgsConstructor
+@AllArgsConstructor
+public class SpatialFilter {
+    String field;
+    ByBoundingBox byBoundingBox;
+    ByDistance byDistance;
+    ByGeoPolygon byGeoPolygon;
+
+    @Builder
+    public static class ByDistance {
+        Coordinates point;
+        int distance;
+    }
+
+    @Builder
+    public static class ByBoundingBox {
+        Coordinates topLeft;
+        Coordinates bottomRight;
+    }
+
+    @Builder
+    public static class Coordinates {
+        Double latitude;
+        Double longitude;
+    }
+
+    @Builder
+    public static class ByGeoPolygon {
+        List<Coordinates> points;
+    }
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/response/ErrorResponseMock.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/response/ErrorResponseMock.java
new file mode 100644
index 0000000000000000000000000000000000000000..20d0b943867335229ad846d60e6d99b3feceecfc
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/response/ErrorResponseMock.java
@@ -0,0 +1,15 @@
+package org.opengroup.osdu.response;
+
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+import java.util.List;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class ErrorResponseMock extends ResponseBase {
+    private List<String> errors;
+    private String code;
+    private String reason;
+    private String message;
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/response/ResponseBase.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/response/ResponseBase.java
new file mode 100644
index 0000000000000000000000000000000000000000..7e981ba8f4edd467cac3a28f5a57ab98edfc51b8
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/response/ResponseBase.java
@@ -0,0 +1,11 @@
+package org.opengroup.osdu.response;
+
+import lombok.Data;
+
+import javax.ws.rs.core.MultivaluedMap;
+
+@Data
+public abstract class ResponseBase {
+    private int responseCode;
+    private MultivaluedMap<String, String> headers;
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/response/ResponseMock.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/response/ResponseMock.java
new file mode 100644
index 0000000000000000000000000000000000000000..510701e438bcd113ff944253285a77e7bac9e2d0
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/response/ResponseMock.java
@@ -0,0 +1,16 @@
+package org.opengroup.osdu.response;
+
+import lombok.Data;
+import lombok.EqualsAndHashCode;
+
+import java.util.List;
+import java.util.Map;
+
+@Data
+@EqualsAndHashCode(callSuper = true)
+public class ResponseMock extends ResponseBase {
+    private List<Map<String, Object>> results;
+    private List<Object> aggregations;
+    private long totalCount;
+    private String cursor;
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/Config.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/Config.java
new file mode 100644
index 0000000000000000000000000000000000000000..ec39559b98b2889986d8560d9b2b6eb78c5eab70
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/Config.java
@@ -0,0 +1,88 @@
+package org.opengroup.osdu.util;
+
+public class Config {
+
+    private static final String DEFAULT_ELASTIC_HOST = "";
+    private static final String DEFAULT_ELASTIC_USER_NAME = "";
+    private static final String DEFAULT_ELASTIC_PASSWORD = "";
+    static final int PORT = 9243;
+
+    private static final String DEFAULT_INDEXER_HOST = "";
+    private static final String DEFAULT_SEARCH_HOST = "";
+    private static final String DEFAULT_STORAGE_HOST = "";
+    private static final String DEFAULT_DATA_PARTITION_ID_TENANT1 = "";
+    private static final String DEFAULT_DATA_PARTITION_ID_TENANT2 = "";
+    private static final String DEFAULT_SEARCH_INTEGRATION_TESTER = "";
+
+    private static final String DEFAULT_TARGET_AUDIENCE = "";
+
+    private static final String DEFAULT_LEGAL_TAG = "";
+    private static final String DEFAULT_OTHER_RELEVANT_DATA_COUNTRIES = "";
+
+    private static final String DEFAULT_ENTITLEMENTS_DOMAIN = "";
+
+
+    public static String getOtherRelevantDataCountries() {
+        return getEnvironmentVariableOrDefaultValue("OTHER_RELEVANT_DATA_COUNTRIES", DEFAULT_OTHER_RELEVANT_DATA_COUNTRIES);
+    }
+
+    public static String getLegalTag() {
+        return getEnvironmentVariableOrDefaultValue("LEGAL_TAG", DEFAULT_LEGAL_TAG);
+    }
+
+    public static String getTargetAudience() {
+        return getEnvironmentVariableOrDefaultValue("INTEGRATION_TEST_AUDIENCE", DEFAULT_TARGET_AUDIENCE);
+    }
+
+    public static String getKeyValue() {
+        return getEnvironmentVariableOrDefaultValue("SEARCH_INTEGRATION_TESTER", DEFAULT_SEARCH_INTEGRATION_TESTER);
+    }
+
+    public static String getDataPartitionIdTenant1() {
+        return getEnvironmentVariableOrDefaultValue("DEFAULT_DATA_PARTITION_ID_TENANT1", DEFAULT_DATA_PARTITION_ID_TENANT1);
+    }
+
+    public static String getDataPartitionIdTenant2() {
+        return getEnvironmentVariableOrDefaultValue("DEFAULT_DATA_PARTITION_ID_TENANT2", DEFAULT_DATA_PARTITION_ID_TENANT2);
+    }
+
+    public static String getUserName() {
+        return getEnvironmentVariableOrDefaultValue("ELASTIC_USER_NAME", DEFAULT_ELASTIC_USER_NAME);
+    }
+
+    public static String getPassword() {
+        return getEnvironmentVariableOrDefaultValue("ELASTIC_PASSWORD", DEFAULT_ELASTIC_PASSWORD);
+    }
+
+    public static String getElasticHost() {
+        return getEnvironmentVariableOrDefaultValue("ELASTIC_HOST", DEFAULT_ELASTIC_HOST);
+    }
+
+    public static String getIndexerBaseURL() {
+        return getEnvironmentVariableOrDefaultValue("INDEXER_HOST", DEFAULT_INDEXER_HOST);
+    }
+
+    public static String getSearchBaseURL() {
+        return getEnvironmentVariableOrDefaultValue("SEARCH_HOST", DEFAULT_SEARCH_HOST);
+    }
+
+    public static String getStorageBaseURL() {
+        return getEnvironmentVariableOrDefaultValue("STORAGE_HOST", DEFAULT_STORAGE_HOST);
+    }
+
+    public static String getEntitlementsDomain() {
+        return getEnvironmentVariableOrDefaultValue("ENTITLEMENTS_DOMAIN", DEFAULT_ENTITLEMENTS_DOMAIN);
+    }
+
+    private static String getEnvironmentVariableOrDefaultValue(String key, String defaultValue) {
+        String environmentVariable = getEnvironmentVariable(key);
+        if (environmentVariable == null) {
+            environmentVariable = defaultValue;
+        }
+        return environmentVariable;
+    }
+
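+    // a JVM system property takes precedence over an environment variable of the same name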
+    private static String getEnvironmentVariable(String propertyKey) {
+        return System.getProperty(propertyKey, System.getenv(propertyKey));
+    }
+}
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/ElasticUtils.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/ElasticUtils.java
new file mode 100644
index 0000000000000000000000000000000000000000..678e8b973bc615692d111159d4a74131ad8ea599
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/ElasticUtils.java
@@ -0,0 +1,288 @@
+package org.opengroup.osdu.util;
+
+import com.google.gson.Gson;
+import lombok.extern.java.Log;
+import org.apache.http.Header;
+import org.apache.http.HttpHost;
+import org.apache.http.message.BasicHeader;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchStatusException;
+import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
+import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
+import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsRequest;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
+import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
+import org.elasticsearch.action.bulk.BulkRequest;
+import org.elasticsearch.action.bulk.BulkResponse;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.client.*;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.collect.ImmutableOpenMap;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Level;
+
+
+/**
+ * All util methods to use elastic apis for tests
+ * It should be used only in the Setup or TearDown phase of the test
+ */
+@Log
+public class ElasticUtils {
+
+    private static final int REST_CLIENT_CONNECT_TIMEOUT = 5000;
+    private static final int REST_CLIENT_SOCKET_TIMEOUT = 60000;
+    private static final int REST_CLIENT_RETRY_TIMEOUT = 60000;
+
+    private final TimeValue REQUEST_TIMEOUT = TimeValue.timeValueMinutes(1);
+
+    private final String username;
+    private final String password;
+    private final String host;
+
+    public ElasticUtils() {
+        this.username = Config.getUserName();
+        this.password = Config.getPassword();
+        this.host = Config.getElasticHost();
+    }
+
+    public void createIndex(String index, String mapping) {
+        try {
+            try (RestHighLevelClient client = ElasticUtils.createClient(username, password, host)) {
+                Settings settings = Settings.builder()
+                        .put("index.number_of_shards", 1)
+                        .put("index.number_of_replicas", 1).build();
+
+                // creating index + add mapping to the index
+                log.info("Creating index with name: " + index);
+                CreateIndexRequest request = new CreateIndexRequest(index, settings);
+                request.source("{\"mappings\":" + mapping + "}", XContentType.JSON);
+                request.timeout(REQUEST_TIMEOUT);
+                CreateIndexResponse response = client.indices().create(request, RequestOptions.DEFAULT);
+
+                // The create call is synchronous, so the response cannot change after
+                // the fact; fail fast if the index or its shards were not acknowledged
+                // within the request timeout.
+                if (!response.isAcknowledged() || !response.isShardsAcknowledged()) {
+                    log.info("Failed to get confirmation from elastic server");
+                    throw new AssertionError("Failed to get confirmation from Elastic cluster");
+                }
+
+                log.info("Done creating index with name: " + index);
+            }
+        } catch (ElasticsearchStatusException e) {
+            if (e.status() == RestStatus.BAD_REQUEST &&
+                    (e.getMessage().contains("resource_already_exists_exception") || e.getMessage().contains("IndexAlreadyExistsException"))) {
+                log.info("Index already exists. Ignoring error...");
+            } else {
+                // Any other status error means index setup really failed.
+                throw new AssertionError(e.getMessage());
+            }
+        } catch (Exception e) {
+            throw new AssertionError(e.getMessage());
+        }
+    }
+
+    public int indexRecords(String index, String kind, List<Map<String, Object>> testRecords) {
+        log.info("Creating records inside index with name: " + index);
+
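+        // All records go into a single bulk request; the explicit refresh below
+        // makes them searchable immediately instead of after the refresh interval.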
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.timeout(REQUEST_TIMEOUT);
+
+        List<IndexRequest> records = ElasticUtils.getIndexReqFromRecord(index, kind, testRecords);
+        for (IndexRequest record : records) {
+            bulkRequest.add(record);
+        }
+
+        BulkResponse bulkResponse = null;
+        try {
+            try (RestHighLevelClient client = ElasticUtils.createClient(username, password, host)) {
+                bulkResponse = client.bulk(bulkRequest, RequestOptions.DEFAULT);
+                log.info("Done creating records inside index with name: " + index);
+            }
+        } catch (IOException e) {
+            log.log(Level.SEVERE, "bulk indexing failed", e);
+        }
+
+        // Double check failures
+        if (bulkResponse != null && bulkResponse.hasFailures()) {
+            throw new AssertionError("Setup failed: bulk indexing reported failures");
+        }
+
+        try {
+            try (RestHighLevelClient client = ElasticUtils.createClient(username, password, host)) {
+                RefreshRequest request = new RefreshRequest(index);
+                RefreshResponse refreshResponse = client.indices().refresh(request, RequestOptions.DEFAULT);
+                log.info(String.format("refreshed index, successful shards: %s | failed shards: %s | total shards: %s ", refreshResponse.getSuccessfulShards(), refreshResponse.getFailedShards(), refreshResponse.getTotalShards()));
+            }
+        } catch (IOException | ElasticsearchException e) {
+            log.log(Level.SEVERE, "index refresh failed", e);
+        }
+
+        return records.size();
+    }
+
+    public void deleteIndex(String index) {
+        try (RestHighLevelClient client = ElasticUtils.createClient(username, password, host)) {
+            // Retry if the elastic cluster is snapshotting and we can't delete the index.
+            for (int retries = 0; ; retries++) {
+                try {
+                    log.info("Deleting index with name: " + index + ", retry count: " + retries);
+                    DeleteIndexRequest request = new DeleteIndexRequest(index);
+                    client.indices().delete(request, RequestOptions.DEFAULT);
+                    log.info("Done deleting index with name: " + index);
+                    return;
+                } catch (ElasticsearchException e) {
+                    if (e.status() == RestStatus.NOT_FOUND) {
+                        return;
+                    } else if (e.getMessage().contains("Cannot delete indices that are being snapshotted")) {
+                        closeIndex(client, index);
+                        log.info(String.format("skipping %s index delete, as snapshot is being run, closing the index instead", index));
+                        return;
+                    } else if (retries < 4) {
+                        log.info("Retrying to delete index due to following error: " + e.getMessage());
+                        try {
+                            Thread.sleep(12000);
+                        } catch (InterruptedException e1) {
+                            // Restore the interrupt flag instead of swallowing the exception.
+                            Thread.currentThread().interrupt();
+                        }
+                    } else {
+                        closeIndex(client, index);
+                        log.info(String.format("maximum retries: %s reached for index: %s delete, closing the index instead", retries, index));
+                    }
+                }
+            }
+        } catch (IOException e) {
+            throw new AssertionError(e.getMessage());
+        }
+    }
+
+    public long fetchRecords(String index) throws IOException {
+        try {
+            try (RestHighLevelClient client = ElasticUtils.createClient(username, password, host)) {
+                SearchRequest request = new SearchRequest(index);
+                SearchResponse searchResponse = client.search(request, RequestOptions.DEFAULT);
+                return searchResponse.getHits().totalHits;
+            }
+        } catch (ElasticsearchStatusException e) {
+            log.log(Level.INFO, String.format("Elastic search threw exception: %s", e.getMessage()));
+            return -1;
+        }
+    }
+
+    public long fetchRecordsByExistQuery(String index, String attributeName) throws IOException {
+        try {
+            try (RestHighLevelClient client = ElasticUtils.createClient(username, password, host)) {
+                SearchRequest searchRequest = new SearchRequest(index);
+                SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
+                searchSourceBuilder.query(QueryBuilders.existsQuery(attributeName));
+                searchRequest.source(searchSourceBuilder);
+
+                SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);
+                return searchResponse.getHits().totalHits;
+            }
+        } catch (ElasticsearchStatusException e) {
+            log.log(Level.INFO, String.format("Elastic search threw exception: %s", e.getMessage()));
+            return -1;
+        }
+    }
+
+    public ImmutableOpenMap<String, MappingMetaData> getMapping(String index) throws IOException {
+        try (RestHighLevelClient client = ElasticUtils.createClient(username, password, host)) {
+            GetMappingsRequest request = new GetMappingsRequest();
+            request.indices(index);
+            GetMappingsResponse response = client.indices().getMapping(request, RequestOptions.DEFAULT);
+            ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> allMappings = response.mappings();
+            return allMappings.get(index);
+        }
+    }
+
+    public void refreshIndex(String index) throws IOException {
+        try (RestHighLevelClient client = ElasticUtils.createClient(username, password, host)) {
+            try {
+                RefreshRequest request = new RefreshRequest(index);
+                client.indices().refresh(request, RequestOptions.DEFAULT);
+            } catch (ElasticsearchException exception) {
+                log.info(String.format("index: %s refresh failed. message: %s", index, exception.getDetailedMessage()));
+            }
+        }
+    }
+
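+    // Fallback for deleteIndex: when Elasticsearch refuses to delete (e.g. while a
+    // snapshot is running), closing the index stops it from serving requests, which
+    // is enough isolation between test runs.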
+    private boolean closeIndex(RestHighLevelClient client, String index) {
+        try {
+            CloseIndexRequest request = new CloseIndexRequest(index);
+            request.timeout(TimeValue.timeValueMinutes(1));
+            AcknowledgedResponse closeIndexResponse = client.indices().close(request, RequestOptions.DEFAULT);
+            return closeIndexResponse.isAcknowledged();
+        } catch (ElasticsearchException | IOException exception) {
+            log.info(String.format("index: %s close failed. message: %s", index, exception.getMessage()));
+        }
+        return false;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static List<IndexRequest> getIndexReqFromRecord(String index, String kind, List<Map<String, Object>> testRecords) {
+        List<IndexRequest> dataList = new ArrayList<>();
+        Gson gson = new Gson();
+        try {
+            for (Map<String, Object> record : testRecords) {
+                String id = (String) record.get("id");
+                Map<String, Object> mapData = gson.fromJson(gson.toJson(record), Map.class);
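+                // kind follows partition:data-source-id:type:schema-version; the
+                // third token (type) becomes the Elasticsearch mapping type.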
+                IndexRequest indexRequest = new IndexRequest(index, kind.split(":")[2], id).source(mapData);
+                dataList.add(indexRequest);
+            }
+        } catch (Exception e) {
+            throw new AssertionError(e.getMessage());
+        }
+        return dataList;
+    }
+
+    private static RestHighLevelClient createClient(String username, String password, String host) {
+
+        RestHighLevelClient restHighLevelClient;
+        int port = Config.PORT;
+        try {
+            String rawString = String.format("%s:%s", username, password);
+            RestClientBuilder builder = RestClient.builder(new HttpHost(host, port, "https"));
+            builder.setRequestConfigCallback(requestConfigBuilder -> requestConfigBuilder.setConnectTimeout(REST_CLIENT_CONNECT_TIMEOUT)
+                    .setSocketTimeout(REST_CLIENT_SOCKET_TIMEOUT));
+            builder.setMaxRetryTimeoutMillis(REST_CLIENT_RETRY_TIMEOUT);
+
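+            // Defaults applied to every request: X-Found-Cluster routes the call on
+            // managed (Elastic Cloud style) deployments, and Authorization carries
+            // HTTP basic auth built from the configured username and password.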
+            Header[] defaultHeaders = new Header[]{
+                    new BasicHeader("client.transport.nodes_sampler_interval", "30s"),
+                    new BasicHeader("client.transport.ping_timeout", "30s"),
+                    new BasicHeader("client.transport.sniff", "false"),
+                    new BasicHeader("request.headers.X-Found-Cluster", Config.getElasticHost()),
+                    new BasicHeader("cluster.name", Config.getElasticHost()),
+                    new BasicHeader("xpack.security.transport.ssl.enabled", Boolean.toString(true)),
+                    new BasicHeader("Authorization", String.format("Basic %s", Base64.getEncoder().encodeToString(rawString.getBytes()))),
+            };
+
+            builder.setDefaultHeaders(defaultHeaders);
+            restHighLevelClient = new RestHighLevelClient(builder);
+
+        } catch (Exception e) {
+            // Preserve the underlying cause so client construction failures are debuggable.
+            throw new AssertionError("Setup elastic error", e);
+        }
+        return restHighLevelClient;
+    }
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/FileHandler.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/FileHandler.java
new file mode 100644
index 0000000000000000000000000000000000000000..678cb91204d3f786437090737dc9a29d1e1acfc7
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/FileHandler.java
@@ -0,0 +1,24 @@
+package org.opengroup.osdu.util;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+
+public class FileHandler {
+
+    public static String readFile(String fileName) throws IOException {
+        // FileHandler.class (not FileHandler.class.getClass(), which would resolve
+        // against java.lang.Class and the wrong class loader) must be used so the
+        // resource is looked up on the test classpath.
+        try (InputStream inputStream = FileHandler.class.getResourceAsStream(String.format("/testData/%s", fileName))) {
+            if (inputStream == null) {
+                throw new IOException(String.format("Test resource not found: /testData/%s", fileName));
+            }
+            ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
+            byte[] buffer = new byte[1024];
+            int length;
+            while ((length = inputStream.read(buffer)) != -1) {
+                outputStream.write(buffer, 0, length);
+            }
+            return outputStream.toString(StandardCharsets.UTF_8.toString());
+        }
+    }
+    
+}
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/HTTPClient.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/HTTPClient.java
new file mode 100644
index 0000000000000000000000000000000000000000..28d7aa69c95bbf74a9cb547f549c6b54753aa3c1
--- /dev/null
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/util/HTTPClient.java
@@ -0,0 +1,93 @@
+package org.opengroup.osdu.util;
+
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import lombok.ToString;
+import lombok.extern.java.Log;
+
+import javax.net.ssl.HttpsURLConnection;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.TrustManager;
+import javax.net.ssl.X509TrustManager;
+import javax.ws.rs.core.MediaType;
+import java.security.SecureRandom;
+import java.security.cert.X509Certificate;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+
+@Log
+@ToString
+public abstract class HTTPClient {
+
+    private static Random random = new Random();
+    private final int MAX_ID_SIZE = 50;
+
+    private static final String HEADER_CORRELATION_ID = "correlation-id";
+    
+    public abstract String getAccessToken();
+
+    private static Client getClient() {
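+        // The integration environments may use self-signed certificates, so this
+        // test client intentionally trusts all certificates. Do not reuse this
+        // TrustManager outside of tests.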
+        TrustManager[] trustAllCerts = new TrustManager[]{new X509TrustManager() {
+            @Override
+            public X509Certificate[] getAcceptedIssuers() {
+                return null;
+            }
+
+            @Override
+            public void checkClientTrusted(X509Certificate[] certs, String authType) {
+            }
+
+            @Override
+            public void checkServerTrusted(X509Certificate[] certs, String authType) {
+            }
+        }};
+
+        try {
+            SSLContext sc = SSLContext.getInstance("TLS");
+            sc.init(null, trustAllCerts, new SecureRandom());
+            HttpsURLConnection.setDefaultSSLSocketFactory(sc.getSocketFactory());
+        } catch (Exception ignored) {
+        }
+        return Client.create();
+    }
+
+    public ClientResponse send(String httpMethod, String url, String payLoad, Map<String, String> headers, String token) {
+        ClientResponse response;
+        try {
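+            // Attach a fresh correlation id to every request so it can be traced
+            // through the service logs.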
+            String correlationId = java.util.UUID.randomUUID().toString();
+            log.info(String.format("Request correlation id: %s", correlationId));
+            headers.put(HEADER_CORRELATION_ID, correlationId);
+            Client client = getClient();
+            client.setReadTimeout(180000);
+            client.setConnectTimeout(10000);
+            WebResource webResource = client.resource(url);
+            response = this.getClientResponse(httpMethod, payLoad, webResource, headers, token);
+        } catch (Exception e) {
+            throw new AssertionError("Error: send request failed", e);
+        }
+        log.info("response received");
+        return response;
+    }
+
+    private ClientResponse getClientResponse(String httpMethod, String requestBody, WebResource webResource, Map<String, String> headers, String token) {
+        final WebResource.Builder builder = webResource.accept(MediaType.APPLICATION_JSON).type(MediaType.APPLICATION_JSON).header("Authorization", token);
+        headers.forEach(builder::header);
+        log.info("making request to datalake api");
+        return builder.method(httpMethod, ClientResponse.class, requestBody);
+    }
+
+    public Map<String, String> getCommonHeader() {
+        Map<String, String> headers = new HashMap<>();
+        headers.put("data-partition-id", Config.getDataPartitionIdTenant1());
+        return headers;
+    }
+
+    public static Map<String, String> overrideHeader(Map<String, String> currentHeaders, String... partitions) {
+        String value = String.join(",", partitions);
+        currentHeaders.put("data-partition-id", value);
+        return currentHeaders;
+    }
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/features/delete/Delete.feature b/testing/indexer-test-core/src/main/resources/features/delete/Delete.feature
new file mode 100644
index 0000000000000000000000000000000000000000..519b648fa6840c9e320ffa1739a9e4723fe47030
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/features/delete/Delete.feature
@@ -0,0 +1,27 @@
+Feature: Delete search indexes
+  If a user wants to delete an index, search should offer a way to do so.
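+
+  # <timestamp> placeholders are expected to be substituted by the test harness at
+  # runtime so each run works against its own set of indexes.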
+
+  Background:
+    Given the elastic search is initialized with the following data
+      | kind                                     | index                                    | mappingFile | recordFile |
+      | tenant1:testdelete<timestamp>:well:1.0.0 | tenant1-testdelete<timestamp>-well-1.0.0 | records_1   | records_1  |
+      | tenant1:testdelete<timestamp>:well:2.0.0 | tenant1-testdelete<timestamp>-well-2.0.0 | records_2   | records_2  |
+
+
+  Scenario Outline: Delete a given index from the Search
+    When I send a delete request with <kind>
+    Then the index should get deleted and I should get <response_code> response
+
+    Examples:
+      | kind                                       | response_code |
+      | "tenant1:testdelete<timestamp>:well:1.0.0" | 200           |
+
+  Scenario Outline: Fail the request for deletion of a index from the Search with invalid inputs
+    When I send a delete request with <kind>
+    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
+
+    Examples:
+      | kind                                       | response_code | response_type          | response_message                                          | errors                                                                                                                                                                                  |
+      | "tenant1:testdelete<timestamp>:*:*"        | 400           | "Bad Request"          | "Invalid parameters were given on search request"         | "Not a valid record kind. Found: tenant1:testdelete<timestamp>:*:*, required format is partition:data-source-id:type:schema-version with no wildcards e.g. tenant:well:wellbore:1.0.2" |
+      | "tenant1:testdatasource:wellrating:9.0.0"  | 404           | "Index deletion error" | "Kind tenant1:testdatasource:wellrating:9.0.0 not found"  | ""                                                                                                                                                                                      |
+
diff --git a/testing/indexer-test-core/src/main/resources/features/indexrecord/IndexRecord.feature b/testing/indexer-test-core/src/main/resources/features/indexrecord/IndexRecord.feature
new file mode 100644
index 0000000000000000000000000000000000000000..e0fffb72205939a4bb8913395f027fe80145ef0f
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/features/indexrecord/IndexRecord.feature
@@ -0,0 +1,27 @@
+Feature: Indexing of the documents
+  This feature validates that documents ingested with different kinds and attributes are indexed correctly in Elasticsearch.
+
+  Background:
+    Given the schema is created with the following kind
+      | kind                                    | index                                   | schemaFile      |
+      | tenant1:testindex<timestamp>:well:1.0.0 | tenant1-testindex<timestamp>-well-1.0.0 | index_records_1 |
+      | tenant1:testindex<timestamp>:well:2.0.0 | tenant1-testindex<timestamp>-well-2.0.0 | index_records_2 |
+      | tenant1:testindex<timestamp>:well:3.0.0 | tenant1-testindex<timestamp>-well-3.0.0 | index_records_1 |
+
+  Scenario Outline: Ingest the record and Index in the Elastic Search
+    When I ingest records with the <recordFile> with <acl> for a given <kind>
+    Then I should get the <number> documents for the <index> in the Elastic Search
+    Then I should get the elastic <mapping> for the <type> and <index> in the Elastic Search
+
+    Examples:
+      | kind                                      | recordFile        | number | index                                     | type   | acl                                    | mapping                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                                |
+      | "tenant1:testindex<timestamp>:well:1.0.0" | "index_records_1" | 5      | "tenant1-testindex<timestamp>-well-1.0.0" | "well" | "data.default.viewers@opendes"         | "{"mappings":{"well":{"dynamic":"false","properties":{"acl":{"properties":{"owners":{"type":"keyword"},"viewers":{"type":"keyword"}}},"ancestry":{"properties":{"parents":{"type":"keyword"}}},"data":{"properties":{"Basin":{"type":"text"},"Country":{"type":"text"},"County":{"type":"text"},"EmptyAttribute":{"type":"text"},"Established":{"type":"date"},"Field":{"type":"text"},"Location":{"type":"geo_point"},"OriginalOperator":{"type":"text"},"Rank":{"type":"integer"},"Score":{"type":"integer"},"State":{"type":"text"},"WellName":{"type":"text"},"WellStatus":{"type":"text"},"WellType":{"type":"text"}}},"id":{"type":"keyword"},"index":{"properties":{"lastUpdateTime":{"type":"date"},"statusCode":{"type":"integer"},"trace":{"type":"text"}}},"kind":{"type":"keyword"},"legal":{"properties":{"legaltags":{"type":"keyword"},"otherRelevantDataCountries":{"type":"keyword"},"status":{"type":"keyword"}}},"namespace":{"type":"keyword"},"type":{"type":"keyword"},"version":{"type":"long"},"x-acl":{"type":"keyword"}}}}}" |
+      | "tenant1:testindex<timestamp>:well:3.0.0" | "index_records_1" | 5      | "tenant1-testindex<timestamp>-well-3.0.0" | "well" | "data.default.viewers@opendes"         | "{"mappings":{"well":{"dynamic":"false","properties":{"acl":{"properties":{"owners":{"type":"keyword"},"viewers":{"type":"keyword"}}},"ancestry":{"properties":{"parents":{"type":"keyword"}}},"data":{"properties":{"Basin":{"type":"text"},"Country":{"type":"text"},"County":{"type":"text"},"EmptyAttribute":{"type":"text"},"Established":{"type":"date"},"Field":{"type":"text"},"Location":{"type":"geo_point"},"OriginalOperator":{"type":"text"},"Rank":{"type":"integer"},"Score":{"type":"integer"},"State":{"type":"text"},"WellName":{"type":"text"},"WellStatus":{"type":"text"},"WellType":{"type":"text"}}},"id":{"type":"keyword"},"index":{"properties":{"lastUpdateTime":{"type":"date"},"statusCode":{"type":"integer"},"trace":{"type":"text"}}},"kind":{"type":"keyword"},"legal":{"properties":{"legaltags":{"type":"keyword"},"otherRelevantDataCountries":{"type":"keyword"},"status":{"type":"keyword"}}},"namespace":{"type":"keyword"},"type":{"type":"keyword"},"version":{"type":"long"},"x-acl":{"type":"keyword"}}}}}" |
+
+  Scenario Outline: Ingest the record and Index in the Elastic Search with bad attribute
+    When I ingest records with the <recordFile> with <acl> for a given <kind>
+    Then I should get the <number> documents for the <index> in the Elastic Search without <skippedAttribute>
+
+    Examples:
+      | kind                                      | recordFile        | number | index                                     | skippedAttribute | acl                                   |
+      | "tenant1:testindex<timestamp>:well:2.0.0" | "index_records_2" | 4      | "tenant1-testindex<timestamp>-well-2.0.0" | "data.Location"  | "data.default.viewers@opendes" |
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/features/kindschema/KindSchema.feature b/testing/indexer-test-core/src/main/resources/features/kindschema/KindSchema.feature
new file mode 100644
index 0000000000000000000000000000000000000000..ee526ad538bbb85f6e02e182d6e009508fb0e6a2
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/features/kindschema/KindSchema.feature
@@ -0,0 +1,26 @@
+Feature: Get schema for a given kind
+  Allow a user to find the indexed attributes and their respective data types.
+
+  Background:
+    Given the elastic search is initialized with the following data
+      | kind                                     | index                                    | mappingFile |
+      | tenant1:testschema<timestamp>:well:1.0.0 | tenant1-testschema<timestamp>-well-1.0.0 | records_1   |
+
+  Scenario Outline: Get a schema from search for a kind
+    When I send get schema request with <kind>
+    And I send request to tenant <tenant>
+    Then I should get <response_code> status with response <response_message>
+
+    Examples:
+      | tenant    | kind                                       | response_code | response_message |
+      | "tenant1" | "tenant1:testschema<timestamp>:well:1.0.0" | 200           | "{"tenant1-testschema<timestamp>-well-1.0.0":{"mappings":{"well":{"properties":{"acl":{"properties":{"owners":{"type":"keyword"},"viewers":{"type":"keyword"}}},"legal":{"properties":{"legaltags":{"type":"keyword"},"otherRelevantDataCountries":{"type":"keyword"},"status":{"type":"keyword"}}},"data":{"properties":{"Basin":{"type":"text"},"Country":{"type":"text"},"County":{"type":"text"},"EmptyAttribute":{"type":"text"},"Established":{"type":"date"},"Field":{"type":"text"},"Location":{"type":"geo_point"},"OriginalOperator":{"type":"text"},"Rank":{"type":"integer"},"Score":{"type":"integer"},"State":{"type":"text"},"WellName":{"type":"text"},"WellStatus":{"type":"text"},"WellType":{"type":"text"}}},"id":{"type":"keyword"},"kind":{"type":"keyword"},"namespace":{"type":"keyword"},"type":{"type":"keyword"},"version":{"type":"keyword"},"x-acl":{"type":"keyword"}}}}}}" |
+
+  Scenario Outline: Fail request to get schema from search with invalid inputs
+    When I send get schema request with <kind>
+    And I send request to tenant <tenant>
+    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
+
+    Examples:
+      | tenant    | kind                                      | response_code | response_type    | response_message                                         | errors                                                                                                                                                                                  |
+      | "tenant1" | "tenant1-testschema<timestamp>:*:*"       | 400           | "Bad Request"    | "Invalid parameters were given on search request"        | "Not a valid record kind. Found: tenant1-testschema<timestamp>:*:*, required format is partition:data-source-id:type:schema-version with no wildcards e.g. tenant:well:wellbore:1.0.2" |
+      | "tenant1" | "tenant1:testdatasource:wellrating:9.0.0" | 404           | "Kind not found" | "Kind tenant1:testdatasource:wellrating:9.0.0 not found" | ""                                                                                                                                                                                      |
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/features/query/crosscluster/Query.feature b/testing/indexer-test-core/src/main/resources/features/query/crosscluster/Query.feature
new file mode 100644
index 0000000000000000000000000000000000000000..4e37a677da089ca41f5bdb4d7efc87e2e6f43060
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/features/query/crosscluster/Query.feature
@@ -0,0 +1,103 @@
+Feature: Search with different queries
+  To allow users to find their data quickly, search should offer multiple ways to search it.
+
+  Background:
+    Given the elastic search is initialized with the following data
+      | kind                                    | index                                   | mappingFile | recordFile | viewerGroup                   | ownerGroup                  |
+      | tenant1:testquery<timestamp>:well:1.0.0 | tenant1-testquery<timestamp>-well-1.0.0 | records_1   | records_1  | data.default.viewers@opendes  | data.default.owners@opendes |
+      | tenant1:testquery<timestamp>:well:2.0.0 | tenant1-testquery<timestamp>-well-2.0.0 | records_2   | records_2  | data.default.viewers@opendes  | data.default.owners@opendes |
+      | common:testquery<timestamp>:well:1.0.0  | common-testquery<timestamp>-well-1.0.0  | records_1   | records_1  | data.default.viewers@opendes  | data.default.owners@opendes |
+      | common:testquery<timestamp>:well:2.0.0  | common-testquery<timestamp>-well-2.0.0  | records_2   | records_2  | data.default.viewers@opendes  | data.default.owners@opendes |
+
+  @ignore
+  Scenario Outline: Search data in a given kind
+    When I send <query> with <kind>
+    And I limit the count of returned results to <limit>
+    And I set the offset of starting point as <offset>
+    And I set the fields I want in response as <returned_fields>
+    And I send request to tenants <tenants>
+    Then I should get in response <count> records
+
+    Examples:
+      | tenants            | kind                                      | query  | limit | offset | returned_fields | count |
+      | "tenant1","common" | "*:testquery<timestamp>:well:1.0.0"       | "OSDU" | None  | None   | All             | 6     |
+      | "tenant1","common" | "tenant1:testquery<timestamp>:well:1.0.0" | "OSDU" | None  | None   | All             | 3     |
+
+
+  @ignore
+  Scenario Outline: Search data in a given a kind with invalid inputs
+    When I send <query> with <kind>
+    And I limit the count of returned results to <limit>
+    And I set the offset of starting point as <offset>
+    And I send request to tenants <tenants>
+    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
+
+    Examples:
+      | tenants            | kind                                | query | limit | offset | response_code | response_type   | response_message                                    | errors |
+      | "tenant2","common" | "*:testquery<timestamp>:well:1.0.0" | None  | None  | None   | 401           | "Access denied" | "The user is not authorized to perform this action" | ""     |
+
+  @ignore
+  Scenario Outline: Search data across the kinds with bounding box inputs
+    When I send <query> with <kind>
+    And I send request to tenants <tenants>
+    And I apply geographical query on field <field>
+    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
+    Then I should get in response <count> records
+
+    Examples:
+      | tenants            | kind                                      | query                           | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | count |
+      | "tenant1","common" | "*:testquery<timestamp>:well:1.0.0"       | "data.OriginalOperator:OFFICE4" | "data.Location" | 45                | -100               | 0                     | 0                      | 2     |
+      | "tenant1","common" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 45                | -100               | 0                     | 0                      | 1     |
+
+  @ignore
+  Scenario Outline: Search data across the kinds with distance inputs
+    When I send <query> with <kind>
+    And I send request to tenants <tenants>
+    And I apply geographical query on field <field>
+    And define focus coordinates as (<latitude>, <longitude>) and search in a <distance> radius
+    Then I should get in response <count> records
+
+    Examples:
+      | tenants            | kind                                      | query               | field           | latitude | longitude | distance | count |
+      | "tenant1","common" | "*:testquery<timestamp>:well:1.0.0"       | "Under development" | "data.Location" | 0        | 0         | 20000000 | 6     |
+      | "tenant1","common" | "tenant1:testquery<timestamp>:well:1.0.0" | "Under development" | "data.Location" | 0        | 0         | 20000000 | 3     |
+
+  @ignore
+  Scenario Outline: Search data across the kinds
+    When I send <query> with <kind>
+    And I limit the count of returned results to <limit>
+    And I set the offset of starting point as <offset>
+    And I set the fields I want in response as <returned_fields>
+    And I send request to tenants <tenants>
+    Then I should get in response <count> records
+
+    Examples:
+      | tenants            | kind                               | query          | limit | offset | returned_fields | count |
+      | "tenant1","common" | "*:testquery<timestamp>:*:*"       | "OSDU OFFICE*" | 12    | None   | All             | 12    |
+      | "tenant1","common" | "tenant1:testquery<timestamp>:*:*" | "OSDU OFFICE*" | 12    | None   | All             | 6     |
+
+
+  @ignore
+  Scenario Outline: Search data across the kinds with bounding box inputs
+    When I send <query> with <kind>
+    And I send request to tenants <tenants>
+    And I apply geographical query on field <field>
+    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
+    Then I should get in response <count> records
+
+    Examples:
+      | tenants            | kind                               | query | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | count |
+      | "tenant1","common" | "*:testquery<timestamp>:*:*"       | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 6     |
+      | "tenant1","common" | "tenant1:testquery<timestamp>:*:*" | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 3     |
+
+  @ignore
+  Scenario Outline: Search data across the kinds with geo polygon inputs
+    When I send <query> with <kind>
+    And I send request to tenants <tenants>
+    And define geo polygon with following points <points_list>
+    And I apply geographical query on field <field>
+    Then I should get in response <count> records
+    Examples:
+      | tenants            | kind                                      | query | field           | points_list                                                               | count |
+      | "tenant1","common" | "*:testquery<timestamp>:well:1.0.0"       | None  | "data.Location" | (26.12362;-112.226716)  , (26.595873;-68.457186) , (52.273184;-93.593904) | 4     |
+      | "tenant1","common" | "tenant1:testquery<timestamp>:well:1.0.0" | None  | "data.Location" | (26.12362;-112.226716)  , (26.595873;-68.457186) , (52.273184;-93.593904) | 2     |
diff --git a/testing/indexer-test-core/src/main/resources/features/query/singlecluster/Query.feature b/testing/indexer-test-core/src/main/resources/features/query/singlecluster/Query.feature
new file mode 100644
index 0000000000000000000000000000000000000000..7b341d66585a55cfd057932d33e215c52d19f793
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/features/query/singlecluster/Query.feature
@@ -0,0 +1,205 @@
+Feature: Search with different queries
+  To allow users to find their data quickly, search should offer multiple ways to search it.
+
+  Background:
+    Given the elastic search is initialized with the following data
+      | kind                                    | index                                   | mappingFile | recordFile | viewerGroup                  | ownerGroup                      |
+      | tenant1:testquery<timestamp>:well:1.0.0 | tenant1-testquery<timestamp>-well-1.0.0 | records_1   | records_1  | data.default.viewers@opendes | data.default.owners@opendes     |
+      | tenant1:testquery<timestamp>:well:2.0.0 | tenant1-testquery<timestamp>-well-2.0.0 | records_2   | records_2  | data.default.viewers@opendes | data.default.testowners@opendes |
+
+  Scenario Outline: Search data in a given kind
+    When I send <query> with <kind>
+    And I limit the count of returned results to <limit>
+    And I set the offset of starting point as <offset>
+    And I set the fields I want in response as <returned_fields>
+    And I send request to tenant <tenant>
+    Then I should get in response <count> records
+
+    Examples:
+      | tenant    | kind                                      | query                                | limit | offset | returned_fields | count |
+      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4"      | None  | None   | All             | 1     |
+      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | None                                 | 0     | None   | NULL            | 3     |
+      ######################################Range Query test cases##########################################################################
+      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.Rank:{1 TO 3}"                 | None  | None   | All             | 1     |
+      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.Rank:[10 TO 20]"               | None  | None   | All             | 1     |
+      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.Rank:>=2"                      | None  | None   | All             | 2     |
+      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.Established:{* TO 2012-01-01}" | None  | None   | All             | 2     |
+      ######################################Text Query test cases###########################################################################
+      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "OSDU"                               | None  | None   | All             | 3     |
+      | "tenant1" | "tenant1:testquery<timestamp>:well:2.0.0" | "data.OriginalOperator:OFFICE6"      | None  | None   | All             | 1     |
+      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | ""OFFICE2" \| OFFICE3"               | None  | None   | All             | 1     |
+      | "tenant1" | "tenant1:testquery<timestamp>:well:2.0.0" | "data.Well\*:(Data Lake Cloud)"      | None  | None   | All             | 3     |
+
+  Scenario Outline: Search data in a given a kind with invalid inputs
+    When I send <query> with <kind>
+    And I limit the count of returned results to <limit>
+    And I set the offset of starting point as <offset>
+    And I send request to tenant <tenant>
+    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
+
+    Examples:
+      | tenant    | kind                                      | query | limit | offset | response_code | response_type   | response_message                                    | errors                                     |
+      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | None  | -1    | None   | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "'limit' must be equal or greater than 0"  |
+      | "tenant1" | "invalid"                                 | None  | 1     | None   | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "Not a valid record kind. Found: invalid"  |
+      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | None  | 1     | -1     | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "'offset' must be equal or greater than 0" |
+      | "tenant2" | "tenant1:testquery<timestamp>:well:1.0.0" | None  | None  | None   | 401           | "Access denied" | "The user is not authorized to perform this action" | ""                                         |
+
+  Scenario Outline: Search data across the kinds with bounding box inputs
+    When I send <query> with <kind>
+    And I apply geographical query on field <field>
+    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
+    Then I should get in response <count> records
+
+    Examples:
+      | kind                                      | query                           | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | count |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None                            | "data.Location" | 45                | -100               | 0                     | 0                      | 2     |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None                            | "data.Location" | 45                | -80                | 0                     | 0                      | 0     |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 45                | -100               | 0                     | 0                      | 1     |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 10                | -100               | 0                     | 0                      | 0     |
+
+  Scenario Outline: Search data across the kinds with invalid bounding box inputs
+    When I send <query> with <kind>
+    And I apply geographical query on field <field>
+    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
+    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
+
+    Examples:
+      | kind                                      | query                           | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | response_code | response_type | response_message                                   | errors                                                                    |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 0                 | 0                  | 0                     | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request"  | "top latitude cannot be the same as bottom latitude: 0.0 == 0.0"          |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 0                 | -100               | -10                   | -100                   | 400           | "Bad Request" | "Invalid parameters were given on search request"  | "left longitude cannot be the same as right longitude: -100.0 == -100.0"  |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 10                | -100               | 10                    | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request"  | "top latitude cannot be the same as bottom latitude: 10.0 == 10.0"        |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 45                | -100               | -95                   | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request"  | "'latitude' value is out of the range [-90, 90]"                          |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 0                 | -100               | 10                    | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request"  | "top corner is below bottom corner: 0.0 vs. 10.0"                         |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | None              | None               | 0                     | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request"  | "Invalid payload"                                                         |
+      | "tenant1:testquery<timestamp>:*:*"        | None                            | "officeAddress" | 45                | -100               | 0                     | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request"  | ""                                                                        |
+
+  Scenario Outline: Search data across the kinds with distance inputs
+    When I send <query> with <kind>
+    And I apply geographical query on field <field>
+    And define focus coordinates as (<latitude>, <longitude>) and search in a <distance> radius
+    Then I should get in response <count> records
+
+    Examples:
+      | kind                                      | query               | field           | latitude | longitude | distance | count |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | "Under development" | "data.Location" | 0        | 0         | 20000000 | 3     |
+      | "tenant1:testquery<timestamp>:*:*"        | "TEXAS OR TX"       | "data.Location" | 45       | -100      | 20000000 | 2     |
+
+  Scenario Outline: Search data across the kinds with invalid distance inputs
+    When I send <query> with <kind>
+    And I apply geographical query on field <field>
+    And define focus coordinates as (<latitude>, <longitude>) and search in a <distance> radius
+    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
+
+    Examples:
+      | kind                               | query          | field           | latitude | longitude | distance | response_code | response_type | response_message                                  | errors                                              |
+      | "tenant1:testquery<timestamp>:*:*" | "OFFICE - 2"   | "data.Location" | -45      | -200      | 1000     | 400           | "Bad Request" | "Invalid parameters were given on search request" | "'longitude' value is out of the range [-180, 180]" |
+      | "tenant1:testquery<timestamp>:*:*" | "TEXAS OR USA" | "data.Location" | -95      | -100      | 1000     | 400           | "Bad Request" | "Invalid parameters were given on search request" | "'latitude' value is out of the range [-90, 90]"    |
+      | "tenant1:testquery<timestamp>:*:*" | "Harris"       | "ZipCode"       | -45      | -200      | 1000     | 400           | "Bad Request" | "Invalid parameters were given on search request" | "'longitude' value is out of the range [-180, 180]" |
+
+  Scenario Outline: Search data across the kinds
+    When I send <query> with <kind>
+    And I limit the count of returned results to <limit>
+    And I set the offset of starting point as <offset>
+    And I set the fields I want in response as <returned_fields>
+    And I send request to tenant <tenant>
+    Then I should get in response <count> records
+
+    Examples:
+      | tenant    | kind                               | query                                      | limit | offset | returned_fields | count |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | None                                       | 1     | None   | All             | 1     |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | None                                       | None  | 2      | All             | 4     |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | None                                       | None  | None   | Country         | 6     |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "OSDU OFFICE*"                             | None  | None   | All             | 6     |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "SCHLUM OFFICE"                            | None  | None   | All             | 6     |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | ""SCHLUM OFFICE""                          | None  | None   | All             | 0     |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "data.Country:USA"                         | None  | None   | All             | 2     |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "TEXAS AND OFFICE3"                        | None  | None   | All             | 1     |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "data.OriginalOperator:OFFICE5 OR OFFICE2" | None  | None   | All             | 2     |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "data.OriginalOperator:STI OR HT"          | None  | None   | All             | 0     |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "_exists_:data.Basin"                      | None  | None   | All             | 4     |
+      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "data.Well\*:"Data Lake Cloud""            | None  | None   | All             | 5     |
+
+
+  Scenario Outline: Search data across the kinds with bounding box inputs
+    When I send <query> with <kind>
+    And I apply geographical query on field <field>
+    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
+    Then I should get in response <count> records
+
+    Examples:
+      | kind                               | query | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | count |
+      | "tenant1:testquery<timestamp>:*:*" | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 3     |
+      | "tenant1:testquery<timestamp>:*:*" | None  | "data.Location" | 10                | -100               | 0                     | 0                      | 0     |
+
+  Scenario Outline: Search data across the kinds with geo polygon inputs
+    When I send <query> with <kind>
+    And define geo polygon with following points <points_list>
+    And I apply geographical query on field <field>
+    Then I should get in response <count> records
+    Examples:
+      | kind                                      | query       | field           | points_list                                                                                                        | count |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "data.Location" | (26.12362;-112.226716)  , (26.595873;-68.457186) , (52.273184;-93.593904)                                          | 2     |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "data.Location" | (33.201112;-113.282863) , (33.456305;-98.269744) , (52.273184;-93.593904)                                          | 0     |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | "OFFICE4" | "data.Location" | (26.12362;-112.226716)  , (26.595873;-68.457186) , (52.273184;-93.593904)                                          | 1     |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "data.Location" | (14.29056;72.18936)     , (22.13762;72.18936)    , (22.13762;77.18936) , (14.29056;77.18936) , (14.29056;72.18936) | 1     |
+
+  Scenario Outline: Search data across the kinds with invalid geo polygon inputs
+    When I send <query> with <kind>
+    And define geo polygon with following points <points_list>
+    And I apply geographical query on field <field>
+    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
+
+    Examples:
+      | kind                                      | query | field           | points_list                                                                | response_code | response_type | response_message                                  | errors                                           |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None  | "data.Location" | (26.595873;-68.457186)   , (52.273184;-93.593904)                          | 400           | "Bad Request" | "Invalid parameters were given on search request" | "too few points defined for geo polygon query"   |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None  | "data.Location" | (516.595873;-68.457186)  , (52.273184;-94.593904) , (95.273184;-93.593904) | 400           | "Bad Request" | "Invalid parameters were given on search request" | "'latitude' value is out of the range [-90, 90]" |
+
+  Scenario Outline: Search data and sort the results with the given sort fields and order
+    When I send <query> with <kind>
+    And I want the results sorted by <sort>
+    Then I should get records in right order first record id: <first_record_id>, last record id: <last_record_id>
+    Examples:
+      | kind                                      | query       | sort                                                                         | first_record_id       | last_record_id        |
+      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["id"],"order":["ASC"]}                                             | "test:well:1.0.0:1"   | "test:well:2.0.0:3"   |
+      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["id"],"order":["DESC"]}                                            | "test:well:2.0.0:3"   | "test:well:1.0.0:1"   |
+      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["namespace","data.Rank"],"order":["ASC","DESC"]}                   | "test:well:1.0.0:3"   | "test:well:2.0.0:1"   |
+
+  Scenario Outline: Search data in a given kind with invalid sort field
+    When I send <query> with <kind>
+    And I want the results sorted by <sort>
+    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
+
+    Examples:
+      | kind                                      | query       | sort                                           | response_code | response_type   | response_message                                    | errors                                                             |
+      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":[],"order":["ASC"]}                   | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "'sort.field' can not be null or empty"                            |
+      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["id"],"order":[]}                    | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "'sort.order' can not be null or empty"                            |
+      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["id","data.Rank"],"order":["DESC"]}  | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "'sort.field' and 'sort.order' size do not match"                  |
+      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["id"],"order":[null]}                | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "Not a valid order option. It can only be either 'ASC' or 'DESC'"                       |
+
+  Scenario Outline: Search data in a given kind with different searchAs modes
+    When I send <query> with <kind>
+    And I want to search as owner <is_owner>
+    Then I should get in response <count> records
+
+    Examples:
+      | kind                                      | query       | is_owner | count |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | true     | 3     |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | false    | 3     |
+      | "tenant1:testquery<timestamp>:well:2.0.0" | None        | true     | 0     |
+      | "tenant1:testquery<timestamp>:well:2.0.0" | None        | false    | 3     |
+      | "tenant1:testquery<timestamp>:well:*"     | None        | false    | 6     |
+      | "tenant1:testquery<timestamp>:well:*"     | None        | true     | 3     |
+      | "tenant1:testquery<timestamp>:well:*"     | "OFFICE4" | true     | 1     |
+      | "tenant1:testquery<timestamp>:well:*"     | None        | None     | 6     |
+
+  Scenario Outline: Search data in a given kind with aggregateBy field
+    When I send <query> with <kind>
+    And I want to aggregate by <aggregateBy>
+    Then I should get <count> unique values
+
+    Examples:
+      | kind                                      | query       | aggregateBy | count |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "namespace" | 1     |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "type"      | 1     |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | "OFFICE4" | "data.Rank" | 1     |
+      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "data.Rank" | 3     |
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/features/querybycursor/crosscluster/QueryByCursor.feature b/testing/indexer-test-core/src/main/resources/features/querybycursor/crosscluster/QueryByCursor.feature
new file mode 100644
index 0000000000000000000000000000000000000000..5e078b7249c9c33c67675ddab0b7ba33e191803d
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/features/querybycursor/crosscluster/QueryByCursor.feature
@@ -0,0 +1,58 @@
+Feature: Search recursively on cursor with different queries
+  To allow users to find their data quickly, search should offer multiple ways to query data and iterate over all the results.
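+  # Cursor flow: the first request returns a page of results plus a cursor; sending the
+  # cursor back in a follow-up request returns the next page until no records remain.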
+
+  Background:
+    Given the elastic search is initialized with the following data
+      | kind                                     | index                                    | mappingFile | recordFile | viewerGroup                         | ownerGroup                          |
+      | tenant1:testcursor<timestamp>:well:1.0.0 | tenant1-testcursor<timestamp>-well-1.0.0 | records_1   | records_1  | data.search.integrationtest@tenant1 | data.search.integrationtest@tenant1 |
+      | tenant1:testcursor<timestamp>:well:2.0.0 | tenant1-testcursor<timestamp>-well-2.0.0 | records_2   | records_2  | data.search.integrationtest@tenant1 | data.search.integrationtest@tenant1 |
+      | common:testcursor<timestamp>:well:1.0.0  | common-testcursor<timestamp>-well-1.0.0  | records_1   | records_1  | data.search.integrationtest@common  | data.search.integrationtest@common  |
+      | common:testcursor<timestamp>:well:2.0.0  | common-testcursor<timestamp>-well-2.0.0  | records_2   | records_2  | data.search.integrationtest@common  | data.search.integrationtest@common  |
+
+  @ignore
+  Scenario Outline: Search recursively page by page data across the kinds
+    When I send <query> with <kind>
+    And I limit the count of returned results to <limit>
+    And I set the fields I want in response as <returned_fields>
+    And I send request to tenants <q1_tenants>
+    Then I should get in response <first_count> records along with a cursor
+    And I send request to tenants <q2_tenants>
+    Then I should get in response <final_count> records
+
+    Examples:
+      | q1_tenants         | q2_tenants         | kind                                | query                   | limit | returned_fields | first_count | final_count |
+      | "tenant1","common" | "tenant1","common" | "*:testcursor<timestamp>:*:*"       | "TX OR TEXAS OR FRANCE" | 3     | All             | 3           | 3           |
+      | "tenant1","common" | "tenant1","common" | "tenant1:testcursor<timestamp>:*:*" | "TX OR TEXAS OR FRANCE" | 3     | All             | 3           | 0           |
+
+
+  @ignore
+  Scenario Outline: Search recursively page by page data across the kinds with invalid inputs and headers
+    When I send <query> with <kind>
+    And I limit the count of returned results to <limit>
+    And I set the fields I want in response as <returned_fields>
+    And I send request to tenants <q1_tenants>
+    Then I should get in response <first_count> records along with a cursor
+    And I send request to tenants <q2_tenants>
+    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
+
+    Examples:
+      | q1_tenants         | q2_tenants         | kind                                 | query | limit | returned_fields | first_count | response_code | response_type   | response_message                                    | errors |
+      | "tenant1","common" | "tenant2","common" | "*:testcursor<timestamp>:well:1.0.0" | None  | 1     | All             | 1           | 401           | "Access denied" | "The user is not authorized to perform this action" | ""     |
+
+  @ignore
+  Scenario Outline: Search data across the kinds with bounding box inputs
+    When I send <query> with <kind>
+    And I apply geographical query on field <field>
+    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
+    And I limit the count of returned results to <limit>
+    And I send request to tenants <q1_tenants>
+    Then I should get in response <first_count> records along with a cursor
+    And I send request to tenants <q2_tenants>
+    Then I should get in response <final_count> records
+
+    Examples:
+      | q1_tenants         | q2_tenants         | kind                                       | query | limit | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | first_count | final_count |
+      | "tenant1","common" | "tenant1","common" | "*:testcursor<timestamp>:well:1.0.0"       | None  | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 4           | 0           |
+      | "tenant1","common" | "tenant1","common" | "tenant1:testcursor<timestamp>:well:1.0.0" | None  | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 2           | 0           |
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/features/querybycursor/singlecluster/QueryByCursor.feature b/testing/indexer-test-core/src/main/resources/features/querybycursor/singlecluster/QueryByCursor.feature
new file mode 100644
index 0000000000000000000000000000000000000000..83a5171560414c2dcbbd5f5df801993aa1b2d887
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/features/querybycursor/singlecluster/QueryByCursor.feature
@@ -0,0 +1,94 @@
+Feature: Search recursively on cursor with different queries
+  To allow users to find their data quickly, search should offer multiple ways to query data and iterate over all the results.
+
+  Background:
+    Given the elastic search is initialized with the following data
+      | kind                                     | index                                    | mappingFile | recordFile | viewerGroup                  | ownerGroup                      |
+      | tenant1:testcursor<timestamp>:well:1.0.0 | tenant1-testcursor<timestamp>-well-1.0.0 | records_1   | records_1  | data.default.viewers@opendes | data.default.owners@opendes     |
+      | tenant1:testcursor<timestamp>:well:2.0.0 | tenant1-testcursor<timestamp>-well-2.0.0 | records_2   | records_2  | data.default.viewers@opendes | data.default.testowners@opendes |
+
+  Scenario Outline: Search recursively page by page data across the kinds
+    When I send <query> with <kind>
+    And I limit the count of returned results to <limit>
+    And I set the fields I want in response as <returned_fields>
+    And I send request to tenant <q1_tenant>
+    Then I should get in response <first_count> records along with a cursor
+    And I send request to tenant <q2_tenant>
+    Then I should get in response <final_count> records
+
+    Examples:
+      | q1_tenant | q2_tenant | kind                                | query                               | limit | returned_fields | first_count | final_count |
+      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:*:*" | None                                | 4     | All             | 4           | 2           |
+      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:*:*" | None                                | None  | All             | 6           | 0           |
+      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:*:*" | "TX OR TEXAS OR FRANCE"             | 1     | All             | 1           | 1           |
+      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:*:*" | "XdQQ6GCSNSBLTESTFAIL"              | 1     | All             | 0           | 0           |
+      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:*:*" | "\"OFFICE2\" \| OFFICE3 \| OFFICE5" | 1     | All             | 1           | 1           |
+
+  Scenario Outline: Search recursively page by page data across the kinds with invalid inputs
+    When I send <query> with <kind>
+    And I limit the count of returned results to <limit>
+    And I set an invalid cursor
+    And I send request to tenant <tenant>
+    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
+
+    Examples:
+      | tenant    | kind                                       | query | limit | response_code | response_type                 | response_message                                  | errors                                    |
+      | "tenant1" | "tenant1:testcursor<timestamp>:well:1.0.0" | None  | None  | 400           | "Can't find the given cursor" | "The given cursor is invalid or expired"          | ""                                        |
+      | "tenant1" | "*:*:*"                                    | None  | 0     | 400           | "Bad Request"                 | "Invalid parameters were given on search request" | "Not a valid record kind. Found: *:*:*"   |
+      | "tenant1" | "tenant1:testcursor<timestamp>:well:1.0.0" | None  | -1    | 400           | "Bad Request"                 | "Invalid parameters were given on search request" | "'limit' must be equal or greater than 0" |
+
+  Scenario Outline: Search recursively page by page data across the kinds with invalid inputs and headers
+    When I send <query> with <kind>
+    And I limit the count of returned results to <limit>
+    And I set the fields I want in response as <returned_fields>
+    And I send request to tenant <q1_tenant>
+    Then I should get in response <first_count> records along with a cursor
+    And I send request to tenant <q2_tenant>
+    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
+
+    Examples:
+      | q1_tenant | q2_tenant | kind                                       | query | limit | returned_fields | first_count | response_code | response_type   | response_message                                    | errors |
+      | "tenant1" | "tenant2" | "tenant1:testcursor<timestamp>:well:1.0.0" | None  | 1     | All             | 1           | 401           | "Access denied" | "The user is not authorized to perform this action" | ""     |
+
+  Scenario Outline: Search data across the kinds with bounding box inputs
+    When I send <query> with <kind>
+    And I apply geographical query on field <field>
+    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
+    And I limit the count of returned results to <limit>
+    And I send request to tenant <q1_tenant>
+    Then I should get in response <first_count> records along with a cursor
+    And I send request to tenant <q2_tenant>
+    Then I should get in response <final_count> records
+
+    Examples:
+      | q1_tenant | q2_tenant | kind                                       | query     | limit | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | first_count | final_count |
+      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:well:1.0.0" | None      | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 2           | 0           |
+      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:well:1.0.0" | "OFFICE4" | 1     | "data.Location" | 45                | -110               | 0                     | 0                      | 1           | 0           |
+
+  Scenario Outline: Search data and sort the results with the given sort fields and order
+    When I send <query> with <kind>
+    And I want the results sorted by <sort>
+    Then I should get records in right order first record id: <first_record_id>, last record id: <last_record_id>
+    Examples:
+      | kind                                      | query       | sort                                                                         | first_record_id       | last_record_id        |
+      | "tenant1:testcursor<timestamp>:well:*"    | None        | {"field":["id"],"order":["ASC"]}                                             | "test:well:1.0.0:1"   | "test:well:2.0.0:3"   |
+      | "tenant1:testcursor<timestamp>:well:*"    | None        | {"field":["id"],"order":["DESC"]}                                            | "test:well:2.0.0:3"   | "test:well:1.0.0:1"   |
+      | "tenant1:testcursor<timestamp>:well:*"    | None        | {"field":["namespace","data.Rank"],"order":["ASC","DESC"]}                   | "test:well:1.0.0:3"   | "test:well:2.0.0:1"   |
+
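+  # Note: the well:2.0.0 records belong to a different owner group than well:1.0.0
+  # (see Background), so owner-scoped searches (is_owner = true) return no hits for that kind.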
+  Scenario Outline: Search data in a given kind with different searchAs modes
+    When I send <query> with <kind>
+    And I want to search as owner <is_owner>
+    Then I should get in response <count> records
+
+    Examples:
+      | kind                                       | query       | is_owner | count |
+      | "tenant1:testcursor<timestamp>:well:1.0.0" | None        | true     | 3     |
+      | "tenant1:testcursor<timestamp>:well:1.0.0" | None        | false    | 3     |
+      | "tenant1:testcursor<timestamp>:well:2.0.0" | None        | true     | 0     |
+      | "tenant1:testcursor<timestamp>:well:2.0.0" | None        | false    | 3     |
+      | "tenant1:testcursor<timestamp>:well:*"     | None        | false    | 6     |
+      | "tenant1:testcursor<timestamp>:well:*"     | None        | true     | 3     |
+      | "tenant1:testcursor<timestamp>:well:*"     | "OFFICE4"| true     | 1     |
+      | "tenant1:testcursor<timestamp>:well:*"     | None        | None     | 6     |
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/features/smart/parser/SmartParser.feature b/testing/indexer-test-core/src/main/resources/features/smart/parser/SmartParser.feature
new file mode 100644
index 0000000000000000000000000000000000000000..408319b40c2c570d8f72ff07ab4333545b0d5ace
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/features/smart/parser/SmartParser.feature
@@ -0,0 +1,8 @@
+Feature: Smart Parser
+  To allow a client to parse a smart search query into the full search query syntax.
+
+  Scenario: Parse smart search input to query api input
+    When I generate smart search input with "text" and "well"
+    Then I get a response matching
+
+
diff --git a/testing/indexer-test-core/src/main/resources/features/smart/search/Smart.feature b/testing/indexer-test-core/src/main/resources/features/smart/search/Smart.feature
new file mode 100644
index 0000000000000000000000000000000000000000..3da32045c661d617ce821555256471b6e0f06d71
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/features/smart/search/Smart.feature
@@ -0,0 +1,30 @@
+Feature: Smart search
+  To allow a client to get the available filters, find the possible values for a filter, and perform a search based on a filter and value.
+
+  Background:
+    Given the elastic search is initialized with the following data
+      | kind                                    | index                                   | mappingFile | recordFile | viewerGroup                         | ownerGroup                               |
+      | tenant1:testquery<timestamp>:well:1.0.0 | tenant1-testquery<timestamp>-well-1.0.0 | records_1   | records_1  | data.search.integrationtest@tenant1 | data.search.integrationtest@tenant1      |
+      | tenant1:testquery<timestamp>:well:2.0.0 | tenant1-testquery<timestamp>-well-2.0.0 | records_2   | records_2  | data.search.integrationtest@tenant1 | data.search.integrationtestowner@tenant1 |
+  # TODO: Enable this test when ECE CCS is utilized; the test looks correct but does not correspond to the current stopgap specification.
+  @ignore
+  Scenario Outline: Perform smart search based on query
+    When I synchronize the values in cache
+    When I list all filters on tenants <tenants>
+    Then I get the list of all available filters
+    Then I take the first filter and list values when query is <query>
+    Then I search with the filter and values and <limit> and <offset> for matching response
+
+    Examples:
+      | tenants            | query | limit | offset |
+      | "common","tenant1" | "w"   |     2 |      1 |
+  @ignore
+  Scenario Outline: Perform smart search based on filters and values
+    When I list all filters on tenants <tenants>
+    When I take the <filter> and value <value>
+    Then I search response matching for offset <offset> and limit <limit>
+
+    Examples:
+      | tenants            | limit | offset | filter           | value |
+      | "common","tenant1" |     1 |      0 | "text"           | "S"   |
+      | "common","tenant1" |     1 |      0 | "Field,Operator" | "S"   |
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/features/updateindex/UpdateIndexMapping.feature b/testing/indexer-test-core/src/main/resources/features/updateindex/UpdateIndexMapping.feature
new file mode 100644
index 0000000000000000000000000000000000000000..23271eea8cc6acf1f7b7685a1c6537371b022ee9
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/features/updateindex/UpdateIndexMapping.feature
@@ -0,0 +1,22 @@
+Feature: Updating elastic index mapping
+  This feature deals with updating index mappings in Elasticsearch.
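+  # "Multifield indexing" here re-maps a text attribute with an additional keyword sub-field
+  # (see the Center field in records_3.mapping), which makes the field aggregatable.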
+
+  Background:
+    Given the elastic search is initialized with the following data
+      | kind                                            | index                                           | mappingFile | recordFile | viewerGroup                  | ownerGroup                  |
+      | tenant1:testupdatemapping<timestamp>:well:1.0.0 | tenant1-testupdatemapping<timestamp>-well-1.0.0 | records_3   | records_3  | data.default.viewers@opendes | data.default.owners@opendes |
+      | tenant1:testupdatemapping<timestamp>:well:2.0.0 | tenant1-testupdatemapping<timestamp>-well-2.0.0 | records_3   | records_3  | data.default.viewers@opendes | data.default.owners@opendes |
+
+  Scenario Outline: Update indices to enable multifield indexing
+    When I update <fieldName> in <indices> to enable multifield indexing
+    And I send request to tenant <tenant>
+    Then I should get <response_code> response
+    Then I want to validate mapping by <indices> and <fieldName> and <type>
+    When I send <query> with <kind>
+    And I want to aggregate by <fieldName>
+    Then I should get in response <count> records
+    Examples:
+      | tenant    | fieldName | type   | kind                                       | indices                                                                                              | response_code | query | count |
+      | "tenant1" | "Center"  | "well" | "tenant1:testupdatemapping<timestamp>:*:*" | "tenant1-testupdatemapping<timestamp>-well-1.0.0", "tenant1-testupdatemapping<timestamp>-well-2.0.0" | 200           | None  | 6     |
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/testData/index_records_1.json b/testing/indexer-test-core/src/main/resources/testData/index_records_1.json
new file mode 100644
index 0000000000000000000000000000000000000000..145b5a78bab6867d7635f360383fbedac272a64c
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/testData/index_records_1.json
@@ -0,0 +1,112 @@
+[
+  {
+    "id": "tenant1:ihs:testIngest2<timestamp>",
+    "data": {
+      "Field": "OSDU OFFICE - 2",
+      "Location": {
+        "latitude":32.406402588,
+        "longitude":-86.565592762
+      },
+      "Basin": "Houston",
+      "County": "Harris",
+      "State": "TX",
+      "Country": "USA",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE - 2",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score" : 10,
+      "Established": "2000-03-27T23:38:48Z"
+    }
+  },
+  {
+    "id": "tenant1:ihs:testIngest3<timestamp>",
+    "data": {
+      "Field": "OSDU OFFICE - 2",
+      "Location": {
+        "latitude":32.406402588,
+        "longitude":-86.565592762
+      },
+      "Basin": "Houston",
+      "County": "Harris",
+      "State": "TX",
+      "Country": "USA",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE2",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score" : 10,
+      "Established": "2000-03-27T23:38:48Z"
+    }
+  },
+  {
+    "id": "tenant1:ihs:testIngest4<timestamp>",
+    "data": {
+      "Field": "OSDU OFFICE - 2",
+      "Location": {
+        "latitude":32.406402588,
+        "longitude":-86.565592762
+      },
+      "Basin": "Houston",
+      "County": "Harris",
+      "State": "TX",
+      "Country": "USA",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE2",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score" : 10,
+      "Established": "2000-03-27T23:38:48Z"
+    }
+  },
+  {
+    "id": "tenant1:ihs:testIngest5<timestamp>",
+    "data": {
+      "Field": "OSDU OFFICE - 2",
+      "Location": {
+        "latitude":32.406402588,
+        "longitude":-86.565592762
+      },
+      "Basin": "Houston",
+      "County": "Harris",
+      "State": "TX",
+      "Country": "USA",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE2",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score" : 10,
+      "Established": "2000-03-27T23:38:48Z"
+    }
+  },
+  {
+    "id": "tenant1:ihs:testIngest6<timestamp>",
+    "data": {
+      "Field": "OSDU OFFICE - 2",
+      "Location": {
+        "latitude":32.406402588,
+        "longitude":-86.565592762
+      },
+      "Basin": "Houston",
+      "County": "Harris",
+      "State": "TX",
+      "Country": "USA",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE2",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score" : 10,
+      "Established": "2000-03-27T23:38:48Z"
+    }
+  }
+]
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/testData/index_records_1.schema b/testing/indexer-test-core/src/main/resources/testData/index_records_1.schema
new file mode 100644
index 0000000000000000000000000000000000000000..bfdc78abd4664a98824562eab09d32ffc283395d
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/testData/index_records_1.schema
@@ -0,0 +1,61 @@
+{
+  "kind": "KIND_VAL",
+  "schema": [
+    {
+      "path": "Field",
+      "kind": "string"
+    },
+    {
+      "path": "Location",
+      "kind": "core:dl:geopoint:1.0.0"
+    },
+    {
+      "path": "Basin",
+      "kind": "string"
+    },
+    {
+      "path": "County",
+      "kind": "string"
+    },
+    {
+      "path": "State",
+      "kind": "string"
+    },
+    {
+      "path": "Country",
+      "kind": "string"
+    },
+    {
+      "path": "WellStatus",
+      "kind": "string"
+    },
+    {
+      "path": "OriginalOperator",
+      "kind": "string"
+    },
+    {
+      "path": "WellName",
+      "kind": "string"
+    },
+    {
+      "path": "WellType",
+      "kind": "string"
+    },
+    {
+      "path": "EmptyAttribute",
+      "kind": "string"
+    },
+    {
+      "path": "Rank",
+      "kind": "int"
+    },
+    {
+      "path": "Score",
+      "kind": "int"
+    },
+    {
+      "path": "Established",
+      "kind": "datetime"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/testData/index_records_2.json b/testing/indexer-test-core/src/main/resources/testData/index_records_2.json
new file mode 100644
index 0000000000000000000000000000000000000000..9c35fcd6e58bcd19e01eef2b858ce19b7efcea71
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/testData/index_records_2.json
@@ -0,0 +1,113 @@
+[
+  {
+    "id": "tenant1:ihs:testIngest7<timestamp>",
+    "data": {
+      "Field": "OSDU OFFICE - 2",
+      "Location": {
+        "latitude":32.406402588,
+        "longitude":-86.565592762
+      },
+      "Basin": "Houston",
+      "County": "Harris",
+      "State": "TX",
+      "Country": "USA",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE2",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score" : 10,
+      "Established": "2000-03-27T23:38:48Z"
+    }
+  },
+  {
+    "id": "tenant1:ihs:testIngest8<timestamp>",
+    "data": {
+      "Field": "OSDU OFFICE - 2",
+      "Location": {
+        "latitude":32.406402588,
+        "longitude":-86.565592762
+      },
+      "Basin": "Houston",
+      "County": "Harris",
+      "State": "TX",
+      "Country": "USA",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE2",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score" : 10,
+      "Established": "2000-03-27T23:38:48Z"
+    }
+  },
+  {
+    "id": "tenant1:ihs:testIngest9<timestamp>",
+    "data": {
+      "Field": "OSDU OFFICE - 2",
+      "Location": {
+        "latitude":32.406402588,
+        "longitude":-86.565592762
+      },
+      "Basin": "Houston",
+      "County": "Harris",
+      "State": "TX",
+      "Country": "USA",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE2",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score" : 10,
+      "Established": "2000-03-27T23:38:48Z"
+    }
+  },
+  {
+    "id": "tenant1:ihs:testIngest10<timestamp>",
+    "data": {
+      "Field": "OSDU OFFICE - 2",
+      "Location": {
+        "latitude":32.406402588,
+        "longitude":-86.565592762
+      },
+      "Basin": "Houston",
+      "County": "Harris",
+      "State": "TX",
+      "Country": "USA",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE2",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score" : 10,
+      "Established": "2000-03-27T23:38:48Z"
+    }
+  },
+  {
+    "id": "tenant1:ihs:testIngest11<timestamp>",
+    "data": {
+      "Field": 1234,
+      "Location": {
+        "latitude":"BA1",
+        "longitude":-86.565592762
+      },
+      "Basin": 789,
+      "County": 0.99,
+      "State": 0.56,
+      "Country": 1234,
+      "WellStatus": 528693,
+      "OriginalOperator": 564,
+      "WellName": 0.98,
+      "WellType": 454476578,
+      "EmptyAttribute": 1234,
+      "Rank": "Test",
+      "Score" : 10,
+      "Established": 123456,
+      "InvalidInteger": 2147483648
+    }
+  }
+]
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/testData/index_records_2.schema b/testing/indexer-test-core/src/main/resources/testData/index_records_2.schema
new file mode 100644
index 0000000000000000000000000000000000000000..2d272b735099a5d36d2e50a56b6da8e303f36e50
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/testData/index_records_2.schema
@@ -0,0 +1,65 @@
+{
+  "kind": "KIND_VAL",
+  "schema": [
+    {
+      "path": "Field",
+      "kind": "string"
+    },
+    {
+      "path": "Location",
+      "kind": "core:dl:geopoint:1.0.0"
+    },
+    {
+      "path": "Basin",
+      "kind": "string"
+    },
+    {
+      "path": "County",
+      "kind": "string"
+    },
+    {
+      "path": "State",
+      "kind": "string"
+    },
+    {
+      "path": "Country",
+      "kind": "string"
+    },
+    {
+      "path": "WellStatus",
+      "kind": "string"
+    },
+    {
+      "path": "OriginalOperator",
+      "kind": "string"
+    },
+    {
+      "path": "WellName",
+      "kind": "string"
+    },
+    {
+      "path": "WellType",
+      "kind": "string"
+    },
+    {
+      "path": "EmptyAttribute",
+      "kind": "string"
+    },
+    {
+      "path": "Rank",
+      "kind": "int"
+    },
+    {
+      "path": "Score",
+      "kind": "int"
+    },
+    {
+      "path": "Established",
+      "kind": "datetime"
+    },
+    {
+      "path": "InvalidInteger",
+      "kind": "int"
+    }
+  ]
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/testData/records_1.json b/testing/indexer-test-core/src/main/resources/testData/records_1.json
new file mode 100644
index 0000000000000000000000000000000000000000..14d4b3c84ec863530213737e76cd8494501e825f
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/testData/records_1.json
@@ -0,0 +1,113 @@
+[
+  {
+    "id": "test:well:1.0.0:1",
+    "kind": "<Kind>",
+    "namespace": "tenant1:testdatasource",
+    "type": "well",
+    "acl": "{viewers=[<Data-Group>], owners=[<Data-Group>]}",
+    "x-acl": [
+      "<Data-Group>"
+    ],
+    "legal": {
+      "legaltags": [
+        "<Legal-Tag>"
+      ],
+      "otherRelevantDataCountries": [
+        "<Country>"
+      ],
+      "status": "compliant"
+    },
+    "data": {
+      "Field": "OSDU OFFICE - 1",
+      "Location": "18.5204,73.8567",
+      "Basin": "India",
+      "State": "Maharashtra",
+      "Country": "India",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE1",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score": 10,
+      "Established": "1990-03-27T23:38:48Z"
+    },
+    "index": {
+      "statusCode": 200,
+      "lastUpdateTime": "2018-10-04T10:11:22.303000"
+    }
+  },
+  {
+    "id": "test:well:1.0.0:2",
+    "kind": "<Kind>",
+    "namespace": "tenant1:testdatasource",
+    "type": "well",
+    "acl": "{viewers=[<Data-Group>], owners=[<Data-Group>]}",
+    "x-acl": [
+      "<Data-Group>"
+    ],
+    "legal": {
+      "legaltags": [
+        "<Legal-Tag>"
+      ],
+      "otherRelevantDataCountries": [
+        "<Country>"
+      ],
+      "status": "compliant"
+    },
+    "data": {
+      "Field": "OSDU OFFICE - 3",
+      "Location": "29.749655,-95.473476",
+      "Basin": "KATY",
+      "County": "Waller",
+      "State": "TEXAS",
+      "Country": "United States",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE3",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 2,
+      "Score": 12,
+      "Established": "2010-03-27T20:38:48Z"
+    },
+    "index": {
+      "statusCode": 200,
+      "lastUpdateTime": "2018-10-04T10:11:22.303000"
+    }
+  },
+  {
+    "id": "test:well:1.0.0:3",
+    "kind": "<Kind>",
+    "namespace": "tenant1:testdatasource",
+    "type": "well",
+    "acl": "{viewers=[<Data-Group>], owners=[<Data-Group>]}",
+    "x-acl": [
+      "<Data-Group>"
+    ],
+    "legal": {
+      "legaltags": [
+        "<Legal-Tag>"
+      ],
+      "otherRelevantDataCountries": [
+        "<Country>"
+      ],
+      "status": "compliant"
+    },
+    "data": {
+      "Field": "OSDU OFFICE - 4",
+      "Location": "29.6197,-95.6349",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE4",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 20,
+      "Established": "2018-03-27T23:38:48Z"
+    },
+    "index": {
+      "statusCode": 200,
+      "lastUpdateTime": "2018-10-04T10:11:22.303000"
+    }
+  }
+]
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/testData/records_1.mapping b/testing/indexer-test-core/src/main/resources/testData/records_1.mapping
new file mode 100644
index 0000000000000000000000000000000000000000..1d2be2d85a56b2b1c26fa9c8881351242e44f010
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/testData/records_1.mapping
@@ -0,0 +1,93 @@
+{
+	"well": {
+		"properties": {
+			"id": {
+				"type": "keyword"
+			},
+			"kind": {
+				"type": "keyword"
+			},
+			"namespace": {
+				"type": "keyword"
+			},
+			"type": {
+				"type": "keyword"
+			},
+			"version": {
+				"type": "keyword"
+			},
+			"x-acl": {
+				"type": "keyword"
+			},
+			"acl": {
+				"properties": {
+					"owners": {
+						"type": "keyword"
+					},
+					"viewers": {
+						"type": "keyword"
+					}
+				}
+			},
+			"legal": {
+				"properties": {
+					"legaltags": {
+						"type": "keyword"
+					},
+					"otherRelevantDataCountries": {
+						"type": "keyword"
+					},
+					"status": {
+						"type": "keyword"
+					}
+				}
+			},
+			"data": {
+				"properties": {
+					"Field": {
+						"type": "text"
+					},
+					"Location": {
+						"type": "geo_point"
+					},
+					"Basin": {
+						"type": "text"
+					},
+					"County": {
+						"type": "text"
+					},
+					"State": {
+						"type": "text"
+					},
+					"Country": {
+						"type": "text"
+					},
+					"WellStatus": {
+						"type": "text"
+					},
+					"OriginalOperator": {
+						"type": "text"
+					},
+					"WellName": {
+						"type": "text"
+					},
+					"WellType": {
+						"type": "text"
+					},
+					"EmptyAttribute": {
+						"type": "text"
+					},
+					"Rank": {
+						"type": "integer"
+					},
+					"Score": {
+						"type": "integer"
+					},
+					"Established": {
+						"type": "date"
+					}
+				}
+			}
+		}
+	}
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/testData/records_2.json b/testing/indexer-test-core/src/main/resources/testData/records_2.json
new file mode 100644
index 0000000000000000000000000000000000000000..12f9ac646e579619b094aa19e571a863b8e23cac
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/testData/records_2.json
@@ -0,0 +1,109 @@
+[
+  {
+    "id": "test:well:2.0.0:1",
+    "kind": "<kind>",
+    "acl": "{viewers=[<Data-Group>], owners=[<Data-Group>]}",
+    "x-acl": [
+      "<Data-Group>"
+    ],
+    "legal": {
+      "legaltags": [
+        "<Legal-Tag>"
+      ],
+      "otherRelevantDataCountries": [
+        "<Country>"
+      ],
+      "status": "compliant"
+    },
+    "data": {
+      "Field": "OSDU OFFICE - 2",
+      "Location": "29.7604,-95.3698",
+      "Basin": "Houston",
+      "County": "Harris",
+      "State": "TX",
+      "Country": "USA",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE2",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score": 10,
+      "Established": "2000-03-27T23:38:48Z"
+    },
+    "index": {
+      "statusCode": 200,
+      "lastUpdateTime": "2018-10-04T10:11:22.303000"
+    }
+  },
+  {
+    "id": "test:well:2.0.0:2",
+    "kind": "<kind>",
+    "acl": "{viewers=[<Data-Group>], owners=[<Data-Group>]}",
+    "x-acl": [
+      "<Data-Group>"
+    ],
+    "legal": {
+      "legaltags": [
+        "<Legal-Tag>"
+      ],
+      "otherRelevantDataCountries": [
+        "<Country>"
+      ],
+      "status": "compliant"
+    },
+    "data": {
+      "Field": "OSDU OFFICE - 5",
+      "Location": "37.3861,-122.0839",
+      "Basin": "Mountain View",
+      "County": "Orange",
+      "State": "CA",
+      "Country": "USA",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE5",
+      "WellName": "Data Platform Services",
+      "WellType": "Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 2,
+      "Score": 5,
+      "Established": "1980-03-27T20:38:48Z"
+    },
+    "index": {
+      "statusCode": 200,
+      "lastUpdateTime": "2018-10-04T10:11:22.303000"
+    }
+  },
+  {
+    "id": "test:well:2.0.0:3",
+    "kind": "<kind>",
+    "acl": "{viewers=[<Data-Group>], owners=[<Data-Group>]}",
+    "x-acl": [
+      "<Data-Group>"
+    ],
+    "legal": {
+      "legaltags": [
+        "<Legal-Tag>"
+      ],
+      "otherRelevantDataCountries": [
+        "<Country>"
+      ],
+      "status": "compliant"
+    },
+    "data": {
+      "Field": "OSDU OFFICE - 6",
+      "Location": "43.6108, 3.8767",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE6",
+      "Country": "France",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 20,
+      "Established": "2005-03-27T23:38:48Z"
+    },
+    "index": {
+      "statusCode": 200,
+      "lastUpdateTime": "2018-10-04T10:11:22.303000"
+    }
+  }
+]
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/testData/records_2.mapping b/testing/indexer-test-core/src/main/resources/testData/records_2.mapping
new file mode 100644
index 0000000000000000000000000000000000000000..16703041fde747a34fe2e911674b3c3f4b31bee5
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/testData/records_2.mapping
@@ -0,0 +1,90 @@
+{
+	"well": {
+		"properties": {
+			"id": {
+				"type": "keyword"
+			},
+			"kind": {
+				"type": "keyword"
+			},
+			"namespace": {
+				"type": "keyword"
+			},
+			"type": {
+				"type": "keyword"
+			},
+			"version": {
+				"type": "keyword"
+			},
+			"x-acl": {
+				"type": "keyword"
+			},
+			"acl": {
+				"properties": {
+					"owners": {
+						"type": "keyword"
+					},
+					"viewers": {
+						"type": "keyword"
+					}
+				}
+			},
+			"legal": {
+				"properties": {
+					"legaltags": {
+						"type": "keyword"
+					},
+					"otherRelevantDataCountries": {
+						"type": "keyword"
+					},
+					"status": {
+						"type": "keyword"
+					}
+				}
+			},
+			"data": {
+				"properties": {
+					"Field": {
+						"type": "text"
+					},
+					"Location": {
+						"type": "geo_point"
+					},
+					"Basin": {
+						"type": "text"
+					},
+					"County": {
+						"type": "text"
+					},
+					"State": {
+						"type": "text"
+					},
+					"Country": {
+						"type": "text"
+					},
+					"WellStatus": {
+						"type": "text"
+					},
+					"OriginalOperator": {
+						"type": "text"
+					},
+					"WellName": {
+						"type": "text"
+					},
+					"WellType": {
+						"type": "text"
+					},
+					"EmptyAttribute": {
+						"type": "text"
+					},
+					"Rank": {
+						"type": "integer"
+					},
+					"Established": {
+						"type": "date"
+					}
+				}
+			}
+		}
+	}
+}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/testData/records_3.json b/testing/indexer-test-core/src/main/resources/testData/records_3.json
new file mode 100644
index 0000000000000000000000000000000000000000..a159d885c7a108d19c6831502192c93667323f2e
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/testData/records_3.json
@@ -0,0 +1,116 @@
+[
+  {
+    "id": "test:well:1.0.0:1",
+    "kind": "<Kind>",
+    "namespace": "tenant1:testdatasource",
+    "type": "well",
+    "acl": "{viewers=[<Data-Group>], owners=[<Data-Group>]}",
+    "x-acl": [
+      "<Data-Group>"
+    ],
+    "legal": {
+      "legaltags": [
+        "<Legal-Tag>"
+      ],
+      "otherRelevantDataCountries": [
+        "<Country>"
+      ],
+      "status": "compliant"
+    },
+    "data": {
+      "Field": "OSDU OFFICE - 1",
+      "Location": "18.5204,73.8567",
+      "Basin": "India",
+      "State": "Maharashtra",
+      "Country": "India",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE1",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 1,
+      "Score": 10,
+      "Established": "1990-03-27T23:38:48Z",
+      "Center": "OFFICE -1"
+    },
+    "index": {
+      "statusCode": 200,
+      "lastUpdateTime": "2018-10-04T10:11:22.303000"
+    }
+  },
+  {
+    "id": "test:well:1.0.0:2",
+    "kind": "<Kind>",
+    "namespace": "tenant1:testdatasource",
+    "type": "well",
+    "acl": "{viewers=[<Data-Group>], owners=[<Data-Group>]}",
+    "x-acl": [
+      "<Data-Group>"
+    ],
+    "legal": {
+      "legaltags": [
+        "<Legal-Tag>"
+      ],
+      "otherRelevantDataCountries": [
+        "<Country>"
+      ],
+      "status": "compliant"
+    },
+    "data": {
+      "Field": "OSDU OFFICE - 3",
+      "Location": "29.749655,-95.473476",
+      "Basin": "KATY",
+      "County": "Waller",
+      "State": "TEXAS",
+      "Country": "United States",
+      "WellStatus": "Under development",
+      "OriginalOperator": "KDFC",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 2,
+      "Score": 12,
+      "Established": "2010-03-27T20:38:48Z",
+      "Center": "KATY"
+    },
+    "index": {
+      "statusCode": 200,
+      "lastUpdateTime": "2018-10-04T10:11:22.303000"
+    }
+  },
+  {
+    "id": "test:well:1.0.0:3",
+    "kind": "<Kind>",
+    "namespace": "tenant1:testdatasource",
+    "type": "well",
+    "acl": "{viewers=[<Data-Group>], owners=[<Data-Group>]}",
+    "x-acl": [
+      "<Data-Group>"
+    ],
+    "legal": {
+      "legaltags": [
+        "<Legal-Tag>"
+      ],
+      "otherRelevantDataCountries": [
+        "<Country>"
+      ],
+      "status": "compliant"
+    },
+    "data": {
+      "Field": "OSDU OFFICE - 4",
+      "Location": "29.6197,-95.6349",
+      "WellStatus": "Under development",
+      "OriginalOperator": "OFFICE4",
+      "WellName": "Data Platform Services",
+      "WellType": "Data Lake Cloud",
+      "EmptyAttribute": "",
+      "Rank": 20,
+      "Established": "2018-03-27T23:38:48Z",
+      "Center": "SUGARLAND"
+    },
+    "index": {
+      "statusCode": 200,
+      "lastUpdateTime": "2018-10-04T10:11:22.303000"
+    }
+  }
+]
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/testData/records_3.mapping b/testing/indexer-test-core/src/main/resources/testData/records_3.mapping
new file mode 100644
index 0000000000000000000000000000000000000000..e13d948c883920aef7577d59ea01e5eea08a653b
--- /dev/null
+++ b/testing/indexer-test-core/src/main/resources/testData/records_3.mapping
@@ -0,0 +1,101 @@
+{
+  "well": {
+    "properties": {
+      "id": {
+        "type": "keyword"
+      },
+      "kind": {
+        "type": "keyword"
+      },
+      "namespace": {
+        "type": "keyword"
+      },
+      "type": {
+        "type": "keyword"
+      },
+      "version": {
+        "type": "keyword"
+      },
+      "x-acl": {
+        "type": "keyword"
+      },
+      "acl": {
+        "properties": {
+          "owners": {
+            "type": "keyword"
+          },
+          "viewers": {
+            "type": "keyword"
+          }
+        }
+      },
+      "legal": {
+        "properties": {
+          "legaltags": {
+            "type": "keyword"
+          },
+          "otherRelevantDataCountries": {
+            "type": "keyword"
+          },
+          "status": {
+            "type": "keyword"
+          }
+        }
+      },
+      "data": {
+        "properties": {
+          "Field": {
+            "type": "text"
+          },
+          "Location": {
+            "type": "geo_point"
+          },
+          "Basin": {
+            "type": "text"
+          },
+          "County": {
+            "type": "text"
+          },
+          "State": {
+            "type": "text"
+          },
+          "Country": {
+            "type": "text"
+          },
+          "WellStatus": {
+            "type": "text"
+          },
+          "OriginalOperator": {
+            "type": "text"
+          },
+          "WellName": {
+            "type": "text"
+          },
+          "WellType": {
+            "type": "text"
+          },
+          "EmptyAttribute": {
+            "type": "text"
+          },
+          "Rank": {
+            "type": "integer"
+          },
+          "Score": {
+            "type": "integer"
+          },
+          "Established": {
+            "type": "date"
+          },
+          "Center": {
+            "type": "text",
+            "fields": {
+              "raw": {
+                "type": "keyword"
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/testing/indexer-test-gcp/pom.xml b/testing/indexer-test-gcp/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..e51248f384e278556dabffb8f4757541d458c12c
--- /dev/null
+++ b/testing/indexer-test-gcp/pom.xml
@@ -0,0 +1,131 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>org.opengroup.osdu.indexer</groupId>
+    <artifactId>indexer-test-gcp</artifactId>
+    <version>0.0.1</version>
+    <packaging>jar</packaging>
+
+    <properties>
+        <maven.compiler.target>1.8</maven.compiler.target>
+        <maven.compiler.source>1.8</maven.compiler.source>
+        <cucumber.version>1.2.5</cucumber.version>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.google.api-client</groupId>
+            <artifactId>google-api-client</artifactId>
+            <version>1.28.0</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>com.fasterxml.jackson.core</groupId>
+                    <artifactId>jackson-core</artifactId>
+                </exclusion>
+            </exclusions>
+        </dependency>
+
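+        <!-- Shared, cloud-agnostic step definitions and feature files reused by the GCP glue code -->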
+        <dependency>
+            <groupId>org.opengroup.osdu.indexer</groupId>
+            <artifactId>indexer-test-core</artifactId>
+            <version>0.0.1</version>
+        </dependency>
+
+        <!-- Cucumber -->
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.12</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>info.cukes</groupId>
+            <artifactId>cucumber-java</artifactId>
+            <version>${cucumber.version}</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>info.cukes</groupId>
+            <artifactId>cucumber-junit</artifactId>
+            <version>${cucumber.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <!-- Gson: Java to Json conversion -->
+        <dependency>
+            <groupId>com.google.code.gson</groupId>
+            <artifactId>gson</artifactId>
+            <version>2.8.5</version>
+            <scope>compile</scope>
+        </dependency>
+        <dependency>
+            <groupId>com.fasterxml.jackson.jaxrs</groupId>
+            <artifactId>jackson-jaxrs-json-provider</artifactId>
+            <version>2.9.9</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.glassfish</groupId>
+            <artifactId>javax.json</artifactId>
+            <version>1.1.4</version>
+        </dependency>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-client</artifactId>
+            <version>1.19.4</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.projectlombok</groupId>
+            <artifactId>lombok</artifactId>
+            <version>1.18.2</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-io</groupId>
+            <artifactId>commons-io</artifactId>
+            <version>2.6</version>
+        </dependency>
+
+        <!--Elasticsearch-->
+        <dependency>
+            <groupId>org.elasticsearch</groupId>
+            <artifactId>elasticsearch</artifactId>
+            <version>6.6.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.elasticsearch.client</groupId>
+            <artifactId>elasticsearch-rest-client</artifactId>
+            <version>6.6.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.elasticsearch.client</groupId>
+            <artifactId>elasticsearch-rest-high-level-client</artifactId>
+            <version>6.6.2</version>
+        </dependency>
+
+        <!--Logging-->
+        <dependency>
+            <groupId>org.apache.logging.log4j</groupId>
+            <artifactId>log4j-to-slf4j</artifactId>
+            <version>2.11.2</version>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-jdk14</artifactId>
+            <version>1.8.0-beta4</version>
+        </dependency>
+
+        <dependency>
+            <groupId>com.google.guava</groupId>
+            <artifactId>guava</artifactId>
+            <version>27.1-jre</version>
+        </dependency>
+    </dependencies>
+</project>
\ No newline at end of file
diff --git a/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/deleteschema/RunTest.java b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/deleteschema/RunTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..b04eb1b3079df08037ce32ada4f8f007c941e410
--- /dev/null
+++ b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/deleteschema/RunTest.java
@@ -0,0 +1,13 @@
+package org.opengroup.osdu.step_definitions.index.deleteschema;
+
+import cucumber.api.CucumberOptions;
+import cucumber.api.junit.Cucumber;
+import org.junit.runner.RunWith;
+
+@RunWith(Cucumber.class)
+@CucumberOptions(
+        features = "classpath:features/delete/Delete.feature",
+        glue={"classpath:org.opengroup.osdu.step_definitions/index/deleteschema"},
+        plugin = {"pretty", "junit:target/cucumber-reports/TEST-deleteschema.xml"})
+public class RunTest {
+}
\ No newline at end of file
diff --git a/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/deleteschema/Steps.java b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/deleteschema/Steps.java
new file mode 100644
index 0000000000000000000000000000000000000000..344d597375249c43a2db50cea58ec6177e1346d3
--- /dev/null
+++ b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/deleteschema/Steps.java
@@ -0,0 +1,52 @@
+package org.opengroup.osdu.step_definitions.index.deleteschema;
+
+import cucumber.api.Scenario;
+import cucumber.api.java.Before;
+import org.opengroup.osdu.common.DeleteSchemaSteps;
+import org.opengroup.osdu.util.GCPHTTPClient;
+
+import cucumber.api.DataTable;
+import cucumber.api.java.en.Given;
+import cucumber.api.java.en.Then;
+import cucumber.api.java.en.When;
+
+import java.util.List;
+
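+/**
+ * GCP glue for the delete-schema feature: delegates each step to the shared
+ * {@link DeleteSchemaSteps} while supplying a fresh {@link GCPHTTPClient} per scenario.
+ */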
+public class Steps extends DeleteSchemaSteps {
+
+    public Steps() {
+        super(new GCPHTTPClient());
+    }
+
+    @Before
+    public void before(Scenario scenario) {
+        this.scenario = scenario;
+        this.httpClient = new GCPHTTPClient();
+    }
+
+    @Given("^the elastic search is initialized with the following data$")
+    public void the_elastic_search_is_initialized_with_the_following_data(DataTable dataTable) throws Throwable {
+        super.the_elastic_search_is_initialized_with_the_following_data(dataTable);
+    }
+
+    @When("^I send a delete request with \"([^\"]*)\"$")
+    public void i_send_a_delete_request_with(String kind) throws Throwable {
+        super.i_send_a_delete_request_with(kind);
+    }
+
+    @Then("^the index should get delete and I should get (\\d+) response$")
+    public void the_index_should_get_delete_and_I_should_get_response(int code) throws Throwable {
+        super.the_index_should_get_delete_and_I_should_get_response(code);
+    }
+
+    @Then("^I should get ([^\"]*) response with reason: \"(.*?)\", message: \"(.*?)\" and errors: \"(.*?)\"$")
+    public void i_should_get_response_with_reason_message_and_errors(List<Integer> codes, String type, String msg,
+                                                                     String error) throws Throwable {
+        super.i_should_get_response_with_reason_message_and_errors(codes, type, msg, error);
+    }
+
+}
diff --git a/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/getschema/RunTest.java b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/getschema/RunTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..b60b4a1c581517f89f6e0b447738847cb2f75872
--- /dev/null
+++ b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/getschema/RunTest.java
@@ -0,0 +1,13 @@
+package org.opengroup.osdu.step_definitions.index.getschema;
+
+import cucumber.api.CucumberOptions;
+import cucumber.api.junit.Cucumber;
+import org.junit.runner.RunWith;
+
+@RunWith(Cucumber.class)
+@CucumberOptions(
+        features = "classpath:features/kindschema/KindSchema.feature",
+        glue = {"classpath:org.opengroup.osdu.step_definitions/index/getschema"},
+        plugin = {"pretty", "junit:target/cucumber-reports/TEST-getschema.xml"})
+public class RunTest {
+}
\ No newline at end of file
diff --git a/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/getschema/Steps.java b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/getschema/Steps.java
new file mode 100644
index 0000000000000000000000000000000000000000..349f6163f72cf6719526e011c210a8f55a0596a0
--- /dev/null
+++ b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/getschema/Steps.java
@@ -0,0 +1,54 @@
+package org.opengroup.osdu.step_definitions.index.getschema;
+
+import org.opengroup.osdu.common.GetSchemaSteps;
+import org.opengroup.osdu.util.GCPHTTPClient;
+
+import cucumber.api.Scenario;
+import cucumber.api.java.Before;
+import cucumber.api.DataTable;
+import cucumber.api.java.en.Given;
+import cucumber.api.java.en.Then;
+import cucumber.api.java.en.When;
+
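+/**
+ * GCP glue for the kind-schema feature: delegates each step to the shared
+ * {@link GetSchemaSteps} while supplying a fresh {@link GCPHTTPClient} per scenario.
+ */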
+public class Steps extends GetSchemaSteps {
+    public Steps() {
+        super(new GCPHTTPClient());
+    }
+
+    @Before
+    public void before(Scenario scenario) {
+        this.scenario = scenario;
+        this.httpClient = new GCPHTTPClient();
+    }
+
+    @Given("^the elastic search is initialized with the following data$")
+    public void the_elastic_search_is_initialized_with_the_following_data(DataTable dataTable) throws Throwable {
+        super.the_elastic_search_is_initialized_with_the_following_data(dataTable);
+    }
+
+    @When("^I send get schema request with \"([^\"]*)\"$")
+    public void i_send_get_schema_request_with(String kind) throws Throwable {
+        super.i_send_get_schema_request_with(kind);
+    }
+
+    @When("^I send request to tenant \"(.*?)\"$")
+    public void i_send_request_to_tenant(String tenant) throws Throwable {
+        super.i_send_request_to_tenant(tenant);
+    }
+
+    @Then("^I should get ([^\"]*) response with reason: \"(.*?)\", message: \"(.*?)\" and errors: \"(.*?)\"$")
+    public void i_should_get_response_with_reason_message_and_errors(int responseCode, String type, String msg,
+                                                                     String error) throws Throwable {
+        super.i_should_get_response_with_reason_message_and_errors(responseCode, type, msg, error);
+    }
+
+    @Then("^I should get (\\d+) status with response \"(.*?)\"$")
+    public void i_should_get_status_with_response(int statusCode, String response) throws Throwable {
+        super.i_should_get_status_with_response(statusCode, response);
+    }
+
+}
\ No newline at end of file
diff --git a/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/record/RunTest.java b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/record/RunTest.java
new file mode 100644
index 0000000000000000000000000000000000000000..4978ddfccad197628432d4ccb8b5985b4af5017e
--- /dev/null
+++ b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/record/RunTest.java
@@ -0,0 +1,13 @@
+package org.opengroup.osdu.step_definitions.index.record;
+
+import cucumber.api.CucumberOptions;
+import cucumber.api.junit.Cucumber;
+import org.junit.runner.RunWith;
+
+@RunWith(Cucumber.class)
+@CucumberOptions(
+        features = "classpath:features/indexrecord/IndexRecord.feature",
+        glue = {"classpath:org.opengroup.osdu.step_definitions/index/record"},
+        plugin = {"pretty", "junit:target/cucumber-reports/TEST-indexrecord.xml"})
+public class RunTest {
+}
\ No newline at end of file
diff --git a/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/record/Steps.java b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/record/Steps.java
new file mode 100644
index 0000000000000000000000000000000000000000..348474e7769bb54d2362d079ad9aa093760d6fa2
--- /dev/null
+++ b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/step_definitions/index/record/Steps.java
@@ -0,0 +1,52 @@
+package org.opengroup.osdu.step_definitions.index.record;
+
+import lombok.extern.java.Log;
+import org.opengroup.osdu.common.RecordSteps;
+import org.opengroup.osdu.util.GCPHTTPClient;
+
+import cucumber.api.Scenario;
+import cucumber.api.java.Before;
+import cucumber.api.DataTable;
+import cucumber.api.java.en.Given;
+import cucumber.api.java.en.Then;
+import cucumber.api.java.en.When;
+
+@Log
+public class Steps extends RecordSteps {
+
+    public Steps() {
+        super(new GCPHTTPClient());
+    }
+
+    @Before
+    public void before(Scenario scenario) {
+        this.scenario = scenario;
+        this.httpClient = new GCPHTTPClient();
+    }
+
+    @Given("^the schema is created with the following kind$")
+    public void the_schema_is_created_with_the_following_kind(DataTable dataTable) {
+        super.the_schema_is_created_with_the_following_kind(dataTable);
+    }
+
+    @When("^I ingest records with the \"(.*?)\" with \"(.*?)\" for a given \"(.*?)\"$")
+    public void i_ingest_records_with_the_for_a_given(String record, String dataGroup, String kind) {
+        super.i_ingest_records_with_the_for_a_given(record, dataGroup, kind);
+    }
+
+    @Then("^I should get the (\\d+) documents for the \"([^\"]*)\" in the Elastic Search$")
+    public void i_should_get_the_documents_for_the_in_the_Elastic_Search(int expectedCount, String index) throws Throwable {
+        super.i_should_get_the_documents_for_the_in_the_Elastic_Search(expectedCount, index);
+    }
+
+    @Then("^I should get the elastic \"(.*?)\" for the \"([^\"]*)\" and \"([^\"]*)\" in the Elastic Search$")
+    public void i_should_get_the_elastic_for_the_tenant_testindex_timestamp_well_in_the_Elastic_Search(String expectedMapping, String type, String index) throws Throwable {
+        super.i_should_get_the_elastic_for_the_tenant_testindex_timestamp_well_in_the_Elastic_Search(expectedMapping, type, index);
+    }
+
+    @Then("^I should get the (\\d+) documents for the \"([^\"]*)\" in the Elastic Search with out \"(.*?)\"$")
+    public void iShouldGetTheNumberDocumentsForTheIndexInTheElasticSearchWithOutSkippedAttribute(int expectedCount, String index, String skippedAttributes) throws Throwable {
+        super.iShouldGetTheNumberDocumentsForTheIndexInTheElasticSearchWithOutSkippedAttribute(expectedCount, index, skippedAttributes);
+    }
+
+}
\ No newline at end of file
diff --git a/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/util/GCPHTTPClient.java b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/util/GCPHTTPClient.java
new file mode 100644
index 0000000000000000000000000000000000000000..d5bca798c728cdb9d1d13a2b3fca96fd056f209c
--- /dev/null
+++ b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/util/GCPHTTPClient.java
@@ -0,0 +1,25 @@
+package org.opengroup.osdu.util;
+
+import lombok.ToString;
+import lombok.extern.java.Log;
+
+import java.io.IOException;
+
+@Log
+@ToString
+public class GCPHTTPClient extends HTTPClient {
+
+    private static String token = null;
+
+    @Override
+    public synchronized String getAccessToken() {
+        if (token == null) {
+            try {
+                token = "Bearer " + JwtTokenUtil.getAccessToken();
+            } catch (IOException e) {
+                // Fail fast; a silently null token would only surface later as an opaque 401.
+                throw new RuntimeException("Unable to obtain an access token for integration tests", e);
+            }
+        }
+        return token;
+    }
+}
\ No newline at end of file
diff --git a/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/util/JwtTokenUtil.java b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/util/JwtTokenUtil.java
new file mode 100644
index 0000000000000000000000000000000000000000..7c13aaae6539701a93daf5793be42e12a2e9da3b
--- /dev/null
+++ b/testing/indexer-test-gcp/src/test/java/org/opengroup/osdu/util/JwtTokenUtil.java
@@ -0,0 +1,103 @@
+package org.opengroup.osdu.util;
+
+import com.google.api.client.googleapis.auth.oauth2.GoogleCredential;
+import com.google.api.client.json.JsonFactory;
+import com.google.api.client.json.jackson2.JacksonFactory;
+import com.google.api.client.json.webtoken.JsonWebSignature;
+import com.google.api.client.json.webtoken.JsonWebToken;
+import com.google.api.client.util.Clock;
+import com.google.common.base.Strings;
+import com.google.gson.Gson;
+import lombok.Data;
+import org.apache.commons.io.Charsets;
+import org.apache.commons.io.IOUtils;
+import org.apache.http.HttpResponse;
+import org.apache.http.NameValuePair;
+import org.apache.http.client.entity.UrlEncodedFormEntity;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.message.BasicNameValuePair;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.security.GeneralSecurityException;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.List;
+
+class JwtTokenUtil {
+
+    private static String accessToken;
+
+    static String getAccessToken() throws IOException {
+
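+        // The id_token is cached in a static field, so every scenario in the test run shares one identity.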
+        if (Strings.isNullOrEmpty(accessToken)) {
+            accessToken = getServiceAccountAccessToken(getJwtForIntegrationTesterAccount());
+        }
+        return accessToken;
+    }
+
+    private static String getServiceAccountAccessToken(String key) throws IOException {
+
+        try (CloseableHttpClient httpClient = HttpClientBuilder.create().build()) {
+
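+            // Exchange the signed JWT assertion for a Google-issued id_token (OAuth 2.0 JWT-bearer grant).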
+            List<NameValuePair> parameters = new ArrayList<>();
+            parameters.add(new BasicNameValuePair("grant_type", "urn:ietf:params:oauth:grant-type:jwt-bearer"));
+            parameters.add(new BasicNameValuePair("assertion", key));
+
+            HttpPost postRequest = new HttpPost("https://www.googleapis.com/oauth2/v4/token");
+            postRequest.addHeader("Content-Type", "application/x-www-form-urlencoded");
+            postRequest.setEntity(new UrlEncodedFormEntity(parameters));
+
+            HttpResponse response = httpClient.execute(postRequest);
+            String responseContent = IOUtils.toString(response.getEntity().getContent(), Charsets.toCharset("UTF-8"));
+
+            JwtTokenUtil.ResponseToken responseToken = new Gson().fromJson(responseContent, JwtTokenUtil.ResponseToken.class);
+
+            return responseToken.getId_token();
+        }
+    }
+
+    private static String getJwtForIntegrationTesterAccount() throws IOException {
+        String serviceAccountFile = Config.getKeyValue();
+        return getJwt(serviceAccountFile);
+    }
+
+    private static String getJwt(String serviceAccountFile) throws IOException {
+
+        String targetAudience = Config.getTargetAudience();
+        long currentTime = Clock.SYSTEM.currentTimeMillis();
+
+        InputStream stream = new ByteArrayInputStream(Base64.getDecoder().decode(serviceAccountFile));
+        GoogleCredential credential = GoogleCredential.fromStream(stream);
+
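+        // Sign a JWT with the service account's private key; target_audience becomes the audience of the id_token returned by the token endpoint.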
+        JsonWebSignature.Header header = new JsonWebSignature.Header();
+        header.setAlgorithm("RS256");
+        header.setType("JWT");
+        header.setKeyId(credential.getServiceAccountPrivateKeyId());
+
+        JsonWebSignature.Payload payload = new JsonWebToken.Payload();
+        payload.setIssuedAtTimeSeconds(currentTime / 1000);
+        payload.setExpirationTimeSeconds(currentTime / 1000 + 3600);
+        payload.setAudience("https://www.googleapis.com/oauth2/v4/token");
+        payload.setIssuer(credential.getServiceAccountId());
+        payload.set("target_audience", targetAudience);
+
+        JsonFactory jsonFactory = JacksonFactory.getDefaultInstance();
+        try {
+            return JsonWebSignature.signUsingRsaSha256(credential.getServiceAccountPrivateKey(), jsonFactory, header, payload);
+        } catch (GeneralSecurityException e) {
+            // Propagate instead of returning a null assertion that the token endpoint would reject.
+            throw new IOException("Failed to sign the JWT with the service account key", e);
+        }
+    }
+
+    @Data
+    // Static so Gson can instantiate it during deserialization; Gson cannot construct non-static inner classes.
+    private static class ResponseToken {
+        public String id_token;
+    }
+}
diff --git a/testing/maven/settings.xml b/testing/maven/settings.xml
new file mode 100644
index 0000000000000000000000000000000000000000..601aa71ea3679d558de22b4e530c9dc2a9467f7f
--- /dev/null
+++ b/testing/maven/settings.xml
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
+	xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+	xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">
+	<servers>
+		<server>
+			<id>dev-azure-com-slb-des-ext-collaboration-os-core</id>
+			<username>os-core</username>
+			<!-- Treat this auth token like a password. Do not share it with anyone, including Microsoft support. -->
+			<!-- The generated token expires on or before 10/8/2019 -->
+			<password>${VSTS_FEED_TOKEN}</password>
+		</server>
+	</servers>
+</settings>
\ No newline at end of file
diff --git a/testing/readme.md b/testing/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..0d9166e071d7427a4cf2e050ac38be34142703ac
--- /dev/null
+++ b/testing/readme.md
@@ -0,0 +1,74 @@
+# Environment setup for developers
+
+## Checklist: Complete the following setup before running the integration tests:
+1) Create a Search integration test service account
+2) Set up the Search integration test account in the Entitlements service with the following groups:
+
+    i. service.search.user
+    
+    ii. service.entitlements.user
+    
+    iii. users@{tenant1}@{domain}.com
+3) Create a data group, add the service account to that group, and substitute **DEFAULT_DATA_GROUP** in the Config file
+4) Create a valid legal tag (e.g. my-legal-tag) with other relevant data countries (e.g. US), then update the **DEFAULT_LEGAL_TAG** and **DEFAULT_OTHER_RELEVANT_DATA_COUNTRIES** variables in the Config file accordingly
+5) Update the **DEFAULT_SEARCH_INTEGRATION_TESTER** variable in the Config file with the Base64-encoded value of the service account JSON key (see the encoding sketch after the Note below)
+6) Update the **DEFAULT_SEARCH_ON_BEHALF_INTEGRATION_TESTER** variable in the Config file with the Base64-encoded value of the service account JSON key (it will be used for the slb-on-behalf-header)
+7) Obtain credentials for the Elastic cluster and update **DEFAULT_ELASTIC_HOST**, **DEFAULT_ELASTIC_USER_NAME** and **DEFAULT_ELASTIC_PASSWORD**
+8) Update **DEFAULT_TARGET_AUDIENCE** with the Google audience, i.e. the OAuth 2.0 client ID the id_token is issued for
+
+Note: 
+1) The Config file (Config.java) is in the org.opengroup.osdu.util package
+2) Do not add the service account to tenant2 (in Entitlements)
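+
+For items 5 and 6 above, the expected value is the Base64 encoding of the raw service account JSON key file (JwtTokenUtil decodes it with Base64.getDecoder() before reading the credential). A minimal sketch, with an illustrative file name:
+
+```
+import java.nio.file.Files;
+import java.nio.file.Paths;
+import java.util.Base64;
+
+public class EncodeKey {
+    public static void main(String[] args) throws Exception {
+        // The key file path is an example; substitute the path to your tester key.
+        byte[] key = Files.readAllBytes(Paths.get("integration-tester.json"));
+        System.out.println(Base64.getEncoder().encodeToString(key));
+    }
+}
+```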
+
+## Step 1:
+Import the project using Maven; it should resolve all the dependencies automatically.
+
+## Step 2:
+Install [Lombok plugin](https://projectlombok.org/setup/intellij)
+
+## Step 3:
+Add the search cluster settings to Config.java
+ 
+## Step 4:
+Execute the following command to build the code and run all the integration tests:
+```
+mvn clean install -P integration-test
+```
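+
+While iterating, a single feature can usually be run from within its module via the standard Surefire test filter (assuming the profile keeps the default Surefire configuration):
+
+```
+mvn test -Dtest=RunTest -P integration-test
+```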
+
+# How to write a new integration test?
+1) Create a feature file in the resources/features folder
+    ```
+    A feature file is the entry point to the Cucumber tests: it is where you describe your tests
+    in a descriptive language (such as plain English). It is an essential part of Cucumber, serving both as an automation test script and as living documentation.
+    A feature file can contain one scenario or many, but it usually contains a list of scenarios.
+    ```
+2) Run the feature file; it will generate blank stubs for the undefined steps
+3) Copy the stubs
+4) Create step definition class for the feature
+    ```
+    A Step Definition is a Java method with an expression that links it to one or more Gherkin steps. When Cucumber executes a Gherkin step in a scenario, 
+    it will look for a matching step definition to execute.
+    ```
+5) Paste the empty stubs into the step definition class and write their implementations
+6) Create a RunTest.java file linking the feature file to its step definitions (a sketch of such a runner follows the example below)
+
+#### To illustrate how this works, look at the following Gherkin scenario:
+
+```
+ Scenario: Some cukes
+    Given I have 48 cukes in my belly
+```
+
+The `I have 48 cukes in my belly` part of the step (the text following the `Given` keyword) will match the following step definition:
+
+```
+package foo;
+import cucumber.api.java.en.Given;
+
+public class MyStepdefs {
+    @Given("I have (\\d+) cukes in my belly")
+    public void i_have_n_cukes_in_my_belly(int cukes) {
+        System.out.format("Cukes: %d%n", cukes);
+    }
+}
+```
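+
+A runner class (step 6) then binds the feature file to this glue package. A sketch following the pattern of the RunTest classes in this change, with illustrative paths:
+
+```
+package foo;
+
+import cucumber.api.CucumberOptions;
+import cucumber.api.junit.Cucumber;
+import org.junit.runner.RunWith;
+
+// Binds the feature file on the classpath to the glue (step definition) package.
+@RunWith(Cucumber.class)
+@CucumberOptions(
+        features = "classpath:features/cukes/Cukes.feature",
+        glue = {"classpath:foo"},
+        plugin = {"pretty", "junit:target/cucumber-reports/TEST-cukes.xml"})
+public class RunTest {
+}
+```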