diff --git a/indexer-core/src/main/java/org/opengroup/osdu/indexer/IndexerApplication.java b/indexer-core/src/main/java/org/opengroup/osdu/indexer/IndexerApplication.java
index 5212838ceb83d99f731db9d417f8362d193ed5b4..576591960804133c92ed7b62309012fcd81b0726 100644
--- a/indexer-core/src/main/java/org/opengroup/osdu/indexer/IndexerApplication.java
+++ b/indexer-core/src/main/java/org/opengroup/osdu/indexer/IndexerApplication.java
@@ -5,9 +5,10 @@ import org.springframework.boot.actuate.autoconfigure.elasticsearch.ElasticSearc
 import org.springframework.boot.actuate.autoconfigure.security.servlet.ManagementWebSecurityAutoConfiguration;
 import org.springframework.boot.autoconfigure.SpringBootApplication;
 import org.springframework.boot.autoconfigure.security.servlet.SecurityAutoConfiguration;
+import org.springframework.context.annotation.ComponentScan;
 import org.springframework.context.annotation.Configuration;
 
-@Configuration
+@ComponentScan({"org.opengroup.osdu"})
 @SpringBootApplication(exclude = {ElasticSearchRestHealthIndicatorAutoConfiguration.class, SecurityAutoConfiguration.class, ManagementWebSecurityAutoConfiguration.class})
 public class IndexerApplication {
     public static void main( String[] args )
diff --git a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/AttributeParsingServiceImpl.java b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/AttributeParsingServiceImpl.java
index ac4d6809c4777661af005a500c1348e4ba660da9..e16b2faffcb20db4fa508ce90fd1ba26ce92ca58 100644
--- a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/AttributeParsingServiceImpl.java
+++ b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/AttributeParsingServiceImpl.java
@@ -27,7 +27,6 @@ import org.opengroup.osdu.indexer.model.ElasticType;
 import org.opengroup.osdu.indexer.model.IndexSchema;
 import org.opengroup.osdu.indexer.util.parser.DateTimeParser;
 import org.opengroup.osdu.indexer.util.parser.NumberParser;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Service;
 import org.springframework.web.context.annotation.RequestScope;
 
@@ -53,7 +52,6 @@ public class AttributeParsingServiceImpl implements IAttributeParsingService {
     @Inject
     private GeometryConversionService geometryConversionService;
     @Inject
-    @Lazy
     private JobStatus jobStatus;
 
     @Override
diff --git a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/CronServiceImpl.java b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/CronServiceImpl.java
index 697affadbf6ce7344c9babdb6b7988b4fdb79267..abb7f2a8212ef24ee24acd2fec5b532e0aa4ba47 100644
--- a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/CronServiceImpl.java
+++ b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/CronServiceImpl.java
@@ -25,7 +25,6 @@ import org.opengroup.osdu.is.core.provider.interfaces.util.IRequestInfo;
 import org.opengroup.osdu.is.core.service.IndicesService;
 import org.opengroup.osdu.is.core.util.ElasticClientHandler;
 import org.springframework.beans.factory.annotation.Value;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Service;
 
 import javax.inject.Inject;
@@ -44,7 +43,6 @@ public class CronServiceImpl implements CronService{
         @Inject
         private IndicesService indicesService;
         @Inject
-        @Lazy
         private JaxRsDpsLog log;
 
         @Value("${CRON_INDEX_CLEANUP_THRESHOLD_DAYS}")
diff --git a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/ElasticSettingServiceImpl.java b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/ElasticSettingServiceImpl.java
index be30cfa5d2ba414b41a791f9d0b9f96957f50e44..3b599e030d5f639c34259baa191b247f6c2b677b 100644
--- a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/ElasticSettingServiceImpl.java
+++ b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/ElasticSettingServiceImpl.java
@@ -24,7 +24,6 @@ import org.opengroup.osdu.is.core.provider.interfaces.persistence.ElasticReposit
 import org.opengroup.osdu.is.core.service.ElasticSettingService;
 import org.opengroup.osdu.is.core.service.TenantInfoService;
 import org.springframework.beans.factory.annotation.Value;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Service;
 import javax.inject.Inject;
 
@@ -38,7 +37,6 @@ public class ElasticSettingServiceImpl implements ElasticSettingService {
     @Inject
     private IElasticCredentialsCache elasticCredentialCache;
     @Inject
-    @Lazy
     private JaxRsDpsLog log;
 
     @Value("${GAE_SERVICE}")
diff --git a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexSchemaServiceImpl.java b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexSchemaServiceImpl.java
index 90c7d67e2571ce7ad73142aa190b0d3eb997722a..f35cd0d0c6f2b0931697fb4701cbd538e85a767b 100644
--- a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexSchemaServiceImpl.java
+++ b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexSchemaServiceImpl.java
@@ -32,7 +32,6 @@ import org.opengroup.osdu.is.core.service.IndicesService;
 import org.opengroup.osdu.is.core.util.ElasticClientHandler;
 import org.opengroup.osdu.is.core.util.ElasticIndexNameResolver;
 import org.apache.http.HttpStatus;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Service;
 
 import javax.inject.Inject;
@@ -49,7 +48,6 @@ public class IndexSchemaServiceImpl implements IndexSchemaService {
     private final Gson gson = new Gson();
 
     @Inject
-    @Lazy
     private JaxRsDpsLog log;
     @Inject
     private StorageService storageService;
diff --git a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexerMappingServiceImpl.java b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexerMappingServiceImpl.java
index a306ed9870d29f819e10d5059bdf25805760aafd..2020c8dc1f840c01c706de3bf8d5ba0b5127c6b2 100644
--- a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexerMappingServiceImpl.java
+++ b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexerMappingServiceImpl.java
@@ -46,7 +46,6 @@ import org.opengroup.osdu.is.core.model.RecordMetaAttribute;
 import org.opengroup.osdu.is.core.service.MappingServiceImpl;
 import org.opengroup.osdu.is.core.util.ElasticClientHandler;
 import org.opengroup.osdu.is.core.util.Preconditions;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Service;
 import javax.inject.Inject;
 
@@ -54,7 +53,6 @@ import javax.inject.Inject;
 public class IndexerMappingServiceImpl extends MappingServiceImpl implements IndexerMappingService {
 
     @Inject
-    @Lazy
     private JaxRsDpsLog log;
     @Inject
     private ElasticClientHandler elasticClientHandler;
diff --git a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexerServiceImpl.java b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexerServiceImpl.java
index e28d5f4ae8e5dc5f9ffdda0107ca1a9cd6009a3a..d654d7471e67f1940d74a7741af59cad6481a2d9 100644
--- a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexerServiceImpl.java
+++ b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/IndexerServiceImpl.java
@@ -52,7 +52,6 @@ import org.opengroup.osdu.is.core.util.ElasticClientHandler;
 import org.opengroup.osdu.is.core.util.ElasticIndexNameResolver;
 import org.apache.commons.beanutils.PropertyUtils;
 import org.apache.commons.beanutils.NestedNullException;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Service;
 
 import javax.inject.Inject;
@@ -73,7 +72,6 @@ public class IndexerServiceImpl implements IndexerService {
     private final Gson gson = new Gson();
 
     @Inject
-    @Lazy
     private JaxRsDpsLog jaxRsDpsLog;
     @Inject
     private AuditLogger auditLogger;
@@ -98,7 +96,6 @@ public class IndexerServiceImpl implements IndexerService {
     @Inject
     private IRequestInfo requestInfo;
     @Inject
-    @Lazy
     private JobStatus jobStatus;
 
     private DpsHeaders headers;
diff --git a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/ReindexServiceImpl.java b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/ReindexServiceImpl.java
index 9ffbe27f65ac9c0238835731f2eaa19cd0a07b79..55923873df0aaced88dbd041854acaecf2a4e966 100644
--- a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/ReindexServiceImpl.java
+++ b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/ReindexServiceImpl.java
@@ -28,7 +28,6 @@ import org.opengroup.osdu.indexer.util.IndexerQueueTaskBuilder;
 import org.opengroup.osdu.core.common.model.indexer.RecordInfo;
 import org.opengroup.osdu.is.core.model.RecordChangedMessages;
 import org.opengroup.osdu.is.core.provider.interfaces.util.IRequestInfo;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Component;
 
 import javax.inject.Inject;
@@ -48,7 +47,6 @@ public class ReindexServiceImpl implements ReindexService {
     @Inject
     private IRequestInfo requestInfo;
     @Inject
-    @Lazy
     private JaxRsDpsLog jaxRsDpsLog;
 
     @Override
diff --git a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/StorageServiceImpl.java b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/StorageServiceImpl.java
index 7ebd3a01d07dd4f8fef43884fedc7bb9b7da4d69..c3eff1efc43dde8c29fba9c7f84a95f4cf9b3eef 100644
--- a/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/StorageServiceImpl.java
+++ b/indexer-core/src/main/java/org/opengroup/osdu/indexer/service/StorageServiceImpl.java
@@ -33,7 +33,6 @@ import org.opengroup.osdu.is.core.provider.interfaces.util.IRequestInfo;
 import org.opengroup.osdu.is.core.service.UrlFetchService;
 import org.apache.http.HttpStatus;
 import org.springframework.beans.factory.annotation.Value;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Component;
 
 import javax.inject.Inject;
@@ -58,12 +57,10 @@ public class StorageServiceImpl implements StorageService {
     @Inject
     private UrlFetchService urlFetchService;
     @Inject
-    @Lazy
     private JobStatus jobStatus;
     @Inject
     private IRequestInfo requestInfo;
     @Inject
-    @Lazy
     private JaxRsDpsLog jaxRsDpsLog;
 
     @Value("${STORAGE_SCHEMA_HOST}")
diff --git a/indexer-core/src/main/java/org/opengroup/osdu/indexer/util/IndexerQueueTaskBuilder.java b/indexer-core/src/main/java/org/opengroup/osdu/indexer/util/IndexerQueueTaskBuilder.java
index be471073b6a15c4c6d87d0bcdb8aafc4773ed501..6f85b496957f135f88394821784fbb953198fc85 100644
--- a/indexer-core/src/main/java/org/opengroup/osdu/indexer/util/IndexerQueueTaskBuilder.java
+++ b/indexer-core/src/main/java/org/opengroup/osdu/indexer/util/IndexerQueueTaskBuilder.java
@@ -23,7 +23,6 @@ import org.opengroup.osdu.is.core.httpclient.HttpResponse;
 import org.opengroup.osdu.is.core.model.CloudTaskRequest;
 import org.opengroup.osdu.is.core.service.UrlFetchService;
 import org.springframework.beans.factory.annotation.Value;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Component;
 import org.springframework.web.context.annotation.RequestScope;
 
@@ -41,7 +40,6 @@ public class IndexerQueueTaskBuilder {
     @Inject
     private UrlFetchService urlFetchService;
     @Inject
-    @Lazy
     private JaxRsDpsLog jaxRsDpsLog;
 
     @Value("${INDEXER_QUEUE_HOST}")
diff --git a/provider/indexer-aws/src/main/java/org/opengroup/osdu/indexer/aws/IndexerAwsApplication.java b/provider/indexer-aws/src/main/java/org/opengroup/osdu/indexer/aws/IndexerAwsApplication.java
index 3a9799282f5a8efc156efe6ec9f76f2eabf1519f..919a190ef9a751b0a8bbccaa1dd778522eff3397 100644
--- a/provider/indexer-aws/src/main/java/org/opengroup/osdu/indexer/aws/IndexerAwsApplication.java
+++ b/provider/indexer-aws/src/main/java/org/opengroup/osdu/indexer/aws/IndexerAwsApplication.java
@@ -8,8 +8,7 @@ import org.springframework.context.annotation.ComponentScan;
 import org.springframework.context.annotation.Configuration;
 
 @SpringBootApplication(exclude = { SecurityAutoConfiguration.class, ManagementWebSecurityAutoConfiguration.class })
-@Configuration
-@ComponentScan({"org.opengroup.osdu.is.core", "org.opengroup.osdu.indexer"})
+@ComponentScan({"org.opengroup.osdu"})
 public class IndexerAwsApplication {
 
     public static void main(String[] args) {
diff --git a/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/IndexerAzureApplication.java b/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/IndexerAzureApplication.java
index 487568dec3f716ae3e765252a63a14ec8b54875c..8cef929b830f610b1c87efe6614f871e420f9d5f 100644
--- a/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/IndexerAzureApplication.java
+++ b/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/IndexerAzureApplication.java
@@ -20,7 +20,7 @@ import org.springframework.context.annotation.ComponentScan;
 import org.springframework.context.annotation.Configuration;
 
 @SpringBootApplication
-@ComponentScan({"org.opengroup.osdu.is.core", "org.opengroup.osdu.indexer"})
+@ComponentScan({"org.opengroup.osdu"})
 public class IndexerAzureApplication {
 
     public static void main(String[] args) {
diff --git a/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/publish/PublisherImpl.java b/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/publish/PublisherImpl.java
index 62816fd3e1ff0b537098d1b29f9ce0eb07183f67..edf0f4343cae15380aedbeb678be92510b8d9ac7 100644
--- a/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/publish/PublisherImpl.java
+++ b/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/publish/PublisherImpl.java
@@ -30,7 +30,6 @@ import org.opengroup.osdu.core.common.model.indexer.RecordStatus;
 import org.opengroup.osdu.core.common.service.is.JaxRsDpsLog;
 import org.opengroup.osdu.core.common.spi.indexer.IPublisher;
 import org.opengroup.osdu.is.core.model.RecordChangedMessages;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Component;
 import org.springframework.web.context.annotation.RequestScope;
 
diff --git a/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/util/ServiceAccountJwtClientImpl.java b/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/util/ServiceAccountJwtClientImpl.java
index 34f8ca6f8213506e0feb7cb32088b60496ec502a..473f4f0428871473e19daaa60b68c789f8f2645b 100644
--- a/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/util/ServiceAccountJwtClientImpl.java
+++ b/provider/indexer-azure/src/main/java/org/opengroup/osdu/indexer/azure/util/ServiceAccountJwtClientImpl.java
@@ -31,7 +31,6 @@ import org.opengroup.osdu.is.core.model.IdToken;
 import org.opengroup.osdu.is.core.provider.interfaces.cache.IJwtCache;
 import org.opengroup.osdu.is.core.provider.interfaces.util.IHeadersInfo;
 import org.opengroup.osdu.is.core.provider.interfaces.util.IServiceAccountJwtClient;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Component;
 import org.springframework.web.context.annotation.RequestScope;
 
diff --git a/provider/indexer-gcp/src/main/java/org/opengroup/osdu/indexer/IndexerGcpApplication.java b/provider/indexer-gcp/src/main/java/org/opengroup/osdu/indexer/IndexerGcpApplication.java
index 7a3f9c21007c16a1b92e9cecac7bd0b108031996..f5c7f4d75f81c4a71fa74662abe2fa1df5fa0a86 100644
--- a/provider/indexer-gcp/src/main/java/org/opengroup/osdu/indexer/IndexerGcpApplication.java
+++ b/provider/indexer-gcp/src/main/java/org/opengroup/osdu/indexer/IndexerGcpApplication.java
@@ -8,8 +8,7 @@ import org.springframework.context.annotation.ComponentScan;
 import org.springframework.context.annotation.Configuration;
 
 @SpringBootApplication(exclude = { SecurityAutoConfiguration.class, ManagementWebSecurityAutoConfiguration.class })
-@Configuration
-@ComponentScan({"org.opengroup.osdu.is.core", "org.opengroup.osdu.indexer"})
+@ComponentScan({"org.opengroup.osdu"})
 public class IndexerGcpApplication {
 
     public static void main(String[] args) {
diff --git a/provider/indexer-gcp/src/main/java/org/opengroup/osdu/indexer/util/ServiceAccountJwtGcpClientImpl.java b/provider/indexer-gcp/src/main/java/org/opengroup/osdu/indexer/util/ServiceAccountJwtGcpClientImpl.java
index e1e448f0eb50a852954a8262ecd82cac1c2b5e69..94f35f845cf0b84ea4502cbc9d387071d329a2ff 100644
--- a/provider/indexer-gcp/src/main/java/org/opengroup/osdu/indexer/util/ServiceAccountJwtGcpClientImpl.java
+++ b/provider/indexer-gcp/src/main/java/org/opengroup/osdu/indexer/util/ServiceAccountJwtGcpClientImpl.java
@@ -47,7 +47,6 @@ import org.opengroup.osdu.is.core.model.IdToken;
 import org.opengroup.osdu.is.core.provider.interfaces.cache.IJwtCache;
 import org.opengroup.osdu.is.core.provider.interfaces.util.IServiceAccountJwtClient;
 import org.springframework.beans.factory.annotation.Value;
-import org.springframework.context.annotation.Lazy;
 import org.springframework.stereotype.Component;
 import org.springframework.web.context.annotation.RequestScope;
 
diff --git a/provider/indexer-gcp/src/main/resources/application.properties b/provider/indexer-gcp/src/main/resources/application.properties
index e2ad3b5c5265e99ad843bb166ba8fc11b40d9065..1bd1930e1bed42ccb05561e81dc6d3350a85995f 100644
--- a/provider/indexer-gcp/src/main/resources/application.properties
+++ b/provider/indexer-gcp/src/main/resources/application.properties
@@ -24,7 +24,7 @@ DEFAULT_DATA_COUNTRY=US
 
 AUTHORIZE_API=https://entitlements-dot-opendes.appspot.com/entitlements/v1
 LEGALTAG_API=https://os-legal-dot-opendes.appspot.com/api/legal/v1
-CRS_API=https://crs-converter-dot-opendes.appspot.com/api/crs/v1
+CRS_API=https://crs-converter-gae-dot-opendes.appspot.com/api/crs/v1
 
 
 #Default Cache Settings
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/DeleteSchemaSteps.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/DeleteSchemaSteps.java
deleted file mode 100644
index 2966fd5dad34fa1e5156082834e8ac5027ef6dab..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/DeleteSchemaSteps.java
+++ /dev/null
@@ -1,108 +0,0 @@
-package org.opengroup.osdu.common;
-
-import com.sun.jersey.api.client.ClientResponse;
-
-import cucumber.api.DataTable;
-
-import org.opengroup.osdu.models.Setup;
-import org.opengroup.osdu.models.TestIndex;
-import org.opengroup.osdu.response.ErrorResponseMock;
-import org.opengroup.osdu.util.Config;
-import org.opengroup.osdu.util.ElasticUtils;
-import org.opengroup.osdu.util.HTTPClient;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.*;
-
-
-public class DeleteSchemaSteps extends TestsBase {
-
-    private static String timeStamp = String.valueOf(System.currentTimeMillis());
-
-    private Map<String, String> headers = httpClient.getCommonHeader();
-
-    private static boolean dunit = false;
-
-    private String deleteUrl;
-
-    private Map<String, TestIndex> inputRecordMap = new HashMap<>();
-
-    public DeleteSchemaSteps(HTTPClient httpClient) {
-        super(httpClient);
-    }
-
-    public DeleteSchemaSteps(HTTPClient httpClient, ElasticUtils elasticUtils) {
-        super(httpClient, elasticUtils);
-    }
-
-    /******************One time cleanup for whole feature**************/
-    public void tearDown() {
-        for (String kind : inputRecordMap.keySet()) {
-            TestIndex testIndex = inputRecordMap.get(kind);
-            testIndex.cleanupIndex();
-        }
-    }
-
-    public void the_elastic_search_is_initialized_with_the_following_data(DataTable dataTable) throws Throwable {
-        List<Setup> inputlist = dataTable.asList(Setup.class);
-        for (Setup input : inputlist) {
-            TestIndex testIndex = getTextIndex();
-            testIndex.setHttpClient(httpClient);
-            testIndex.setIndex(generateActualName(input.getIndex(), timeStamp));
-            testIndex.setKind(generateActualName(input.getKind(), timeStamp));
-            testIndex.setMappingFile(input.getMappingFile());
-            testIndex.setRecordFile(input.getRecordFile());
-            inputRecordMap.put(testIndex.getKind(), testIndex);
-        }
-        /******************One time setup for whole feature**************/
-        if (!dunit) {
-            Runtime.getRuntime().addShutdownHook(new Thread() {
-                public void run() {
-                    tearDown();
-                }
-            });
-            dunit = true;
-            for (String kind : inputRecordMap.keySet()) {
-                TestIndex testIndex = inputRecordMap.get(kind);
-                testIndex.setupIndex();
-            }
-        }
-    }
-
-    public void i_send_a_delete_request_with(String kind) throws Throwable {
-        String actualKind = generateActualName(kind, timeStamp);
-        deleteUrl = String.format(this.getApi(), actualKind);
-    }
-
-    public void the_index_should_get_delete_and_I_should_get_response(int code) throws Throwable {
-        ClientResponse clientResponse = executeGetRequest(deleteUrl, headers, httpClient.getAccessToken());
-        assertEquals(code, clientResponse.getStatus());
-    }
-
-    public void i_should_get_response_with_reason_message_and_errors(List<Integer> codes, String type, String msg,
-                                                                     String error) throws Throwable {
-        ErrorResponseMock response = executeQuery(deleteUrl, null, headers, httpClient.getAccessToken(), ErrorResponseMock.class);
-        assertTrue(codes.contains(response.getResponseCode()));
-        if (response.getErrors() != null) {
-            error = generateActualName(error, timeStamp);
-            assertEquals(generateActualName(error,timeStamp), response.getErrors().get(0));
-        }
-        assertNotNull(response.getMessage());
-        assertNotNull(response.getReason());
-        assertEquals(type.toLowerCase(), response.getReason().toLowerCase());
-        assertEquals(generateActualName(msg,timeStamp), response.getMessage());
-    }
-
-    @Override
-    protected String getHttpMethod() {
-        return "DELETE";
-    }
-
-    @Override
-    protected String getApi() {
-        return Config.getSearchBaseURL() + "index/%s";
-    }
-}
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/GetSchemaSteps.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/GetSchemaSteps.java
deleted file mode 100644
index 0993a09b34d8ab18087010210eef27bb3d34c7e8..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/GetSchemaSteps.java
+++ /dev/null
@@ -1,115 +0,0 @@
-package org.opengroup.osdu.common;
-
-import com.google.gson.JsonObject;
-import com.google.gson.JsonParser;
-
-import org.opengroup.osdu.models.Setup;
-import org.opengroup.osdu.models.TestIndex;
-import org.opengroup.osdu.response.ErrorResponseMock;
-import org.opengroup.osdu.util.Config;
-import org.opengroup.osdu.util.ElasticUtils;
-import org.opengroup.osdu.util.HTTPClient;
-
-import com.sun.jersey.api.client.ClientResponse;
-import cucumber.api.DataTable;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-
-public class GetSchemaSteps extends TestsBase {
-
-    private static String timeStamp = String.valueOf(System.currentTimeMillis());
-    private static boolean dunit = false;
-    private Map<String, String> headers = httpClient.getCommonHeader();
-    private Map<String, TestIndex> inputRecordMap = new HashMap<>();
-    private String schemaUrl;
-
-    public GetSchemaSteps(HTTPClient httpClient) {
-        super(httpClient);
-    }
-
-    public GetSchemaSteps(HTTPClient httpClient, ElasticUtils elasticUtils) {
-        super(httpClient, elasticUtils);
-    }
-
-
-
-    /******************One time cleanup for whole feature**************/
-    public void tearDown() {
-        for (String kind : inputRecordMap.keySet()) {
-            TestIndex testIndex = inputRecordMap.get(kind);
-            testIndex.cleanupIndex();
-        }
-    }
-
-    public void the_elastic_search_is_initialized_with_the_following_data(DataTable dataTable) throws Throwable {
-        List<Setup> inputlist = dataTable.asList(Setup.class);
-        for (Setup input : inputlist) {
-            TestIndex testIndex = getTextIndex();
-            testIndex.setHttpClient(httpClient);
-            testIndex.setIndex(generateActualName(input.getIndex(), timeStamp));
-            testIndex.setKind(generateActualName(input.getKind(), timeStamp));
-            testIndex.setMappingFile(input.getMappingFile());
-            inputRecordMap.put(testIndex.getKind(), testIndex);
-        }
-        /******************One time setup for whole feature**************/
-        if (!dunit) {
-            Runtime.getRuntime().addShutdownHook(new Thread() {
-                public void run() {
-                    tearDown();
-                }
-            });
-            dunit = true;
-            for (String kind : inputRecordMap.keySet()) {
-                TestIndex testIndex = inputRecordMap.get(kind);
-                testIndex.addIndex();
-            }
-        }
-    }
-
-    public void i_send_get_schema_request_with(String kind) throws Throwable {
-        String actualKind = generateActualName(kind, timeStamp);
-        schemaUrl = String.format(this.getApi(), actualKind);
-    }
-
-    public void i_send_request_to_tenant(String tenant) throws Throwable {
-        headers = HTTPClient.overrideHeader(headers, getTenantMapping(tenant));
-    }
-
-    public void i_should_get_response_with_reason_message_and_errors(int responseCode, String type, String msg,
-                                                                     String error) throws Throwable {
-
-        ErrorResponseMock response = executeQuery(schemaUrl, null, headers, httpClient.getAccessToken(), ErrorResponseMock.class);
-        assertEquals(responseCode, response.getResponseCode());
-        if (response.getErrors() != null) {
-            error = generateActualName(error, timeStamp);
-            assertEquals(generateActualName(error, timeStamp), response.getErrors().get(0));
-        }
-        assertEquals(type, response.getReason());
-        assertEquals(generateActualName(msg, timeStamp),response.getMessage());
-    }
-
-    public void i_should_get_status_with_response(int statusCode, String response) throws Throwable {
-
-        ClientResponse schemaResponse = executeGetRequest(schemaUrl, headers, httpClient.getAccessToken());
-        assertEquals(statusCode, schemaResponse.getStatus());
-        String expectedResponse = generateActualName(response, timeStamp);
-        JsonObject expectedJson = new JsonParser().parse(expectedResponse).getAsJsonObject();
-        JsonObject actualJson = new JsonParser().parse(schemaResponse.getEntity(String.class)).getAsJsonObject();
-        assertEquals(expectedJson, actualJson);
-    }
-    
-    @Override
-    protected String getApi() {
-        return Config.getSearchBaseURL() + "index/schema/%s";
-    }
-
-    @Override
-    protected String getHttpMethod() {
-        return "GET";
-    }
-
-}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/MappingSteps.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/MappingSteps.java
deleted file mode 100644
index 76a524806b6ab644d4c162a0f009668d9768f9cf..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/MappingSteps.java
+++ /dev/null
@@ -1,121 +0,0 @@
-package org.opengroup.osdu.common;
-
-import com.google.api.client.http.HttpMethods;
-import com.google.gson.Gson;
-import com.sun.jersey.api.client.ClientResponse;
-import cucumber.api.DataTable;
-import lombok.extern.java.Log;
-
-import java.util.*;
-
-import org.elasticsearch.cluster.metadata.MappingMetaData;
-import org.elasticsearch.common.collect.ImmutableOpenMap;
-import org.opengroup.osdu.models.Setup;
-import org.opengroup.osdu.request.Query;
-import org.opengroup.osdu.response.ResponseMock;
-import org.opengroup.osdu.util.Config;
-import org.opengroup.osdu.util.ElasticUtils;
-import org.opengroup.osdu.util.HTTPClient;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
-
-@Log
-public class MappingSteps extends TestsBase {
-    private Map<String, String> headers;
-    private String fieldName;
-    private String timeStamp;
-    private static boolean dunit = false;
-    private Query requestQuery = new Query();
-    private ElasticUtils elasticUtils = new ElasticUtils();
-    private Map<String, Object> requestPayload = new HashMap<>();
-
-    private static String updateIndexMappingUrl = Config.getIndexerBaseURL() + "kinds";
-    private static String searchQeueryURL=Config.getSearchBaseURL() + "query";    
-
-
-    public MappingSteps(HTTPClient httpClient) {
-        super(httpClient);
-        headers = httpClient.getCommonHeader();
-        fieldName="";
-        timeStamp = String.valueOf(System.currentTimeMillis());
-    }
-
-    public MappingSteps(HTTPClient httpClient, ElasticUtils elasticUtils) {
-        super(httpClient, elasticUtils);
-        headers = httpClient.getCommonHeader();
-        fieldName="";
-        timeStamp = String.valueOf(System.currentTimeMillis());
-    }
-
-    public void the_elastic_search_is_initialized_with_the_following_data(DataTable dataTable) {
-        if (!dunit) {
-            List<Setup> inputlist = dataTable.asList(Setup.class);
-            setUp(inputlist, timeStamp);
-            dunit = true;
-        }
-    }
-
-    public void i_update_in_to_enable_multifield_indexing(String fieldNameVal, String index1, String index2) throws Throwable {
-        Set<String> indices = new HashSet<>();
-        System.out.println("Indices "+ index1 + index2);
-        indices.add(generateActualName(index1,timeStamp));
-        indices.add(generateActualName(index2,timeStamp));
-        fieldName=fieldNameVal;
-        requestPayload.put("indices", indices);
-        requestPayload.put("operator", "keyword");
-    }
-
-    public void i_send_request_to_tenant(String tenant) {
-        headers = HTTPClient.overrideHeader(headers, getTenantMapping(tenant));
-    }
-
-    public void i_should_get_response(int responseCode) throws Throwable {
-        String payload = new Gson().toJson(requestPayload);
-        ClientResponse clientResponse = httpClient.send(HttpMethods.PUT, this.getApi()+"/"+this.fieldName, payload, headers, httpClient.getAccessToken());
-        assertEquals(responseCode, clientResponse.getStatus());
-    }
-
-    public void i_send_with(String query, String kind) {
-        requestQuery.setQuery(query);
-        requestQuery.setKind(generateActualName(kind, timeStamp));
-    }
-
-    public void i_send_None_with(String kind) {
-        requestQuery.setKind(generateActualName(kind, timeStamp));
-    }
-
-    public void i_aggregate_by(String aggField) throws Throwable {
-        requestQuery.setAggregateBy(aggField+".keyword");
-    }
-
-    public void i_should_get_in_response_records(int resultCount) {
-        String payload = requestQuery.toString();
-        ResponseMock response = executeQuery(searchQeueryURL, payload, this.headers, httpClient.getAccessToken(), ResponseMock.class);
-        assertEquals(200, response.getResponseCode());
-        assertEquals(resultCount, response.getResults().size());
-    }
-
-    public void i_want_to_validate_mapping(String indexOne, String indexTwo,String fieldName,String type) throws Throwable {
-    	ImmutableOpenMap<String, MappingMetaData> elasticMapping = elasticUtils.getMapping(generateActualName(indexOne,timeStamp));
-    	assertNotNull(elasticMapping);
-        MappingMetaData typeMapping = elasticMapping.get(type);
-        Map<String, Object> mapping = typeMapping.sourceAsMap();
-        assertNotNull(mapping);
-        String mappingJson = new Gson().toJson(mapping);
-        System.out.println(mappingJson);
-        assertTrue(mappingJson.contains(fieldName));
-        assertTrue(mappingJson.contains("raw"));
-    }
-    
-    @Override
-    protected String getApi() {
-        return updateIndexMappingUrl;
-    }
-
-    @Override
-    protected String getHttpMethod() {
-        return "POST";
-    }
-}
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/RecordSteps.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/RecordSteps.java
index 8725680bb72a653d8d211425e5c77ab741d568b8..fe57b8cc49e8566c72ed28e4194fe666a4f886d7 100644
--- a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/RecordSteps.java
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/RecordSteps.java
@@ -33,20 +33,16 @@ public class RecordSteps extends TestsBase {
     private Map<String, TestIndex> inputIndexMap = new HashMap<>();
     private boolean shutDownHookAdded = false;
 
-    private String timeStamp;
+    private String timeStamp = String.valueOf(System.currentTimeMillis());
     private List<Map<String, Object>> records;
-    private Map<String, String> headers;
+    private Map<String, String> headers = httpClient.getCommonHeader();
 
     public RecordSteps(HTTPClient httpClient) {
         super(httpClient);
-        timeStamp = String.valueOf(System.currentTimeMillis());
-        headers = httpClient.getCommonHeader();
     }
 
     public RecordSteps(HTTPClient httpClient, ElasticUtils elasticUtils) {
         super(httpClient, elasticUtils);
-        timeStamp = String.valueOf(System.currentTimeMillis());
-        headers = httpClient.getCommonHeader();
     }
     
     /******************One time cleanup for whole feature**************/
diff --git a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/TestsBase.java b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/TestsBase.java
index 12d5ed607b69203ad000d10ad5ebeed91821e24c..43cd1b4d429d69acd6486d418fb93795d03b3319 100644
--- a/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/TestsBase.java
+++ b/testing/indexer-test-core/src/main/java/org/opengroup/osdu/common/TestsBase.java
@@ -104,26 +104,12 @@ public abstract class TestsBase {
 
     protected abstract String getHttpMethod();
 
-    protected String executeQuery(String api, String payLoad, Map<String, String> headers, String token) {
-        ClientResponse clientResponse = httpClient.send(this.getHttpMethod(), api, payLoad, headers, token);
-        logCorrelationIdWithFunctionName(clientResponse.getHeaders());
-        log.info(String.format("Response status: %s, type: %s", clientResponse.getStatus(), clientResponse.getType().toString()));
-        assertEquals(MediaType.APPLICATION_JSON, clientResponse.getType().toString());
-        return clientResponse.getEntity(String.class);
-    }
-
     protected <T extends ResponseBase> T executeQuery(String api, String payLoad, Map<String, String> headers, String token, Class<T> typeParameterClass) {
         ClientResponse clientResponse = httpClient.send(this.getHttpMethod(), api, payLoad, headers, token);
         logCorrelationIdWithFunctionName(clientResponse.getHeaders());
         return getResponse(clientResponse, typeParameterClass);
     }
 
-    protected <T extends ResponseBase> T executeQuery(String payLoad, Map<String, String> headers, String token, Class<T> typeParameterClass) {
-        ClientResponse clientResponse = httpClient.send(this.getHttpMethod(), this.getApi(), payLoad, headers, token);
-        logCorrelationIdWithFunctionName(clientResponse.getHeaders());
-        return getResponse(clientResponse, typeParameterClass);
-    }
-
     private <T extends ResponseBase> T getResponse(ClientResponse clientResponse, Class<T> typeParameterClass) {
         log.info(String.format("Response status: %s, type: %s", clientResponse.getStatus(), clientResponse.getType().toString()));
         assertEquals(MediaType.APPLICATION_JSON, clientResponse.getType().toString());
@@ -135,18 +121,6 @@ public abstract class TestsBase {
         return response;
     }
 
-    protected ClientResponse executeGetRequest(String api, Map<String, String> headers, String token) {
-        return executeRequest(this.getHttpMethod(), api, headers, token);
-    }
-
-    protected ClientResponse executeRequest(String method, String api, Map<String, String> headers, String token) {
-        ClientResponse clientResponse = httpClient.send(method, api, null, headers, token);
-        if (clientResponse.getType() != null) {
-            log.info(String.format("Response status: %s, type: %s", clientResponse.getStatus(), clientResponse.getType().toString()));
-        }
-        logCorrelationIdWithFunctionName(clientResponse.getHeaders());
-        return clientResponse;
-    }
 
     private void logCorrelationIdWithFunctionName(MultivaluedMap<String, String> headers) {
         log.info(String.format("Scenario Name: %s, Correlation-Id: %s", scenario.getId(), headers.get("correlation-id")));
diff --git a/testing/indexer-test-core/src/main/resources/features/delete/Delete.feature b/testing/indexer-test-core/src/main/resources/features/delete/Delete.feature
deleted file mode 100644
index 519b648fa6840c9e320ffa1739a9e4723fe47030..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/resources/features/delete/Delete.feature
+++ /dev/null
@@ -1,27 +0,0 @@
-Feature: Delete search indexes
-  If a user wants to delete any index, search should offer ways to do the same.
-
-  Background:
-    Given the elastic search is initialized with the following data
-      | kind                                     | index                                    | mappingFile | recordFile |
-      | tenant1:testdelete<timestamp>:well:1.0.0 | tenant1-testdelete<timestamp>-well-1.0.0 | records_1   | records_1  |
-      | tenant1:testdelete<timestamp>:well:2.0.0 | tenant1-testdelete<timestamp>-well-2.0.0 | records_2   | records_2  |
-
-
-  Scenario Outline: Delete a given index from the Search
-    When I send a delete request with <kind>
-    Then the index should get delete and I should get <response_code> response
-
-    Examples:
-      | kind                                       | response_code |
-      | "tenant1:testdelete<timestamp>:well:1.0.0" | 200           |
-
-  Scenario Outline: Fail the request for deletion of a index from the Search with invalid inputs
-    When I send a delete request with <kind>
-    Then I should get <response_code> response with reason: <reponse_type>, message: <response_message> and errors: <errors>
-
-    Examples:
-      | kind                                      | response_code | reponse_type           | response_message                                         | errors                                                                                                                                                                                  |
-      | "tenant1:testdelete<timestamp>:*:*"       | 400           | "Bad Request"          | "Invalid parameters were given on search request"        | "Not a valid record kind. Found: tenant1:testdelete<timestamp>:*:*, required format is partition:data-source-id:type:schema-version with no wildcards e.g. tenant:well:wellbore:1.0.2" |
-      | "tenant1:testdatasource:wellrating:9.0.0" | 404           | "Index deletion error" | "Kind tenant1:testdatasource:wellrating:9.0.0 not found" | ""                                                                                                                                                                                      |
-
diff --git a/testing/indexer-test-core/src/main/resources/features/kindschema/KindSchema.feature b/testing/indexer-test-core/src/main/resources/features/kindschema/KindSchema.feature
deleted file mode 100644
index ee526ad538bbb85f6e02e182d6e009508fb0e6a2..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/resources/features/kindschema/KindSchema.feature
+++ /dev/null
@@ -1,26 +0,0 @@
-Feature: Get schema for a given kind
-  Allow user to find the attributes indexed and their respective data types.
-
-  Background:
-    Given the elastic search is initialized with the following data
-      | kind                                     | index                                    | mappingFile |
-      | tenant1:testschema<timestamp>:well:1.0.0 | tenant1-testschema<timestamp>-well-1.0.0 | records_1   |
-
-  Scenario Outline: Get a schema from search for a kind
-    When I send get schema request with <kind>
-    And I send request to tenant <tenant>
-    Then I should get <response_code> status with response <response_message>
-
-    Examples:
-      | tenant    | kind                                       | response_code | response_message																																																																																																																																																																								|
-      | "tenant1" | "tenant1:testschema<timestamp>:well:1.0.0" | 200           | "{"tenant1-testschema<timestamp>-well-1.0.0":{"mappings":{"well":{"properties":{"acl":{"properties":{"owners":{"type":"keyword"},"viewers":{"type":"keyword"}}},"legal":{"properties":{"legaltags":{"type":"keyword"},"otherRelevantDataCountries":{"type":"keyword"},"status":{"type":"keyword"}}},"data":{"properties":{"Basin":{"type":"text"},"Country":{"type":"text"},"County":{"type":"text"},"EmptyAttribute":{"type":"text"},"Established":{"type":"date"},"Field":{"type":"text"},"Location":{"type":"geo_point"},"OriginalOperator":{"type":"text"},"Rank":{"type":"integer"},"Score":{"type":"integer"},"State":{"type":"text"},"WellName":{"type":"text"},"WellStatus":{"type":"text"},"WellType":{"type":"text"}}},"id":{"type":"keyword"},"kind":{"type":"keyword"},"namespace":{"type":"keyword"},"type":{"type":"keyword"},"version":{"type":"keyword"},"x-acl":{"type":"keyword"}}}}}}" |
-
-  Scenario Outline: Fail request to get schema from search with invalid inputs
-    When I send get schema request with <kind>
-    And I send request to tenant <tenant>
-    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
-
-    Examples:
-      | tenant    | kind                                      | response_code | response_type    | response_message                                         | errors                                                                                                                                                                                  |
-      | "tenant1" | "tenant1-testschema<timestamp>:*:*"       | 400           | "Bad Request"    | "Invalid parameters were given on search request"        | "Not a valid record kind. Found: tenant1-testschema<timestamp>:*:*, required format is partition:data-source-id:type:schema-version with no wildcards e.g. tenant:well:wellbore:1.0.2" |
-      | "tenant1" | "tenant1:testdatasource:wellrating:9.0.0" | 404           | "Kind not found" | "Kind tenant1:testdatasource:wellrating:9.0.0 not found" | ""                                                                                                                                                                                      |
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/features/query/crosscluster/Query.feature b/testing/indexer-test-core/src/main/resources/features/query/crosscluster/Query.feature
deleted file mode 100644
index 4e37a677da089ca41f5bdb4d7efc87e2e6f43060..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/resources/features/query/crosscluster/Query.feature
+++ /dev/null
@@ -1,103 +0,0 @@
-Feature: Search with different queries
-  To allow a user to find his data quickly, search should offer multiple ways to search data.
-
-  Background:
-    Given the elastic search is initialized with the following data
-      | kind                                    | index                                   | mappingFile | recordFile | viewerGroup                   | ownerGroup                  |
-      | tenant1:testquery<timestamp>:well:1.0.0 | tenant1-testquery<timestamp>-well-1.0.0 | records_1   | records_1  | data.default.viewers@opendes  | data.default.owners@opendes |
-      | tenant1:testquery<timestamp>:well:2.0.0 | tenant1-testquery<timestamp>-well-2.0.0 | records_2   | records_2  | data.default.viewers@opendes  | data.default.owners@opendes |
-      | common:testquery<timestamp>:well:1.0.0  | common-testquery<timestamp>-well-1.0.0  | records_1   | records_1  | data.default.viewers@opendes  | data.default.owners@opendes |
-      | common:testquery<timestamp>:well:2.0.0  | common-testquery<timestamp>-well-2.0.0  | records_2   | records_2  | data.default.viewers@opendes  | data.default.owners@opendes |
-
-	@ignore
-  Scenario Outline: Search data in a given kind
-    When I send <query> with <kind>
-    And I limit the count of returned results to <limit>
-    And I set the offset of starting point as <offset>
-    And I set the fields I want in response as <returned_fields>
-    And I send request to tenants <tenants>
-    Then I should get in response <count> records
-
-    Examples:
-      | tenants            | kind                                      | query          | limit | offset | returned_fields | count |
-      | "tenant1","common" | "*:testquery<timestamp>:well:1.0.0"       | "OSDU" | None  | None   | All             | 6     |
-      | "tenant1","common" | "tenant1:testquery<timestamp>:well:1.0.0" | "OSDU" | None  | None   | All             | 3     |
-
-
-	@ignore
-  Scenario Outline: Search data in a given a kind with invalid inputs
-    When I send <query> with <kind>
-    And I limit the count of returned results to <limit>
-    And I set the offset of starting point as <offset>
-    And I send request to tenants <tenants>
-    Then I should get <response_code> response with reason: <reponse_type>, message: <response_message> and errors: <errors>
-
-    Examples:
-      | tenants            | kind                                | query | limit | offset | response_code | reponse_type    | response_message                                    | errors |
-      | "tenant2","common" | "*:testquery<timestamp>:well:1.0.0" | None  | None  | None   | 401           | "Access denied" | "The user is not authorized to perform this action" | ""     |
-
-	@ignore
-  Scenario Outline: Search data across the kinds with bounding box inputs
-    When I send <query> with <kind>
-    And I send request to tenants <tenants>
-    And I apply geographical query on field <field>
-    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
-    Then I should get in response <count> records
-
-    Examples:
-      | tenants            | kind                                      | query                        | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | count |
-      | "tenant1","common" | "*:testquery<timestamp>:well:1.0.0"       | "data.OriginalOperator:OFFICE4" | "data.Location" | 45                | -100               | 0                     | 0                      | 2     |
-      | "tenant1","common" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 45                | -100               | 0                     | 0                      | 1     |
-
-	@ignore
-  Scenario Outline: Search data across the kinds with distance inputs
-    When I send <query> with <kind>
-    And I send request to tenants <tenants>
-    And I apply geographical query on field <field>
-    And define focus coordinates as (<latitude>, <longitude>) and search in a <distance> radius
-    Then I should get in response <count> records
-
-    Examples:
-      | tenants            | kind                                      | query               | field           | latitude | longitude | distance | count |
-      | "tenant1","common" | "*:testquery<timestamp>:well:1.0.0"       | "Under development" | "data.Location" | 0        | 0         | 20000000 | 6     |
-      | "tenant1","common" | "tenant1:testquery<timestamp>:well:1.0.0" | "Under development" | "data.Location" | 0        | 0         | 20000000 | 3     |
-
-	@ignore
-  Scenario Outline: Search data across the kinds
-    When I send <query> with <kind>
-    And I limit the count of returned results to <limit>
-    And I set the offset of starting point as <offset>
-    And I set the fields I want in response as <returned_fields>
-    And I send request to tenants <tenants>
-    Then I should get in response <count> records
-
-    Examples:
-      | tenants            | kind                               | query                  | limit | offset | returned_fields | count |
-      | "tenant1","common" | "*:testquery<timestamp>:*:*"       | "OSDU OFFICE*" | 12    | None   | All             | 12    |
-      | "tenant1","common" | "tenant1:testquery<timestamp>:*:*" | "OSDU OFFICE*" | 12    | None   | All             | 6     |
-
-
-	@ignore
-  Scenario Outline: Search data across the kinds with bounding box inputs
-    When I send <query> with <kind>
-    And I send request to tenants <tenants>
-    And I apply geographical query on field <field>
-    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
-    Then I should get in response <count> records
-
-    Examples:
-      | tenants            | kind                               | query | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | count |
-      | "tenant1","common" | "*:testquery<timestamp>:*:*"       | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 6     |
-      | "tenant1","common" | "tenant1:testquery<timestamp>:*:*" | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 3     |
-
-	@ignore
-  Scenario Outline: Search data across the kinds with geo polygon inputs
-    When I send <query> with <kind>
-    And I send request to tenants <tenants>
-    And define geo polygon with following points <points_list>
-    And I apply geographical query on field <field>
-    Then I should get in response <count> records
-    Examples:
-      | tenants            | kind                                      | query | field           | points_list                                                               | count |
-      | "tenant1","common" | "*:testquery<timestamp>:well:1.0.0"       | None  | "data.Location" | (26.12362;-112.226716)  , (26.595873;-68.457186) , (52.273184;-93.593904) | 4     |
-      | "tenant1","common" | "tenant1:testquery<timestamp>:well:1.0.0" | None  | "data.Location" | (26.12362;-112.226716)  , (26.595873;-68.457186) , (52.273184;-93.593904) | 2     |
diff --git a/testing/indexer-test-core/src/main/resources/features/query/singlecluster/Query.feature b/testing/indexer-test-core/src/main/resources/features/query/singlecluster/Query.feature
deleted file mode 100644
index 7b341d66585a55cfd057932d33e215c52d19f793..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/resources/features/query/singlecluster/Query.feature
+++ /dev/null
@@ -1,205 +0,0 @@
-Feature: Search with different queries
-  To allow a user to find his data quickly, search should offer multiple ways to search data.
-
-  Background:
-    Given the elastic search is initialized with the following data
-      | kind                                    | index                                   | mappingFile | recordFile | viewerGroup                  | ownerGroup 				           |
-      | tenant1:testquery<timestamp>:well:1.0.0 | tenant1-testquery<timestamp>-well-1.0.0 | records_1   | records_1  | data.default.viewers@opendes | data.default.owners@opendes  |
-      | tenant1:testquery<timestamp>:well:2.0.0 | tenant1-testquery<timestamp>-well-2.0.0 | records_2   | records_2  | data.default.viewers@opendes | data.default.testowners@opendes   |
-
-  Scenario Outline: Search data in a given kind
-    When I send <query> with <kind>
-    And I limit the count of returned results to <limit>
-    And I set the offset of starting point as <offset>
-    And I set the fields I want in response as <returned_fields>
-    And I send request to tenant <tenant>
-    Then I should get in response <count> records
-
-    Examples:
-      | tenant    | kind                                      | query                                | limit | offset | returned_fields | count |
-      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4"      | None  | None   | All             | 1     |
-      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | None                                 | 0     | None   | NULL            | 3     |
-      ######################################Range Query test cases##########################################################################
-      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.Rank:{1 TO 3}"                 | None  | None   | All             | 1     |
-      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.Rank:[10 TO 20]"               | None  | None   | All             | 1     |
-      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.Rank:>=2"                      | None  | None   | All             | 2     |
-      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "data.Established:{* TO 2012-01-01}" | None  | None   | All             | 2     |
-      ######################################Text Query test cases###########################################################################
-      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | "OSDU"                               | None  | None   | All             | 3     |
-      | "tenant1" | "tenant1:testquery<timestamp>:well:2.0.0" | "data.OriginalOperator:OFFICE6"      | None  | None   | All             | 1     |
-      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | ""OFFICE2" \| OFFICE3"               | None  | None   | All             | 1     |
-      | "tenant1" | "tenant1:testquery<timestamp>:well:2.0.0" | "data.Well\*:(Data Lake Cloud)"      | None  | None   | All             | 3     |
-
-  Scenario Outline: Search data in a given a kind with invalid inputs
-    When I send <query> with <kind>
-    And I limit the count of returned results to <limit>
-    And I set the offset of starting point as <offset>
-    And I send request to tenant <tenant>
-    Then I should get <response_code> response with reason: <reponse_type>, message: <response_message> and errors: <errors>
-
-    Examples:
-      | tenant    | kind                                      | query | limit | offset | response_code | reponse_type    | response_message                                    | errors                                     |
-      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | None  | -1    | None   | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "'limit' must be equal or greater than 0"  |
-      | "tenant1" | "invalid"                                 | None  | 1     | None   | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "Not a valid record kind. Found: invalid"  |
-      | "tenant1" | "tenant1:testquery<timestamp>:well:1.0.0" | None  | 1     | -1     | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "'offset' must be equal or greater than 0" |
-      | "tenant2" | "tenant1:testquery<timestamp>:well:1.0.0" | None  | None  | None   | 401           | "Access denied" | "The user is not authorized to perform this action" | ""                                         |
-
-  Scenario Outline: Search data across the kinds with bounding box inputs
-    When I send <query> with <kind>
-    And I apply geographical query on field <field>
-    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
-    Then I should get in response <count> records
-
-    Examples:
-      | kind                                      | query                           | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | count |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None                            | "data.Location" | 45                | -100               | 0                     | 0                      | 2     |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None                            | "data.Location" | 45                | -80                | 0                     | 0                      | 0     |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 45                | -100               | 0                     | 0                      | 1     |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 10                | -100               | 0                     | 0                      | 0     |
-
-  Scenario Outline: Search data across the kinds with invalid bounding box inputs
-    When I send <query> with <kind>
-    And I apply geographical query on field <field>
-    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
-    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
-
-    Examples:
-      | kind                                      | query                           | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | response_code | response_type | response_message                                  | errors                                                                   |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 0                 | 0                  | 0                     | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request" | "top latitude cannot be the same as bottom latitude: 0.0 == 0.0"         |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 0                 | -100               | -10                   | -100                   | 400           | "Bad Request" | "Invalid parameters were given on search request" | "left longitude cannot be the same as right longitude: -100.0 == -100.0" |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 10                | -100               | 10                    | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request" | "top latitude cannot be the same as bottom latitude: 10.0 == 10.0"       |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 45                | -100               | -95                   | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request" | "'latitude' value is out of the range [-90, 90]"                         |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | 0                 | -100               | 10                    | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request" | "top corner is below bottom corner: 0.0 vs. 10.0"                        |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | "data.OriginalOperator:OFFICE4" | "data.Location" | None              | None               | 0                     | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request" | "Invalid payload"                                                        |
-      | "tenant1:testquery<timestamp>:*:*"        | None                         | "officeAddress" | 45                | -100               | 0                     | 0                      | 400           | "Bad Request" | "Invalid parameters were given on search request" | ""                                                                       |
-
-  Scenario Outline: Search data across the kinds with distance inputs
-    When I send <query> with <kind>
-    And I apply geographical query on field <field>
-    And define focus coordinates as (<latitude>, <longitude>) and search in a <distance> radius
-    Then I should get in response <count> records
-
-    Examples:
-      | kind                                      | query               | field           | latitude | longitude | distance | count |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | "Under development" | "data.Location" | 0        | 0         | 20000000 | 3     |
-      | "tenant1:testquery<timestamp>:*:*"        | "TEXAS OR TX"       | "data.Location" | 45       | -100      | 20000000 | 2     |
-
-  Scenario Outline: Search data across the kinds with invalid distance inputs
-    When I send <query> with <kind>
-    And I apply geographical query on field <field>
-    And define focus coordinates as (<latitude>, <longitude>) and search in a <distance> radius
-    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
-
-    Examples:
-      | kind                               | query          | field           | latitude | longitude | distance | response_code | response_type | response_message                                  | errors                                              |
-      | "tenant1:testquery<timestamp>:*:*" | "OFFICE - 2"   | "data.Location" | -45      | -200      | 1000     | 400           | "Bad Request" | "Invalid parameters were given on search request" | "'longitude' value is out of the range [-180, 180]" |
-      | "tenant1:testquery<timestamp>:*:*" | "TEXAS OR USA" | "data.Location" | -95      | -100      | 1000     | 400           | "Bad Request" | "Invalid parameters were given on search request" | "'latitude' value is out of the range [-90, 90]"    |
-      | "tenant1:testquery<timestamp>:*:*" | "Harris"       | "ZipCode"       | -45      | -200      | 1000     | 400           | "Bad Request" | "Invalid parameters were given on search request" | "'longitude' value is out of the range [-180, 180]" |
-
-  Scenario Outline: Search data across the kinds
-    When I send <query> with <kind>
-    And I limit the count of returned results to <limit>
-    And I set the offset of starting point as <offset>
-    And I set the fields I want in response as <returned_fields>
-    And I send request to tenant <tenant>
-    Then I should get in response <count> records
-
-    Examples:
-      | tenant    | kind                               | query                               | limit | offset | returned_fields | count |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | None                                | 1     | None   | All             | 1     |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | None                                | None  | 2      | All             | 4     |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | None                                | None  | None   | Country         | 6     |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "OSDU OFFICE*"              | None  | None   | All             | 6     |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "SCHLUM OFFICE"                     | None  | None   | All             | 6     |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | ""SCHLUM OFFICE""                   | None  | None   | All             | 0     |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "data.Country:USA"                  | None  | None   | All             | 2     |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "TEXAS AND OFFICE3"                    | None  | None   | All             | 1     |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "data.OriginalOperator:OFFICE5 OR OFFICE2" | None  | None   | All             | 2     |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "data.OriginalOperator:STI OR HT"   | None  | None   | All             | 0     |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "_exists_:data.Basin"               | None  | None   | All             | 4     |
-      | "tenant1" | "tenant1:testquery<timestamp>:*:*" | "data.Well\*:"Data Lake Cloud""     | None  | None   | All             | 5     |
-
-
-  Scenario Outline: Search data across the kinds with bounding box inputs
-    When I send <query> with <kind>
-    And I apply geographical query on field <field>
-    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
-    Then I should get in response <count> records
-
-    Examples:
-      | kind                               | query | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | count |
-      | "tenant1:testquery<timestamp>:*:*" | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 3     |
-      | "tenant1:testquery<timestamp>:*:*" | None  | "data.Location" | 10                | -100               | 0                     | 0                      | 0     |
-
-  Scenario Outline: Search data across the kinds with geo polygon inputs
-    When I send <query> with <kind>
-    And define geo polygon with following points <points_list>
-    And I apply geographical query on field <field>
-    Then I should get in response <count> records
-    Examples:
-      | kind                                      | query       | field           | points_list                                                                                                        | count |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "data.Location" | (26.12362;-112.226716)  , (26.595873;-68.457186) , (52.273184;-93.593904)                                          | 2     |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "data.Location" | (33.201112;-113.282863) , (33.456305;-98.269744) , (52.273184;-93.593904)                                          | 0     |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | "OFFICE4" | "data.Location" | (26.12362;-112.226716)  , (26.595873;-68.457186) , (52.273184;-93.593904)                                          | 1     |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "data.Location" | (14.29056;72.18936)     , (22.13762;72.18936)    , (22.13762;77.18936) , (14.29056;77.18936) , (14.29056;72.18936) | 1     |
-
-  Scenario Outline: Search data across the kinds with invalid geo polygon inputs
-    When I send <query> with <kind>
-    And define geo polygon with following points <points_list>
-    And I apply geographical query on field <field>
-    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
-
-    Examples:
-      | kind                                      | query | field           | points_list                                                                | response_code | response_type | response_message                                  | errors                                           |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None  | "data.Location" | (26.595873;-68.457186)   , (52.273184;-93.593904)                          | 400           | "Bad Request" | "Invalid parameters were given on search request" | "too few points defined for geo polygon query"   |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None  | "data.Location" | (516.595873;-68.457186)  , (52.273184;-94.593904) , (95.273184;-93.593904) | 400           | "Bad Request" | "Invalid parameters were given on search request" | "'latitude' value is out of the range [-90, 90]" |
-
-  Scenario Outline: Search data and sort the results with the given sort fields and order
-    When I send <query> with <kind>
-    And I want the results sorted by <sort>
-    Then I should get records in right order first record id: <first_record_id>, last record id: <last_record_id>
-    Examples:
-      | kind                                      | query       | sort                                                                         | first_record_id       | last_record_id        |
-      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["id"],"order":["ASC"]}                                             | "test:well:1.0.0:1"   | "test:well:2.0.0:3"   |
-      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["id"],"order":["DESC"]}                                            | "test:well:2.0.0:3"   | "test:well:1.0.0:1"   |
-      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["namespace","data.Rank"],"order":["ASC","DESC"]}                   | "test:well:1.0.0:3"   | "test:well:2.0.0:1"   |
-
-  Scenario Outline: Search data in a given kind with invalid sort field
-    When I send <query> with <kind>
-    And I want the results sorted by <sort>
-    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
-
-    Examples:
-      | kind                                      | query       | sort                                           | response_code | response_type   | response_message                                    | errors                                                             |
-      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":[],"order":["ASC"]}                   | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "'sort.field' can not be null or empty"                            |
-      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["id"],"order":[]}                    | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "'sort.order' can not be null or empty"                            |
-      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["id","data.Rank"],"order":["DESC"]}  | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "'sort.field' and 'sort.order' size do not match"                  |
-      | "tenant1:testquery<timestamp>:well:*"     | None        | {"field":["id"],"order":[null]}                | 400           | "Bad Request"   | "Invalid parameters were given on search request"   | "Not a valid order option. It can only be either 'ASC' or 'DESC'"                       |
-
-  Scenario Outline: Search data in a given kind with different searchAs modes
-    When I send <query> with <kind>
-    And I want to search as owner <is_owner>
-    Then I should get in response <count> records
-
-    Examples:
-      | kind                                      | query       | is_owner | count |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | true     | 3     |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | false    | 3     |
-      | "tenant1:testquery<timestamp>:well:2.0.0" | None        | true     | 0     |
-      | "tenant1:testquery<timestamp>:well:2.0.0" | None        | false    | 3     |
-      | "tenant1:testquery<timestamp>:well:*"     | None        | false    | 6     |
-      | "tenant1:testquery<timestamp>:well:*"     | None        | true     | 3     |
-      | "tenant1:testquery<timestamp>:well:*"     | "OFFICE4" | true     | 1     |
-      | "tenant1:testquery<timestamp>:well:*"     | None        | None     | 6     |
-
-  Scenario Outline: Search data in a given kind with aggregateBy field
-    When I send <query> with <kind>
-    And I want to aggregate by <aggregateBy>
-    Then I should get <count> unique values
-
-    Examples:
-      | kind                                      | query       | aggregateBy | count |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "namespace" | 1     |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "type"      | 1     |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | "OFFICE4" | "data.Rank" | 1     |
-      | "tenant1:testquery<timestamp>:well:1.0.0" | None        | "data.Rank" | 3     |
\ No newline at end of file
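
The Query.feature scenarios deleted above all drive one request shape: a kind plus an optional Lucene-style query string, paging via limit/offset, field projection via returned_fields, spatial filters, sorting, aggregation, and owner-scoped search. For orientation, here is a minimal sketch of such a call; the host, endpoint path, header names, and the camel-cased JSON field names are illustrative assumptions rather than values taken from this repository.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class SearchQuerySketch {
    public static void main(String[] args) throws Exception {
        // The body mirrors the table columns: kind, query, limit, offset, returned fields.
        String body = """
            {
              "kind": "tenant1:welldb:well:1.0.0",
              "query": "data.OriginalOperator:OFFICE4",
              "limit": 10,
              "offset": 0,
              "returnedFields": ["data.Country", "data.Rank"]
            }
            """;
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("https://search.example.com/api/search/v2/query")) // assumed URL
                .header("Content-Type", "application/json")
                .header("data-partition-id", "tenant1")              // tenant header name assumed
                .header("Authorization", "Bearer " + System.getenv("TOKEN"))
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // Valid requests return 200; the invalid-input scenarios above assert the 400/401
        // responses produced by negative limits, bad kinds, or the wrong tenant.
        System.out.println(response.statusCode());
        System.out.println(response.body());
    }
}

A spatial filter, as used in the bounding box, distance, and geo polygon scenarios, would be one more object in the same body (for example a field name plus a bounding-box block with top-left/bottom-right coordinates); the exact property names depend on the Search API version and are deliberately not guessed here.
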
diff --git a/testing/indexer-test-core/src/main/resources/features/querybycursor/crosscluster/QueryByCursor.feature b/testing/indexer-test-core/src/main/resources/features/querybycursor/crosscluster/QueryByCursor.feature
deleted file mode 100644
index 5e078b7249c9c33c67675ddab0b7ba33e191803d..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/resources/features/querybycursor/crosscluster/QueryByCursor.feature
+++ /dev/null
@@ -1,56 +0,0 @@
-Feature: Search recursively on cursor with different queries
-  To allow a user to find their data quickly, search should offer multiple ways to query data and iterate over all the results.
-
-  Background:
-    Given the elastic search is initialized with the following data
-      | kind                                     | index                                    | mappingFile | recordFile | viewerGroup                         | ownerGroup                          |
-      | tenant1:testcursor<timestamp>:well:1.0.0 | tenant1-testcursor<timestamp>-well-1.0.0 | records_1   | records_1  | data.search.integrationtest@tenant1 | data.search.integrationtest@tenant1 |
-      | tenant1:testcursor<timestamp>:well:2.0.0 | tenant1-testcursor<timestamp>-well-2.0.0 | records_2   | records_2  | data.search.integrationtest@tenant1 | data.search.integrationtest@tenant1 |
-      | common:testcursor<timestamp>:well:1.0.0  | common-testcursor<timestamp>-well-1.0.0  | records_1   | records_1  | data.search.integrationtest@common  | data.search.integrationtest@common  |
-      | common:testcursor<timestamp>:well:2.0.0  | common-testcursor<timestamp>-well-2.0.0  | records_2   | records_2  | data.search.integrationtest@common  | data.search.integrationtest@common  |
-
-  @ignore
-  Scenario Outline: Search recursively page by page data across the kinds
-    When I send <query> with <kind>
-    And I limit the count of returned results to <limit>
-    And I set the fields I want in response as <returned_fields>
-    And I send request to tenants <q1_tenants>
-    Then I should get in response <first_count> records along with a cursor
-    And I send request to tenants <q2_tenants>
-    Then I should get in response <final_count> records
-
-    Examples:
-      | q1_tenants         | q2_tenants         | kind                                | query                   | limit | returned_fields | first_count | final_count |
-      | "tenant1","common" | "tenant1","common" | "*:testcursor<timestamp>:*:*"       | "TX OR TEXAS OR FRANCE" | 3     | All             | 3           | 3           |
-      | "tenant1","common" | "tenant1","common" | "tenant1:testcursor<timestamp>:*:*" | "TX OR TEXAS OR FRANCE" | 3     | All             | 3           | 0           |
-
-
-  @ignore
-  Scenario Outline:  Search recursively page by page data across the kinds with invalid inputs and headers
-    When I send <query> with <kind>
-    And I limit the count of returned results to <limit>
-    And I set the fields I want in response as <returned_fields>
-    And I send request to tenants <q1_tenants>
-    Then I should get in response <first_count> records along with a cursor
-    And I send request to tenants <q2_tenants>
-    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
-
-    Examples:
-      | q1_tenants         | q2_tenants         | kind                                 | query | limit | returned_fields | first_count | response_code | response_type   | response_message                                    | errors |
-      | "tenant1","common" | "tenant2","common" | "*:testcursor<timestamp>:well:1.0.0" | None  | 1     | All             | 1           | 401           | "Access denied" | "The user is not authorized to perform this action" | ""     |
-
-  @ignore
-  Scenario Outline: Search data across the kinds with bounding box inputs
-    When I send <query> with <kind>
-    And I apply geographical query on field <field>
-    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
-    And I limit the count of returned results to <limit>
-    And I send request to tenants <q1_tenants>
-    Then I should get in response <first_count> records along with a cursor
-    And I send request to tenants <q2_tenants>
-    Then I should get in response <final_count> records
-
-    Examples:
-      | q1_tenants         | q2_tenants         | kind                                       | query | limit | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | first_count | final_count |
-      | "tenant1","common" | "tenant1","common" | "*:testcursor<timestamp>:well:1.0.0"       | None  | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 4           | 0           |
-      | "tenant1","common" | "tenant1","common" | "tenant1:testcursor<timestamp>:well:1.0.0" | None  | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 2           | 0           |
\ No newline at end of file
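
The cross-cluster QueryByCursor scenarios above (all tagged @ignore) page through results in two steps: an initial limited query that returns a page of records together with a cursor, and a follow-up request that submits only that cursor to fetch the next page. Below is a hedged sketch of that two-call flow; the endpoint path, header name, and the "cursor" field name are assumptions, and a real client would use a JSON library rather than the crude string scan shown here.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class CursorPagingSketch {
    private static final HttpClient CLIENT = HttpClient.newHttpClient();
    private static final String URL = "https://search.example.com/api/search/v2/query_with_cursor"; // assumed

    public static void main(String[] args) throws Exception {
        // First call: limited query; the response is expected to carry a cursor token.
        String firstPage = post("{\"kind\":\"tenant1:welldb:well:1.0.0\",\"query\":\"TX OR TEXAS OR FRANCE\",\"limit\":3}");
        String cursor = extractCursor(firstPage);

        // Second call: only the cursor is needed to continue where the first page stopped.
        if (cursor != null) {
            System.out.println(post("{\"cursor\":\"" + cursor + "\"}"));
        }
    }

    private static String post(String body) throws Exception {
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(URL))
                .header("Content-Type", "application/json")
                .header("data-partition-id", "tenant1") // tenant header name assumed
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        return CLIENT.send(request, HttpResponse.BodyHandlers.ofString()).body();
    }

    // Crude stand-in for JSON parsing: pull the value of a "cursor" property, if present.
    private static String extractCursor(String json) {
        int key = json.indexOf("\"cursor\"");
        if (key < 0) return null;
        int colon = json.indexOf(':', key);
        int open = json.indexOf('"', colon + 1);
        int close = open < 0 ? -1 : json.indexOf('"', open + 1);
        return (open >= 0 && close > open) ? json.substring(open + 1, close) : null;
    }
}
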
diff --git a/testing/indexer-test-core/src/main/resources/features/querybycursor/singlecluster/QueryByCursor.feature b/testing/indexer-test-core/src/main/resources/features/querybycursor/singlecluster/QueryByCursor.feature
deleted file mode 100644
index 83a5171560414c2dcbbd5f5df801993aa1b2d887..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/resources/features/querybycursor/singlecluster/QueryByCursor.feature
+++ /dev/null
@@ -1,92 +0,0 @@
-Feature: Search recursively on cursor with different queries
-  To allow a user to find their data quickly, search should offer multiple ways to query data and iterate over all the results.
-
-  Background:
-    Given the elastic search is initialized with the following data
-      | kind                                     | index                                    | mappingFile | recordFile | viewerGroup                  | ownerGroup                  |
-      | tenant1:testcursor<timestamp>:well:1.0.0 | tenant1-testcursor<timestamp>-well-1.0.0 | records_1   | records_1  | data.default.viewers@opendes | data.default.owners@opendes |
-      | tenant1:testcursor<timestamp>:well:2.0.0 | tenant1-testcursor<timestamp>-well-2.0.0 | records_2   | records_2  | data.default.viewers@opendes | data.default.testowners@opendes |
-
-  Scenario Outline: Search recursively page by page data across the kinds
-    When I send <query> with <kind>
-    And I limit the count of returned results to <limit>
-    And I set the fields I want in response as <returned_fields>
-    And I send request to tenant <q1_tenant>
-    Then I should get in response <first_count> records along with a cursor
-    And I send request to tenant <q2_tenant>
-    Then I should get in response <final_count> records
-
-    Examples:
-      | q1_tenant | q2_tenant | kind                                | query                     | limit | returned_fields | first_count | final_count |
-      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:*:*" | None                      | 4     | All             | 4           | 2           |
-      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:*:*" | None                      | None  | All             | 6           | 0           |
-      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:*:*" | "TX OR TEXAS OR FRANCE"   | 1     | All             | 1           | 1           |
-      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:*:*" | "XdQQ6GCSNSBLTESTFAIL"    | 1     | All             | 0           | 0           |
-      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:*:*" | "\"OFFICE2\" \| OFFICE3 \| OFFICE5" | 1     | All             | 1           | 1           |
-
-  Scenario Outline: Search recursively page by page data across the kinds with invalid inputs
-    When I send <query> with <kind>
-    And I limit the count of returned results to <limit>
-    And I set an invalid cursor
-    And I send request to tenant <tenant>
-    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
-
-    Examples:
-      | tenant    | kind                                       | query | limit | response_code | response_type                 | response_message                                  | errors                                    |
-      | "tenant1" | "tenant1:testcursor<timestamp>:well:1.0.0" | None  | None  | 400           | "Can't find the given cursor" | "The given cursor is invalid or expired"          | ""                                        |
-      | "tenant1" | "*:*:*"                                    | None  | 0     | 400           | "Bad Request"                 | "Invalid parameters were given on search request" | "Not a valid record kind. Found: *:*:*"   |
-      | "tenant1" | "tenant1:testcursor<timestamp>:well:1.0.0" | None  | -1    | 400           | "Bad Request"                 | "Invalid parameters were given on search request" | "'limit' must be equal or greater than 0" |
-
-  Scenario Outline:  Search recursively page by page data across the kinds with invalid inputs and headers
-    When I send <query> with <kind>
-    And I limit the count of returned results to <limit>
-    And I set the fields I want in response as <returned_fields>
-    And I send request to tenant <q1_tenant>
-    Then I should get in response <first_count> records along with a cursor
-    And I send request to tenant <q2_tenant>
-    Then I should get <response_code> response with reason: <response_type>, message: <response_message> and errors: <errors>
-
-    Examples:
-      | q1_tenant | q2_tenant | kind                                       | query | limit | returned_fields | first_count | response_code | response_type   | response_message                                    | errors |
-      | "tenant1" | "tenant2" | "tenant1:testcursor<timestamp>:well:1.0.0" | None  | 1     | All             | 1           | 401           | "Access denied" | "The user is not authorized to perform this action" | ""     |
-
-  Scenario Outline: Search data across the kinds with bounding box inputs
-    When I send <query> with <kind>
-    And I apply geographical query on field <field>
-    And define bounding box with points (<top_left_latitude>, <top_left_longitude>) and  (<bottom_right_latitude>, <bottom_right_longitude>)
-    And I limit the count of returned results to <limit>
-    And I send request to tenant <q1_tenant>
-    Then I should get in response <first_count> records along with a cursor
-    And I send request to tenant <q2_tenant>
-    Then I should get in response <final_count> records
-
-    Examples:
-      | q1_tenant | q2_tenant | kind                                       | query  | limit | field           | top_left_latitude | top_left_longitude | bottom_right_latitude | bottom_right_longitude | first_count | final_count |
-      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:well:1.0.0" | None   | None  | "data.Location" | 45                | -100               | 0                     | 0                      | 2           | 0           |
-      | "tenant1" | "tenant1" | "tenant1:testcursor<timestamp>:well:1.0.0" | "OFFICE4" | 1     | "data.Location" | 45                | -110               | 0                     | 0                      | 1           | 0           |
-
-  Scenario Outline: Search data and sort the results with the given sort fields and order
-    When I send <query> with <kind>
-    And I want the results sorted by <sort>
-    Then I should get records in right order first record id: <first_record_id>, last record id: <last_record_id>
-    Examples:
-      | kind                                      | query       | sort                                                                         | first_record_id       | last_record_id        |
-      | "tenant1:testcursor<timestamp>:well:*"    | None        | {"field":["id"],"order":["ASC"]}                                             | "test:well:1.0.0:1"   | "test:well:2.0.0:3"   |
-      | "tenant1:testcursor<timestamp>:well:*"    | None        | {"field":["id"],"order":["DESC"]}                                            | "test:well:2.0.0:3"   | "test:well:1.0.0:1"   |
-      | "tenant1:testcursor<timestamp>:well:*"    | None        | {"field":["namespace","data.Rank"],"order":["ASC","DESC"]}                   | "test:well:1.0.0:3"   | "test:well:2.0.0:1"   |
-
-  Scenario Outline: Search data in a given kind with different searchAs modes
-    When I send <query> with <kind>
-    And I want to search as owner <is_owner>
-    Then I should get in response <count> records
-
-    Examples:
-      | kind                                       | query       | is_owner | count |
-      | "tenant1:testcursor<timestamp>:well:1.0.0" | None        | true     | 3     |
-      | "tenant1:testcursor<timestamp>:well:1.0.0" | None        | false    | 3     |
-      | "tenant1:testcursor<timestamp>:well:2.0.0" | None        | true     | 0     |
-      | "tenant1:testcursor<timestamp>:well:2.0.0" | None        | false    | 3     |
-      | "tenant1:testcursor<timestamp>:well:*"     | None        | false    | 6     |
-      | "tenant1:testcursor<timestamp>:well:*"     | None        | true     | 3     |
-      | "tenant1:testcursor<timestamp>:well:*"     | "OFFICE4"| true     | 1     |
-      | "tenant1:testcursor<timestamp>:well:*"     | None        | None     | 6     |
\ No newline at end of file
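
Besides cursor paging, the single-cluster file above also covers sorting and owner-scoped search. The sort column in its tables is passed verbatim as a JSON object in the request body, and "search as owner" toggles whether only records owned by the caller's groups are returned. A request-body sketch combining both follows; the sort object is copied from the scenario tables, while "queryAsOwner" is a field name inferred from the step wording and should be treated as an assumption.

public class SortAndOwnerSketch {
    public static void main(String[] args) {
        // "sort" matches the JSON shown in the tables above; "queryAsOwner" is an assumed name.
        String body = """
            {
              "kind": "tenant1:welldb:well:*",
              "query": "OFFICE4",
              "sort": { "field": ["namespace", "data.Rank"], "order": ["ASC", "DESC"] },
              "queryAsOwner": true
            }
            """;
        // POST this body to the query endpoint exactly as in the earlier sketches.
        System.out.println(body);
    }
}
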
diff --git a/testing/indexer-test-core/src/main/resources/features/smart/parser/SmartParser.feature b/testing/indexer-test-core/src/main/resources/features/smart/parser/SmartParser.feature
deleted file mode 100644
index 408319b40c2c570d8f72ff07ab4333545b0d5ace..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/resources/features/smart/parser/SmartParser.feature
+++ /dev/null
@@ -1,8 +0,0 @@
-Feature: Smart Parser
-  To allow a client to parse a smart search query into the full search query syntax.
-
-  Scenario: Parse smart search input to query api input
-    When I generate smart search input with "text" and "well"
-    Then I get a response matching
-
-
diff --git a/testing/indexer-test-core/src/main/resources/features/smart/search/Smart.feature b/testing/indexer-test-core/src/main/resources/features/smart/search/Smart.feature
deleted file mode 100644
index 3da32045c661d617ce821555256471b6e0f06d71..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/resources/features/smart/search/Smart.feature
+++ /dev/null
@@ -1,30 +0,0 @@
-Feature: Smart search
-  To allow a client to get the available filters, find the possible values for each filter, and perform a search based on a filter and value.
-
-  Background: 
-    Given the elastic search is initialized with the following data
-      | kind                                    | index                                   | mappingFile | recordFile | viewerGroup                         | ownerGroup                               |
-      | tenant1:testquery<timestamp>:well:1.0.0 | tenant1-testquery<timestamp>-well-1.0.0 | records_1   | records_1  | data.search.integrationtest@tenant1 | data.search.integrationtest@tenant1      |
-      | tenant1:testquery<timestamp>:well:2.0.0 | tenant1-testquery<timestamp>-well-2.0.0 | records_2   | records_2  | data.search.integrationtest@tenant1 | data.search.integrationtestowner@tenant1 |
-  # TODO: Enable the test when ECE CCS is utilized, the test looks correct, but it does not correspond to the current stopgap specification
-  @ignore
-  Scenario Outline: Perform smart search based on query
-    When I synchronize the values in cache
-    When I list all filters on tenants <tenants>
-    Then I get the list of all available filters
-    Then I take the first filter and list values when query is <query>
-    Then I search with the filter and values and <limit> and <offset> for matching response
-
-    Examples: 
-      | tenants            | query | limit | offset |
-      | "common","tenant1" | "w"   |     2 |      1 |
-  @ignore
-  Scenario Outline: Perform smart search based on filters and values
-    When I list all filters on tenants <tenants>
-    When I take the <filter> and value <value>
-    Then I search response matching for offset <offset> and limit <limit>
-
-    Examples: 
-      | tenants            | limit | offset | filter           | value |
-      | "common","tenant1" |     1 |      0 | "text"           | "S"   |
-      | "common","tenant1" |     1 |      0 | "Field,Operator" | "S"   |
\ No newline at end of file
diff --git a/testing/indexer-test-core/src/main/resources/features/updateindex/UpdateIndexMapping.feature b/testing/indexer-test-core/src/main/resources/features/updateindex/UpdateIndexMapping.feature
deleted file mode 100644
index 23271eea8cc6acf1f7b7685a1c6537371b022ee9..0000000000000000000000000000000000000000
--- a/testing/indexer-test-core/src/main/resources/features/updateindex/UpdateIndexMapping.feature
+++ /dev/null
@@ -1,20 +0,0 @@
-Feature: Updating elastic index mapping
-  This feature deals with updating index mappings in Elasticsearch.
-
-  Background:
-    Given the elastic search is initialized with the following data
-      | kind                                            | index                                           | mappingFile | recordFile | viewerGroup                  | ownerGroup                               |
-      | tenant1:testupdatemapping<timestamp>:well:1.0.0 | tenant1-testupdatemapping<timestamp>-well-1.0.0 | records_3   | records_3  | data.default.viewers@opendes | data.default.owners@opendes |
-      | tenant1:testupdatemapping<timestamp>:well:2.0.0 | tenant1-testupdatemapping<timestamp>-well-2.0.0 | records_3   | records_3  | data.default.viewers@opendes | data.default.owners@opendes |
-
-  Scenario Outline: Update indices to enable multifield indexing
-    When I update <fieldName> in <indices> to enable multifield indexing
-    And I send request to tenant <tenant>
-    Then I should get <response_code> response
-    Then I want to validate mapping by <indices> and <fieldName> and <type>
-    When I send <query> with <kind>
-    And I want to aggregate by <fieldName>
-    Then I should get in response <count> records
-    Examples:
-      | tenant    | fieldName | type   | kind                                      | indices                                                                                              | response_code | query | count |
-      | "tenant1" | "Center"  | "well" |"tenant1:testupdatemapping<timestamp>:*:*" | "tenant1-testupdatemapping<timestamp>-well-1.0.0" ,"tenant1-testupdatemapping<timestamp>-well-2.0.0" | 200           | None  | 6     |
\ No newline at end of file
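
The UpdateIndexMapping.feature above asks the service to switch a field ("Center") to multifield indexing and then validates the resulting mapping and an aggregation on that field. In Elasticsearch terms, multifield indexing typically means adding a keyword sub-field alongside the existing field so it can be sorted and aggregated on. The sketch below shows that kind of mapping change made directly against Elasticsearch, purely for illustration: in the real flow the indexer service performs the update, and the cluster URL, index name, and field path (and the assumption that data.Center is mapped as text) are not taken from this repository.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class MultifieldMappingSketch {
    public static void main(String[] args) throws Exception {
        // Adds a "keyword" sub-field to an existing text field; Elasticsearch allows new
        // multi-fields to be added to an existing mapping without reindexing.
        String mapping = """
            {
              "properties": {
                "data": {
                  "properties": {
                    "Center": {
                      "type": "text",
                      "fields": { "keyword": { "type": "keyword", "ignore_above": 256 } }
                    }
                  }
                }
              }
            }
            """;
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:9200/tenant1-testupdatemapping-well-1.0.0/_mapping"))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString(mapping))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
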