diff --git a/.gitignore b/.gitignore
index c0a344de55d094bdc3ad65171f4786ec81529b04..76350d1e38eea081d12aaf81ea486a59b742b012 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,8 @@ dev.tfvars
 
 infra-azure-provisioning.sln
 
+osdu
+
 .envrc*
 *.output
 
diff --git a/docs/configuration-data.md b/docs/configuration-data.md
index 21aeeb2d87b729e85f5b8dbf70137b1b4c98e101..19f8d56f407548a68852d8bef55f321c90c68881 100644
--- a/docs/configuration-data.md
+++ b/docs/configuration-data.md
@@ -2,18 +2,20 @@
 
 __Repository__
 
-Clone the following repository and have them in the same file structure level as infra repository using following commands
+Create an 'osdu' directory and clone the following repositories into it using these commands:
 
 ```bash
 // you should be at one directory level above infra-azure-provisioning
 export RELEASE_VERSION=0.22
 export BRANCH_NAME="release/$RELEASE_VERSION" // replace the branch name with latest release.
 
+mkdir -p osdu && cd osdu
 git clone -b $BRANCH_NAME --single-branch https://community.opengroup.org/osdu/platform/system/reference/crs-catalog-service.git
 git clone -b $BRANCH_NAME --single-branch https://community.opengroup.org/osdu/platform/system/reference/crs-conversion-service.git
 git clone -b $BRANCH_NAME --single-branch https://community.opengroup.org/osdu/platform/data-flow/ingestion/ingestion-dags.git
 git clone -b $BRANCH_NAME --single-branch https://community.opengroup.org/osdu/platform/system/schema-service.git
 git clone -b $BRANCH_NAME --single-branch https://community.opengroup.org/osdu/platform/system/reference/unit-service.git
+cd ..
 ```
 ![File Structure](./images/directory-structure.png "File Structure")
 
@@ -32,16 +34,14 @@ az login --service-principal -u $ARM_CLIENT_ID -p $ARM_CLIENT_SECRET --tenant $A
 
 ## Upload Configuration Data
 
-3 services that required configuration data to be loaded into the configuration storage account file shares.
-
 ### Unit
 
 ```bash
 FILE_SHARE="unit"
-FILE=$(realpath ../unit-service/data/unit_catalog_v2.json)
+FILE=$(realpath osdu/unit-service/data/unit_catalog_v2.json)
 
 GROUP=$(az group list --query "[?contains(name, 'cr${UNIQUE}')].name" -otsv)
-ENV_VAULT=$(az keyvault list --resource-group $GROUP --query [].name -otsv)
+ENV_VAULT=$(az keyvault list --resource-group $GROUP --query "[].name" -otsv)
 
 az storage file upload \
   --account-name $(az keyvault secret show --id https://${ENV_VAULT}.vault.azure.net/secrets/airflow-storage --query value -otsv) \
@@ -54,10 +54,10 @@ az storage file upload \
 
 ```bash
 FILE_SHARE="crs"
-FILE=$(realpath ../crs-catalog-service/data/crs_catalog_v2.json)
+FILE=$(realpath osdu/crs-catalog-service/data/crs_catalog_v2.json)
 
 GROUP=$(az group list --query "[?contains(name, 'cr${UNIQUE}')].name" -otsv)
-ENV_VAULT=$(az keyvault list --resource-group $GROUP --query [].name -otsv)
+ENV_VAULT=$(az keyvault list --resource-group $GROUP --query "[].name" -otsv)
 
 az storage file upload \
   --account-name $(az keyvault secret show --id https://${ENV_VAULT}.vault.azure.net/secrets/airflow-storage --query value -otsv) \
@@ -70,18 +70,18 @@ az storage file upload \
 
 ```bash
 FILE_SHARE="crs-conversion"
-PROJECT_FOLDER=$(realpath ../crs-conversion-service)
+PROJECT_FOLDER=$(realpath osdu/crs-conversion-service)
 SOURCE_FOLDER="apachesis_setup/**"
 
 GROUP=$(az group list --query "[?contains(name, 'cr${UNIQUE}')].name" -otsv)
-ENV_VAULT=$(az keyvault list --resource-group $GROUP --query [].name -otsv)
+ENV_VAULT=$(az keyvault list --resource-group $GROUP --query "[].name" -otsv)
 
 az storage file upload-batch \
-  --account-name $(az keyvault secret show --id https://${ENV_VAULT}.vault.azure.net/secrets/airflow-storage --query value -otsv) \
-  --account-key $(az keyvault secret show --id https://${ENV_VAULT}.vault.azure.net/secrets/airflow-storage-key --query value -otsv) \
-  --destination $FILE_SHARE \
-  --source ${PROJECT_FOLDER} \
-  --pattern ${SOURCE_FOLDER}
+  --account-name "$(az keyvault secret show --id https://${ENV_VAULT}.vault.azure.net/secrets/airflow-storage --query value -otsv)" \
+  --account-key "$(az keyvault secret show --id https://${ENV_VAULT}.vault.azure.net/secrets/airflow-storage-key --query value -otsv)" \
+  --destination "$FILE_SHARE" \
+  --source "$PROJECT_FOLDER" \
+  --pattern "$SOURCE_FOLDER"
 ```
 
 # Ingest Manifest DAGS
@@ -102,7 +102,7 @@ FILE_SHARE="airflow2dags"
 FILE_NAME="manifest_ingestion_dags.zip"
 
 GROUP=$(az group list --query "[?contains(name, 'cr${UNIQUE}')].name" -otsv)
-ENV_VAULT=$(az keyvault list --resource-group $GROUP --query [].name -otsv)
+ENV_VAULT=$(az keyvault list --resource-group $GROUP --query "[].name" -otsv)
 
 az storage file upload \
   --account-name $(az keyvault secret show --id https://${ENV_VAULT}.vault.azure.net/secrets/airflow-storage --query value -otsv) \
diff --git a/infra/templates/osdu-r3-mvp/central_resources/main.tf b/infra/templates/osdu-r3-mvp/central_resources/main.tf
index bb415a861b6bc9cf7ea8432ff290e37f10563d1c..52db6ba830759cd842b3952b7109bc18939fab5b 100644
--- a/infra/templates/osdu-r3-mvp/central_resources/main.tf
+++ b/infra/templates/osdu-r3-mvp/central_resources/main.tf
@@ -193,6 +193,7 @@ module "storage_account" {
 
   name                = local.storage_name
   resource_group_name = azurerm_resource_group.main.name
+  location            = azurerm_resource_group.main.location
   container_names     = []
   kind                = "StorageV2"
   replication_type    = var.storage_replication_type
diff --git a/infra/templates/osdu-r3-mvp/data_partition/main.tf b/infra/templates/osdu-r3-mvp/data_partition/main.tf
index 6bcc20f87d1d2004a3c15bcc402bda83a40a031a..fe5f32546fb73982c77f17cb5454375b90677bf6 100644
--- a/infra/templates/osdu-r3-mvp/data_partition/main.tf
+++ b/infra/templates/osdu-r3-mvp/data_partition/main.tf
@@ -205,6 +205,7 @@ module "storage_account" {
 
   name                  = local.storage_name
   resource_group_name   = azurerm_resource_group.main.name
+  location              = azurerm_resource_group.main.location
   container_names       = concat(var.storage_containers, var.deploy_airflow2_resources ? var.storage_containers_dp_airflow : [], var.is_ingestion_queue_enabled ? var.storage_persistent_ingestion : [])
   queue_names           = var.is_ingestion_queue_enabled ? var.storage_queues_ingestion : var.storage_queues_without_ingestion
   kind                  = "StorageV2"
@@ -237,6 +238,7 @@ module "sdms_storage_account" {
 
   name                  = local.sdms_storage_name
   resource_group_name   = azurerm_resource_group.main.name
+  location              = azurerm_resource_group.main.location
   container_names       = []
   kind                  = "StorageV2"
   replication_type      = var.storage_replication_type
@@ -267,6 +269,7 @@ module "ingest_storage_account" {
 
   name                  = local.ingest_storage_name
   resource_group_name   = azurerm_resource_group.main.name
+  location              = azurerm_resource_group.main.location
   container_names       = []
   kind                  = "StorageV2"
   replication_type      = var.storage_replication_type
@@ -297,6 +300,7 @@ module "hierarchical_storage_account" {
 
   name                = local.hierarchical_storage_name
   resource_group_name = azurerm_resource_group.main.name
+  location            = azurerm_resource_group.main.location
   container_names     = var.storage_containers_hierarchical
   kind                = "StorageV2"
   replication_type    = var.storage_replication_type
@@ -337,6 +341,7 @@ module "cosmosdb_account" {
   sql_collections            = var.cosmos_sql_collections
   is_primary_loc_set         = var.cosmos_primary_loc
   cosmosdb_backup_redundancy = var.cosmosdb_backup_redundancy
+  enable_replication         = var.cosmosdb_enable_replications
 
   authorized_ip_ranges          = var.backend_access_allowed_networks
   public_network_access_enabled = var.backend_network_access_enabled
diff --git a/infra/templates/osdu-r3-mvp/data_partition/variables.tf b/infra/templates/osdu-r3-mvp/data_partition/variables.tf
index d1abddae2b4989c8766f436901efbca314adbb38..e919b4dd4b4e152e072059845f6f2a27b6a10c9f 100644
--- a/infra/templates/osdu-r3-mvp/data_partition/variables.tf
+++ b/infra/templates/osdu-r3-mvp/data_partition/variables.tf
@@ -124,6 +124,12 @@ variable "cosmosdb_automatic_failover" {
   default     = true
 }
 
+variable "cosmosdb_enable_replications" {
+  description = "Determines if replication is enabled for CosmosDB."
+  type        = bool
+  default     = true
+}
+
 variable "cosmos_databases" {
   description = "The list of Cosmos DB SQL Databases."
   type = list(object({
diff --git a/infra/templates/osdu-r3-mvp/service_resources/main.tf b/infra/templates/osdu-r3-mvp/service_resources/main.tf
index 34dcdbdd6cc36ac15483b064a357f1c6c4e4a25c..8d7c032c6c6b8ce2e666bb310ec7233eea2017c5 100644
--- a/infra/templates/osdu-r3-mvp/service_resources/main.tf
+++ b/infra/templates/osdu-r3-mvp/service_resources/main.tf
@@ -254,11 +254,13 @@ module "storage_account" {
 
   name                = local.storage_name
   resource_group_name = azurerm_resource_group.main.name
-  container_names     = var.storage_containers
-  share_names         = var.storage_shares
-  queue_names         = var.storage_queues
-  kind                = "StorageV2"
-  replication_type    = var.storage_replication_type
+  location            = var.resource_group_location
+
+  container_names  = var.storage_containers
+  share_names      = var.storage_shares
+  queue_names      = var.storage_queues
+  kind             = "StorageV2"
+  replication_type = var.storage_replication_type
 
   resource_tags = var.resource_tags
 }
@@ -303,6 +305,7 @@ module "system_storage_account" {
 
   name                = local.system_storage_name
   resource_group_name = azurerm_resource_group.main.name
+  location            = var.resource_group_location
   container_names     = var.system_storage_containers
   kind                = "StorageV2"
   replication_type    = var.storage_replication_type
@@ -575,6 +578,7 @@ module "cosmosdb_account" {
   databases                  = var.cosmos_databases
   sql_collections            = var.cosmos_sql_collections
   cosmosdb_backup_redundancy = var.cosmosdb_backup_redundancy
+  enable_replication         = var.cosmosdb_enable_replications
 
   public_network_access_enabled = var.backend_network_access_enabled
 
@@ -592,6 +596,7 @@ resource "azurerm_role_assignment" "cosmos_access" {
 
 // Create Role definition for "Cosmos DB Built-in Data Contributor"
 data "azurerm_cosmosdb_sql_role_definition" "osdu_identity_sql_role_definition" {
+  depends_on = [module.cosmosdb_account]
 
   resource_group_name = azurerm_resource_group.main.name
   account_name        = module.cosmosdb_account.account_name
diff --git a/infra/templates/osdu-r3-mvp/service_resources/variables.tf b/infra/templates/osdu-r3-mvp/service_resources/variables.tf
index b3d831b1d22963dfbdc931a01dd022d898463cc1..e245444d545171a5fc75941d694da4ba30e0ee36 100644
--- a/infra/templates/osdu-r3-mvp/service_resources/variables.tf
+++ b/infra/templates/osdu-r3-mvp/service_resources/variables.tf
@@ -402,6 +402,12 @@ variable "cosmosdb_automatic_failover" {
   default     = true
 }
 
+variable "cosmosdb_enable_replications" {
+  description = "Determines if replication is enabled for CosmosDB."
+  type        = bool
+  default     = true
+}
+
 variable "cosmos_databases" {
   description = "The list of Cosmos DB SQL Databases."
   type = list(object({
diff --git a/tools/test_data/README.md b/tools/test_data/README.md
index 600fd8fd18fb3a6489349ad4ae74afc14ddc0109..251402d59227c1cc7110a67fceabf654054b254a 100644
--- a/tools/test_data/README.md
+++ b/tools/test_data/README.md
@@ -4,14 +4,6 @@
 
 * [Python 2.7 or 3.5.3+][python]
 
-__SDK installation__
-
-Install the Python SDK
-
-```bash
-pip3 install azure-cosmos
-```
-
 __CLI Login__
 
 Login to Azure CLI using the OSDU Environment Service Principal.
@@ -69,11 +61,11 @@ These files need to be uploaded into the proper Cosmos Collections with the requ
 > NOTE: If you are doing a manual deployment, your partition name is "opendes" by default
 
 ```bash
-# Retrieve Values from Common Key Vault
+# Retrieve Values from Common Key Vault (log in with a user account that has access to this vault)
 COMMON_VAULT="<common keyvault created in common prepare phase>"
 export NO_DATA_ACCESS_TESTER=$(az keyvault secret show --id https://$COMMON_VAULT.vault.azure.net/secrets/osdu-mvp-${UNIQUE}-noaccess-clientid --query value -otsv)
 
-# Retrieve Values from Environment Key Vault
+# Retrieve Values from Environment Key Vault (log in with the service principal)
 export COSMOS_ENDPOINT=$(az keyvault secret show --id https://${ENV_VAULT}.vault.azure.net/secrets/${PARTITION_NAME}-cosmos-endpoint --query value -otsv)
 export COSMOS_KEY=$(az keyvault secret show --id https://${ENV_VAULT}.vault.azure.net/secrets/${PARTITION_NAME}-cosmos-primary-key --query value -otsv)
 export SERVICE_PRINCIPAL_ID=$(az keyvault secret show --id https://${ENV_VAULT}.vault.azure.net/secrets/app-dev-sp-username --query value -otsv)
@@ -83,5 +75,8 @@ export GRAPH_DB_HOST=$(az group list --query "[?contains(name, 'cr${UNIQUE}')].n
 
 # Execute the Upload from the test data folder (script expects file locally)
 cd tools/test_data
+python -m venv .venv
+source .venv/bin/activate
+pip install -r requirements.txt
 python3 upload-data.py
 ```
diff --git a/tools/test_data/requirements.txt b/tools/test_data/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c4a2b1a2d8e8e9336445209a15f1aad9a1789779
--- /dev/null
+++ b/tools/test_data/requirements.txt
@@ -0,0 +1 @@
+azure-cosmos