Commit 32c46daa authored by Diego Molteni

Merge branch 'slb/dm3/remove-bkjob' into 'master'

fix: removed background job for detect running version of azure cosmos

See merge request !452
parents 3213f3ce 36029164
Pipeline #174403 failed
@@ -37,4 +37,3 @@ newman
# dump file
*.rdb
@@ -14,13 +14,8 @@
// limitations under the License.
// ============================================================================
import { Config } from '.';
import { Error } from '../shared';
// to-remove once the migration to the new DB has been completed in all Azure deployments
export const ENABLED_COSMOS_MIGRATION = process.env.ENABLED_COSMOS_MIGRATION !== 'false';
export const ENABLE_USAGE_COSMOS_DATABASE_OLD_INDEX = process.env.ENABLE_USAGE_COSMOS_DATABASE_OLD_INDEX === 'true';
export class CloudFactory {
public static register(providerLabel: string) {
@@ -34,93 +29,20 @@ export class CloudFactory {
};
}
public static azureDatabase: { [key: string]: { regular: boolean, enhanced: boolean, failure: boolean } } = {}
public static build(providerLabel: string, referenceAbstraction: any, args: { [key: string]: any } = {}) {
if (providerLabel === undefined || providerLabel === 'unknown') {
throw Error.make(Error.Status.UNKNOWN, `Unrecognized cloud provider: ${providerLabel}`);
}
if (!ENABLED_COSMOS_MIGRATION) {
for (const provider of CloudFactory.providers[providerLabel]) {
if (provider.prototype instanceof referenceAbstraction) {
if (ENABLE_USAGE_COSMOS_DATABASE_OLD_INDEX && provider.name === 'AzureCosmosDbDAO') {
continue;
}
return new provider(args);
}
}
throw Error.make(Error.Status.UNKNOWN,
`The cloud provider builder that extends ${referenceAbstraction} has not been found`);
} else { // to-remove once the migration to the new DB has been completed in all Azure deployments
const azureJournalProviders: any[] = []; // This is temporarily required by Azure deployments.
for (const provider of CloudFactory.providers[providerLabel]) {
if (provider.prototype instanceof referenceAbstraction) {
// This is temporarily required by Azure deployments.
// It allows migrating the cosmos databases to a new index model with no downtime.
// This condition will be removed once the migration process is completed.
if (provider.name === 'AzureCosmosDbDAO' || provider.name === 'AzureCosmosDbDAORegular') {
if (ENABLE_USAGE_COSMOS_DATABASE_OLD_INDEX && provider.name === 'AzureCosmosDbDAORegular') {
return new provider(args);
}
azureJournalProviders.push(provider);
} else {
return new provider(args);
}
}
for (const provider of CloudFactory.providers[providerLabel]) {
if (provider.prototype instanceof referenceAbstraction) {
return new provider(args);
}
// This is temporarily required by Azure deployments.
// It allows migrating the cosmos databases to a new index model with no downtime.
// This condition will be removed once the migration process is completed.
if (azureJournalProviders.length > 0) {
const partition = args.name;
// the list of partitions is refreshed every minute.
// if a newly created partition is detected, return the enhanced implementation
if (!(partition in CloudFactory.azureDatabase)) {
for (const azureProvider of azureJournalProviders) {
if (azureProvider.name === 'AzureCosmosDbDAO') {
return new azureProvider(args);
}
}
}
// databases were not detected, most probably due to a Cosmos error other than 404 (for example, a 429 rate limit)
if (CloudFactory.azureDatabase[partition].failure) {
throw (Error.make(Error.Status.NOT_AVAILABLE,
'The service failed to locate the internal Cosmos DB. Call should be retried'));
}
// both databases cannot exist at the same time; the migration process is probably running
if (CloudFactory.azureDatabase[partition].regular && CloudFactory.azureDatabase[partition].enhanced) {
throw (Error.make(Error.Status.NOT_AVAILABLE,
'The partition has 2 active databases in cosmos. A migration process is possibly in place.'));
}
// load the right implementation; support is provided for both versions of the db.
// if no database exists, the new one will be used (newly created partitions)
Config.ENFORCE_SCHEMA_BY_KEY = !CloudFactory.azureDatabase[partition].regular;
const cosmosClassName = CloudFactory.azureDatabase[partition].regular ? 'AzureCosmosDbDAORegular' : 'AzureCosmosDbDAO';
for (const azureProvider of azureJournalProviders) {
if (azureProvider.name === cosmosClassName) {
return new azureProvider(args);
}
}
}
throw Error.make(Error.Status.UNKNOWN,
`The cloud provider builder that extends ${referenceAbstraction} has not been found`);
}
throw Error.make(Error.Status.UNKNOWN,
`The cloud provider builder that extends ${referenceAbstraction} has not been found`);
}
private static providers: { [key: string]: any[] } = {};
}
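For reference, once the migration branch above is removed, CloudFactory.build likely reduces to the sketch below. This is a reconstruction from the diff context, not the merged source, and it assumes the ENABLE_USAGE_COSMOS_DATABASE_OLD_INDEX special case is dropped along with the migration flag:

public static build(providerLabel: string, referenceAbstraction: any, args: { [key: string]: any } = {}) {
    if (providerLabel === undefined || providerLabel === 'unknown') {
        throw Error.make(Error.Status.UNKNOWN, `Unrecognized cloud provider: ${providerLabel}`);
    }
    // return the first registered provider that extends the requested abstraction
    for (const provider of CloudFactory.providers[providerLabel]) {
        if (provider.prototype instanceof referenceAbstraction) {
            return new provider(args);
        }
    }
    throw Error.make(Error.Status.UNKNOWN,
        `The cloud provider builder that extends ${referenceAbstraction} has not been found`);
}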
@@ -14,10 +14,8 @@
// limitations under the License.
// ============================================================================
import { ENABLED_COSMOS_MIGRATION } from '../../cloud';
import { Config, ConfigFactory } from '../../config';
import { LoggerFactory } from '../../logger';
import { DatabaseChecker } from './cosmos-background';
import { AzureInsightsLogger } from './insights';
import { Keyvault } from './keyvault';
@@ -186,15 +184,6 @@ export class AzureConfig extends Config {
// initialize app insight
AzureInsightsLogger.initialize();
// initialize and run the background job that detects database existence
if (ENABLED_COSMOS_MIGRATION) {
await DatabaseChecker.collectPartitions();
await DatabaseChecker.checkDatabaseExistence();
DatabaseChecker.run().catch((error) => {
LoggerFactory.build(Config.CLOUDPROVIDER).error(error);
});
}
} catch (error) {
LoggerFactory.build(Config.CLOUDPROVIDER).error('Unable to initialize configuration for azure cloud provider ' + error);
throw error;
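With the background job gone, the initialization shown above likely reduces to the sketch below (assuming the surrounding try/catch in AzureConfig is otherwise unchanged):

try {
    // initialize app insight
    AzureInsightsLogger.initialize();
} catch (error) {
    LoggerFactory.build(Config.CLOUDPROVIDER).error(
        'Unable to initialize configuration for azure cloud provider ' + error);
    throw error;
}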
// ============================================================================
// Copyright 2017-2022, Schlumberger
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ============================================================================
import { Container, CosmosClient } from '@azure/cosmos';
import { getInMemoryCacheInstance } from '../../../shared';
import { CloudFactory } from '../../cloud';
import { AzureDataEcosystemServices } from './dataecosystem';
// This background process checks the existence of these databases in each partition every 30 seconds:
// * old index model database id: seistore-<partition-id>-db
// * new index model database id: sdms-db
export class DatabaseChecker {
private static partitions: string[];
private static clients: { [key: string]: CosmosClient } = {};
private static containers: { [key: string]: Container } = {};
public static async run(): Promise<void> {
// refresh the list of existing partitions every 60 seconds.
// these calls hit the partition service, and the list of partitions rarely changes.
setInterval(DatabaseChecker.collectPartitions, 60 * 1000);
// refresh the database existence list for each partition every 30 seconds.
// this call hits cosmos and needs to run more frequently.
setInterval(DatabaseChecker.checkDatabaseExistence, 30 * 1000);
}
// refresh the list of existing partitions
public static async collectPartitions(): Promise<void> {
DatabaseChecker.partitions = (await AzureDataEcosystemServices.getPartitions()).filter((partition) => {
return !partition.startsWith('integrationtest');
});
}
// refresh the list of existing databases
public static async checkDatabaseExistence(): Promise<void> {
if (DatabaseChecker.partitions) {
for (const partition of DatabaseChecker.partitions) {
// retrieve the connection parameters if not already fetched
if (!(partition in DatabaseChecker.clients)) {
try {
const connectionParams = await AzureDataEcosystemServices.getCosmosConnectionParams(partition);
DatabaseChecker.clients[partition] = new CosmosClient({
endpoint: connectionParams.endpoint,
key: connectionParams.key
})
} catch (err: any) { continue; }
}
// record a failure if the failure reason is not 404 Not Found
let failure = false;
// check if the new/enhanced database exists
let enhanced = false
try {
await DatabaseChecker.clients[partition].database('sdms-db').read();
enhanced = true
} catch (err: any) {
if (!(err instanceof Error) || (err as Error).message.indexOf('StatusCode: 404') === -1) {
failure = true;
}
}
// check if the regular database exists
let regular = false;
try {
await DatabaseChecker.clients[partition].database('seistore-' + partition + '-db').read();
regular = true;
} catch (err: any) {
if (!(err instanceof Error) || (err as Error).message.indexOf('StatusCode: 404') === -1) {
failure = true;
}
}
if (regular && enhanced) {
// retrieve the enhanced database container
if (!(partition in DatabaseChecker.containers)) {
const database = DatabaseChecker.clients[partition].database('sdms-db');
DatabaseChecker.containers[partition] = database.container('data');
}
// check if the migration completed flag exists in the database
if ((await DatabaseChecker.containers[partition].item(
'z:mig:db:complete', 'z:mig:db:complete').read()).statusCode === 200) {
regular = false;
}
// check if the cache clear flag exists in the database
if ((await DatabaseChecker.containers[partition].item(
'z:cache::clear', 'z:cache::clear').read()).statusCode === 200) {
getInMemoryCacheInstance().flushAll();
}
}
// build or update the database-existence record for this partition in the cloud provider
if (partition in CloudFactory.azureDatabase) {
CloudFactory.azureDatabase[partition].regular = regular;
CloudFactory.azureDatabase[partition].enhanced = enhanced;
CloudFactory.azureDatabase[partition].failure = failure;
} else {
CloudFactory.azureDatabase[partition] = { regular, enhanced, failure };
}
}
}
}
}
\ No newline at end of file
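The checker above treats any error whose message does not contain 'StatusCode: 404' as a transient failure. A more direct variant of the same probe, sketched below, keys on the numeric status code; the function name and the assumption that the @azure/cosmos error surfaces the HTTP status on err.code are illustrative, not part of this repository:

import { CosmosClient } from '@azure/cosmos';

// Probe a Cosmos database id and report 'absent' only on a genuine 404;
// anything else (for example a 429 rate limit) is reported as 'unknown' so callers can retry.
async function databaseState(client: CosmosClient, databaseId: string): Promise<'present' | 'absent' | 'unknown'> {
    try {
        await client.database(databaseId).read();
        return 'present';
    } catch (err: any) {
        // assumption: the SDK error carries the HTTP status code on `code`
        if (err?.code === 404) { return 'absent'; }
        return 'unknown';
    }
}

// usage (ids taken from the checker above):
//   databaseState(client, 'sdms-db')
//   databaseState(client, 'seistore-' + partition + '-db')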
// ============================================================================
// Copyright 2017-2022, Schlumberger
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ============================================================================
import { CosmosClient, Container, SqlQuerySpec, SqlParameter, FeedOptions } from '@azure/cosmos';
import {
AbstractJournal, AbstractJournalTransaction,
IJournalQueryModel, IJournalTransaction, JournalFactory
} from '../../journal';
import { Utils } from '../../../shared/utils'
import { TenantModel } from '../../../services/tenant';
import { AzureDataEcosystemServices } from './dataecosystem';
import { AzureConfig } from './config';
@JournalFactory.register('azure')
export class AzureCosmosDbDAORegular extends AbstractJournal {
public KEY = Symbol('id');
private dataPartition: string;
private static containerCache: { [key: string]: Container; } = {};
public async getCosmoContainer(): Promise<Container> {
const containerId = 'seistore-' + this.dataPartition + '-container';
if (AzureCosmosDbDAORegular.containerCache[containerId]) {
return AzureCosmosDbDAORegular.containerCache[containerId];
} else {
const connectionParams = await AzureDataEcosystemServices.getCosmosConnectionParams(this.dataPartition);
const cosmosClient = new CosmosClient({
endpoint: connectionParams.endpoint,
key: connectionParams.key
});
const { database } = await cosmosClient.databases.createIfNotExists({ id: 'seistore-' + this.dataPartition + '-db' });
const { container } = await database.containers.createIfNotExists({
id: 'seistore-' + this.dataPartition + '-container',
maxThroughput: AzureConfig.COSMO_MAX_THROUGHPUT,
partitionKey: '/key'
});
AzureCosmosDbDAORegular.containerCache[containerId] = container;
return AzureCosmosDbDAORegular.containerCache[containerId];
}
}
public constructor(tenant: TenantModel) {
super();
this.dataPartition = tenant.esd.indexOf('.') !== -1 ? tenant.esd.split('.')[0] : tenant.esd;
}
public async save(datasetEntity: any): Promise<void> {
if (!(datasetEntity instanceof Array)) {
datasetEntity = [datasetEntity];
}
for (const entity of datasetEntity) {
const item = {
id: entity.key.name,
key: entity.key.partitionKey,
data: entity.data
}
item.data[this.KEY.toString()] = entity.key;
if (entity.ctag) {
item.data.ctag = entity.ctag;
}
await (await this.getCosmoContainer()).items.upsert(item);
}
}
public async get(key: any): Promise<[any | any[]]> {
const item = await (await this.getCosmoContainer()).item(key.name, key.partitionKey).read();
if (item.resource === undefined) {
return [undefined];
};
const data = item.resource.data;
data[this.KEY] = data[this.KEY.toString()];
delete data[this.KEY.toString()];
return [data];
}
public async delete(key: any): Promise<void> {
await (await this.getCosmoContainer()).item(key.name, key.partitionKey).delete();
}
public createQuery(namespace: string, kind: string): IJournalQueryModel {
return new AzureCosmosDbQueryRegular(namespace, kind);
}
public async runQuery(query: IJournalQueryModel): Promise<[any[], { endCursor?: string }]> {
const cosmosQuery = (query as AzureCosmosDbQueryRegular);
const statement = cosmosQuery.prepareSqlStatement(AzureConfig.DATASETS_KIND);
const response = await (await this.getCosmoContainer()).items.query(
statement.spec, statement.options).fetchNext();
const results = response.resources.map(result => {
if (!result.data) {
return result;
} else {
if (result.data[this.KEY.toString()]) {
result.data[this.KEY] = result.data[this.KEY.toString()];
delete result.data[this.KEY.toString()];
return result.data;
} else {
return result.data;
}
}
});
return Promise.resolve(
[
results,
{
endCursor: response.continuationToken
}
]);
}
public createKey(specs: any): object {
const kind = specs.path[0];
const partitionKey = specs.namespace + '-' + kind;
let name: string;
if (kind === AzureConfig.DATASETS_KIND) {
name = Utils.makeID(16);
} else if (kind === AzureConfig.SEISMICMETA_KIND) {
name = specs.path[1].replace(/\W/g, '-');
} else {
name = specs.path[1];
}
return { name, partitionKey, kind };
}
// new instance of AzureCosmosDbTransactionDAO
public getTransaction(): IJournalTransaction {
return new AzureCosmosDbTransactionDAO(this);
}
public getQueryFilterSymbolContains(): string {
return 'CONTAINS';
}
}
declare type OperationType = 'save' | 'delete';
class AzureCosmosDbTransactionOperation {
public constructor(type: OperationType, entityOrKey: any) {
this.type = type;
this.entityOrKey = entityOrKey;
}
public type: OperationType;
public entityOrKey: any;
}
/**
* A wrapper class for datastore transactions
* ! Note: looks awfully close to datastore interface.
*/
class AzureCosmosDbTransactionDAO extends AbstractJournalTransaction {
public KEY = null;
public constructor(owner: AzureCosmosDbDAORegular) {
super();
this.owner = owner;
this.KEY = this.owner.KEY;
}
public async save(entity: any): Promise<void> {
this.queuedOperations.push(new AzureCosmosDbTransactionOperation('save', entity));
await Promise.resolve();
}
public async get(key: any): Promise<[any | any[]]> {
return await this.owner.get(key);
}
public async delete(key: any): Promise<void> {
this.queuedOperations.push(new AzureCosmosDbTransactionOperation('delete', key));
await Promise.resolve();
}
public createQuery(namespace: string, kind: string): IJournalQueryModel {
return this.owner.createQuery(namespace, kind);
}
public async runQuery(query: IJournalQueryModel): Promise<[any[], { endCursor?: string }]> {
return await this.owner.runQuery(query);
}
public async run(): Promise<void> {
if (this.queuedOperations.length) {
await Promise.reject('Transaction is already in use.');
}
else {
this.queuedOperations = [];
return Promise.resolve();
}
}
public async rollback(): Promise<void> {
this.queuedOperations = [];
return Promise.resolve();
}
public async commit(): Promise<void> {
for (const operation of this.queuedOperations) {
if (operation.type === 'save') {
await this.owner.save(operation.entityOrKey);
}
if (operation.type === 'delete') {
await this.owner.delete(operation.entityOrKey);
}
}
this.queuedOperations = [];
return Promise.resolve();
}
public getQueryFilterSymbolContains(): string {
return 'CONTAINS';
}
private owner: AzureCosmosDbDAORegular;
public queuedOperations: AzureCosmosDbTransactionOperation[] = [];
}
declare type Operator = '=' | '<' | '>' | '<=' | '>=' | 'HAS_ANCESTOR' | 'CONTAINS';
class SqlStatementBuilder {
constructor(tableName: string, alias: string) {
this.tableName = tableName;
this.alias = alias;
}
public getUniqueParameterName(baseName: string): string {
let actualName = baseName;
let paramIndex = 0;
// find the first variation of this param name that is unused
while (this.parameterValues.find(p => p.name === `@${actualName}`)) {
paramIndex++;
actualName = `${actualName}${paramIndex}`;
}
return actualName;
}
public addFilterExpression(expression: string, parameterName: string, value: {}) {
this.filterExpressions.push(expression);
this.parameterValues.push({ name: parameterName, value });
}
public build(): SqlQuerySpec {
let query = '';
for (const filter of this.filterExpressions) {
if (query) {
query += ' AND ';
}
query += filter;
}
if (query) {
query = `WHERE ${query}`;
}
let fieldList = '*';
if (this.projectedFieldNames.length) {
fieldList = '';
for (const field of this.projectedFieldNames) {
if (fieldList) {
fieldList += ', ';
}
fieldList += `${this.alias}.data.${field}`;
}
}
if (query) {
query = `SELECT ${fieldList} FROM ${this.tableName} AS ${this.alias} ${query}`;
}
else {
query = `SELECT ${fieldList} FROM ${this.tableName} AS ${this.alias}`;
}
if (this.groupByFieldNames.length) {
let groupByList = '';
for (const field of this.groupByFieldNames) {
if (groupByList) {
groupByList += ', ';
}
groupByList += `${this.alias}.data.${field}`;
}
query = `${query} GROUP BY ${groupByList}`;
}
return {
query,
parameters: this.parameterValues
};
}
public tableName: string;
public alias: string;
private filterExpressions: string[] = [];
private parameterValues: SqlParameter[] = [];
public projectedFieldNames: string[] = [];
public groupByFieldNames: string[] = [];
}
class AzureCosmosDbFilter {
public constructor(property: string, operator: Operator, value: {}) {
this.property = property;
this.operator = operator;
this.value = value;
}
public property: string;
public operator: Operator;
public value: {};
public addFilterExpression(toStatement: SqlStatementBuilder) {
if (this.operator === 'HAS_ANCESTOR') {
throw new Error('HAS_ANCESTOR operator is not supported in query filters.');
}
const parameterName = `@${toStatement.getUniqueParameterName(this.property)}`;
if (this.operator === 'CONTAINS') {
toStatement.addFilterExpression(
`ARRAY_CONTAINS(${toStatement.alias}.data.${this.property} , ${parameterName})`,
parameterName,
this.value
);
}
else {
toStatement.addFilterExpression(
`${toStatement.alias}.data.${this.property} ${this.operator} ${parameterName}`,
parameterName,
this.value
);
}
}
}
/**
* A shim for CosmosDB that provides compatibility with Google's SDK.
* ! Note: looks awfully close to Google Query interface.
*/
export class AzureCosmosDbQueryRegular implements IJournalQueryModel {
public constructor(namespace: string, kind: string) {
this.namespace = namespace;
this.kind = kind;
}
filter(property: string, value: {}): IJournalQueryModel;
filter(property: string, operator: Operator, value: {}): IJournalQueryModel;
filter(property: string, operator?: Operator, value?: {}): IJournalQueryModel {
if (value === undefined) {
value = operator;
operator = '=';
}
if (operator === undefined) {
operator = '=';
}
if (value === undefined) {
value = '';
}
const filter = new AzureCosmosDbFilter(property, operator, value);
this.filters.push(filter);
return this;
}
start(start: string | Buffer): IJournalQueryModel {
if (start instanceof Buffer) {
throw new Error('Type \'Buffer\' is not supported for CosmosDB Continuation while paging.');
}
this.pagingStart = start as string;
return this;
}
limit(n: number): IJournalQueryModel {
this.pagingLimit = n;
return this;
}
groupBy(fieldNames: string | string[]): IJournalQueryModel {
if (typeof fieldNames === 'string') {
this.groupByFieldNames = [fieldNames];
} else {
this.groupByFieldNames = fieldNames;
}
return this;
}
select(fieldNames: string | string[]): IJournalQueryModel {
if (typeof fieldNames === 'string') {
this.projectedFieldNames = [fieldNames];
} else {
this.projectedFieldNames = fieldNames;
}
return this;
}
private filters: AzureCosmosDbFilter[] = [];
private projectedFieldNames: string[] = [];
private groupByFieldNames: string[] = [];
private pagingStart?: string;
private pagingLimit?: number;
public namespace: string;
public kind: string;
public prepareSqlStatement(tableName: string): { spec: SqlQuerySpec, options: FeedOptions } {
const builder = new SqlStatementBuilder(tableName, 'a');
for (const filter of this.filters) {
filter.addFilterExpression(builder);
}
builder.projectedFieldNames = this.projectedFieldNames;
builder.groupByFieldNames = this.groupByFieldNames;
const spec = builder.build();
return {
spec,
options: {
partitionKey: `${this.namespace}-${this.kind}`,
continuationToken: this.pagingStart,
maxItemCount: this.pagingLimit || -1
}
};
}
}
\ No newline at end of file
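A usage sketch of the query model above (field names and tenant id are illustrative; runQuery normally supplies AzureConfig.DATASETS_KIND as the table name):

import { AzureCosmosDbQueryRegular } from './cosmosdb-regular';

const query = new AzureCosmosDbQueryRegular('tenant-a', 'datasets');
query.filter('gtags', 'CONTAINS', 'public');   // becomes an ARRAY_CONTAINS expression
query.filter('name', 'dataset-01');            // two-argument form defaults the operator to '='
query.limit(50);

const { spec, options } = query.prepareSqlStatement('datasets');
// spec.query      -> SELECT * FROM datasets AS a WHERE ARRAY_CONTAINS(a.data.gtags , @gtags) AND a.data.name = @name
// spec.parameters -> [ { name: '@gtags', value: 'public' }, { name: '@name', value: 'dataset-01' } ]
// options         -> { partitionKey: 'tenant-a-datasets', continuationToken: undefined, maxItemCount: 50 }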
@@ -16,7 +16,6 @@
export { AzureCloudStorage } from './cloudstorage';
export { AzureCosmosDbDAO, AzureCosmosDbQuery } from './cosmosdb';
export { AzureCosmosDbDAORegular, AzureCosmosDbQueryRegular } from './cosmosdb-regular';
export { AzureInsightsLogger } from './insights';
export { AzureConfig } from './config';
export { AzureCredentials } from './credentials';
import sinon from 'sinon';
import { CosmosClient, Container, SqlQuerySpec, SqlParameter, FeedOptions, Item, Items, QueryIterator, FeedResponse } from '@azure/cosmos';
import {
AbstractJournal, AbstractJournalTransaction,
IJournalQueryModel, IJournalTransaction, JournalFactory
} from '../../../../src/cloud/journal';
import { Utils } from '../../../../src/shared/utils'
import { TenantModel } from '../../../../src/services/tenant/index';
import { AzureDataEcosystemServices } from '../../../../src/cloud/providers/azure/dataecosystem';
import { AzureConfig } from '../../../../src/cloud/providers/azure/config';
import { Config } from '../../../../src/cloud';
import { AzureCosmosDbDAORegular, AzureCosmosDbQueryRegular } from '../../../../src/cloud/providers/azure/cosmosdb-regular';
import { Tx } from '../../utils';
import { assert } from 'chai';
import { throws } from 'assert';
export class TestAzureCosmosDbDAORegular {
private static sandbox: sinon.SinonSandbox;
private static cosmos: AzureCosmosDbDAORegular;
public static run() {
describe(Tx.testInit('azure cosmos db dao regular test'), () => {
Config.CLOUDPROVIDER = 'azure';
this.sandbox = sinon.createSandbox();
this.cosmos = new AzureCosmosDbDAORegular({ gcpid: 'gcpid', default_acls: 'x', esd: 'gcpid@domain.com', name: 'gcpid' });
beforeEach(() => {
this.sandbox.stub(AzureCosmosDbDAORegular.prototype, 'getCosmoContainer').resolves(
new Container(undefined, 'id', undefined));
});
afterEach(() => {
this.sandbox.restore();
});
this.save();
this.get();
this.delete();
this.createQuery();
this.runQuery();
this.createKey();
});
};
private static save() {
const mockEntity = {
key: {
name: "testPartitionKeyName",
partitionKey: 'testPartitionKey',
id: 'testId'
},
data: {
id: 'test',
ctag: "0000000000000000"
},
ctag: "0000000000000000"
}
Tx.sectionInit('save');
Tx.test(async (done: any) => {
this.sandbox.stub(Items.prototype, 'upsert').resolves(mockEntity.data as any);
this.cosmos.save(mockEntity).then(res => {
done();
});
});
};
private static get() {
Tx.sectionInit('get');
const key = {
id: 'testId',
partitionKey: 'testKey',
kind: 'testKind'
};
Tx.test(async (done: any) => {
const mockResult = {
resource: {
data: {
id: 'testId',
param: 'testParam'
}
}
} as any;
this.sandbox.stub(Item.prototype, 'read').returns(mockResult);
const [result] = await this.cosmos.get(key);
assert.deepEqual(mockResult.resource.data, result, 'Get returned wrong object');
done();
});
Tx.test(async (done: any) => {
const mockResult = {
resource: undefined,
statusCode: 404
} as any;
this.sandbox.stub(Item.prototype, 'read').returns(Promise.resolve(mockResult));
const [res] = await this.cosmos.get(key);
Tx.checkTrue(res === undefined, done);
});
};
private static delete() {
Tx.sectionInit('delete');
Tx.test(async (done: any) => {
this.sandbox.stub(Item.prototype, 'delete').resolves();
await this.cosmos.delete({ partitionKey: 'entity' });
done();
});
};
private static createQuery() {
Tx.sectionInit('createQuery');
Tx.test( (done: any) => {
let res = this.cosmos.createQuery("namespace", "kind");
Tx.checkTrue(res !== undefined, done);
});
};
private static runQuery() {
Tx.sectionInit('runQuery');
let feedResponse: FeedResponse<any> = {
resources: ["resources"],
headers: undefined,
hasMoreResults: false,
continuation: '',
continuationToken: 'continuationToken',
queryMetrics: '',
requestCharge: 0,
activityId: ''
} as any;
let queryIterator: QueryIterator<any> = {
fetchNext: function (): Promise<FeedResponse<any>> {
return Promise.resolve(feedResponse);
}
} as any;
let azureCosmosDbQueryRegular = {
filter: function (property: string, value: {}): AzureCosmosDbQueryRegular {
throw new Error('Function not implemented.');
},
start: function (start: string | Buffer): AzureCosmosDbQueryRegular {
throw new Error('Function not implemented.');
},
limit: function (n: number): AzureCosmosDbQueryRegular {
throw new Error('Function not implemented.');
},
groupBy: function (fieldNames: string | string[]): AzureCosmosDbQueryRegular {
throw new Error('Function not implemented.');
},
select: function (fieldNames: string | string[]): AzureCosmosDbQueryRegular {
throw new Error('Function not implemented.');
},
filters: [],
projectedFieldNames: [],
groupByFieldNames: [],
namespace: 'namespace',
kind: 'datasets',
prepareSqlStatement: function (tableName: string): { spec: SqlQuerySpec; options: FeedOptions; } {
let spec: SqlQuerySpec = { query: '' };
let options: FeedOptions = {};
return ({spec, options});
}
};
Tx.test( async(done: any) => {
this.sandbox.stub(Items.prototype, 'query').returns(queryIterator);
let output = await this.cosmos.runQuery(azureCosmosDbQueryRegular);
Tx.checkTrue(output !== undefined, done);
});
};
private static createKey() {
Tx.sectionInit('create key');
Tx.test(async (done: any) => {
const specs = {
namespace: 'testNamespace',
path: [AzureConfig.DATASETS_KIND, 'dsName']
}
const expectedKey = { name: 'ws2vlmnTpgoQf41X', partitionKey: 'testNamespace-datasets', kind: "datasets" }
this.sandbox.stub(Utils, "makeID").returns("ws2vlmnTpgoQf41X");
const key = this.cosmos.createKey(specs);
assert.deepEqual(key, expectedKey, 'Keys do not match');
done();
});
Tx.test(async (done: any) => {
const specs = {
namespace: 'testNamespace',
path: [AzureConfig.SEISMICMETA_KIND, 'skName']
}
const expectedKey = { name: 'skName', partitionKey: 'testNamespace-seismicmeta', kind: "seismicmeta" }
const key = this.cosmos.createKey(specs);
assert.deepEqual(key, expectedKey, 'Keys do not match');
done();
});
Tx.test(async (done: any) => {
const specs = {
namespace: 'testNamespace',
path: [AzureConfig.APPS_KIND, 'apName']
}
const expectedKey = { name: 'apName', partitionKey: 'testNamespace-apps', kind: "apps" }
const key = this.cosmos.createKey(specs);
assert.deepEqual(key, expectedKey, 'Keys do not match');
done();
});
};
};
\ No newline at end of file
@@ -29,7 +29,6 @@ import { TestAzureCosmosDbTransactionDAO } from './azure/cosmosdb-transactions';
import { TestGCSCore } from './google/gcs';
import { TestAzureKeyVault } from './azure/keyvault';
import { TestAzureStorage } from './azure/cloudstorage';
import { TestAzureCosmosDbDAORegular } from './azure/azureCosmosDbDAORegular';
import { TestDataEcoSystem } from './google/dataecosystem';
@@ -42,7 +41,6 @@ export class TestCloud {
TestGCSCore.run();
TestGoogleDatastoreDAO.run();
TestGoogleDatastoreTransactionDAO.run();
TestAzureCosmosDbDAORegular.run();
TestAzureCosmosDbDAO.run();
TestAzureCosmosDbTransactionDAO.run();
TestAzureKeyVault.run();