Commit b397cb02 authored by Diego Molteni

Catalogue redesigned [GOOGLE ONLY] + shared cache connections

parent a51af248
@@ -37,12 +37,7 @@ export class Auth {
esd: string, appkey: string, mustThrow = true): Promise<boolean> {
if (!this._cache) {
this._cache = new Cache<boolean>({
ADDRESS: Config.DES_REDIS_INSTANCE_ADDRESS,
PORT: Config.DES_REDIS_INSTANCE_PORT,
KEY: Config.DES_REDIS_INSTANCE_KEY,
DISABLE_TLS: Config.DES_REDIS_INSTANCE_TLS_DISABLE,
}, 'auth')
this._cache = new Cache<boolean>('auth')
}
const cacheKey = (
......
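The Cache constructor now takes only a namespace tag; the Redis connection settings that were previously passed per instance come from a single shared connection instead. A minimal sketch of what the reworked wrapper could look like, assuming a module-level client created by the initSharedCache() call added at server start later in this commit (the ioredis dependency and the get/set shape are assumptions, not taken from this diff):

import IORedis from 'ioredis';

let sharedClient: IORedis.Redis; // created once by initSharedCache(), sketched later in this commit

export class Cache<T> {
    // Each instance contributes only a key prefix; the connection is shared.
    constructor(private keyTag: string) { }

    public async get(key: string): Promise<T> {
        const value = await sharedClient.get(this.keyTag + ':' + key);
        return value ? JSON.parse(value) as T : undefined;
    }

    public async set(key: string, value: T): Promise<void> {
        await sharedClient.set(this.keyTag + ':' + key, JSON.stringify(value));
    }
}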
// ============================================================================
// Copyright 2017-2019, Schlumberger
// Copyright 2017-2021, Schlumberger
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -47,6 +47,8 @@ export interface ConfigModel {
SSL_ENABLED?: boolean;
SSL_KEY_PATH?: string;
SSL_CERT_PATH?: string;
ENFORCE_SCHEMA_BY_KEY?: boolean;
CORRELATION_ID?: string;
FEATURE_FLAG_AUTHORIZATION: boolean;
FEATURE_FLAG_LEGALTAG: boolean;
FEATURE_FLAG_SEISMICMETA_STORAGE: boolean;
@@ -133,16 +135,21 @@ export abstract class Config implements IConfig {
public static SERVICEGROUPS_PREFIX = 'service.seistore'
// Server SSL
public static SSL_ENABLED = false;
public static SSL_ENABLED: boolean;
public static SSL_KEY_PATH: string;
public static SSL_CERT_PATH: string;
public static ENFORCE_SCHEMA_BY_KEY: boolean;
// The Key name of the header correlation ID
public static CORRELATION_ID: string;
// WriteLock Skip
// This is an open issue to discuss.
// Checking the write lock is the correct behaviour and this varialbe shoudl be set to "false".
// The current client libraries are not capable to send the lockin session id on mutable operations.
// Checking the write lock is the correct behavior and this variable should be set to "false".
// The current client libraries are not capable to send the locking session id on mutable operations.
// As a result, imposing this check will break the functionality of many currently running applications.
// The C++ SDK mainly reuqire a fix on how behave on mutable calls.
// The C++ SDK mainly requires a fix for how it behaves on mutable calls.
public static SKIP_WRITE_LOCK_CHECK_ON_MUTABLE_OPERATIONS = true;
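For context, a sketch of where this flag would typically be consulted in a mutable dataset operation. The lock-owner check is illustrative only: the Locker service exists in this codebase, but the method and header names below are hypothetical.

if (!Config.SKIP_WRITE_LOCK_CHECK_ON_MUTABLE_OPERATIONS) {
    const lockSessionId = req.headers['x-lock-session-id'] as string; // hypothetical header
    // hypothetical API: verify the caller owns the write lock before mutating
    if (!lockSessionId || !(await Locker.isWriteLockOwner(datasetPath, lockSessionId))) {
        throw new Error('dataset is write-locked by another session');
    }
}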
public static setCloudProvider(cloudProvider: string) {
@@ -203,6 +210,10 @@ export abstract class Config implements IConfig {
Config.SSL_KEY_PATH = model.SSL_KEY_PATH;
Config.SSL_CERT_PATH = model.SSL_CERT_PATH;
Config.ENFORCE_SCHEMA_BY_KEY = model.ENFORCE_SCHEMA_BY_KEY || false;
Config.CORRELATION_ID = model.CORRELATION_ID || undefined;
Config.checkRequiredConfig(Config.CLOUDPROVIDER, 'CLOUDPROVIDER');
Config.checkRequiredConfig(Config.SERVICE_ENV, 'SERVICE_ENV');
Config.checkRequiredConfig(Config.IMP_SERVICE_ACCOUNT_SIGNER, 'IMP_SERVICE_ACCOUNT_SIGNER');
@@ -219,7 +230,7 @@ export abstract class Config implements IConfig {
Config.checkRequiredConfig(Config.JWT_AUDIENCE, 'JWT_AUDIENCE');
}
// autogenerated configurations
// auto-generated configurations
Config.ORGANIZATION_NS = Config.ORGANIZATION_NS + '-' + Config.SERVICE_ENV;
Config.SEISMIC_STORE_NS = Config.SEISMIC_STORE_NS + '-' + Config.SERVICE_ENV;
@@ -242,5 +253,5 @@ export class ConfigFactory extends CloudFactory {
}
}
// Set the Utest flag correctly as sooon as the config class get loaded
// Set the Utest flag correctly as soon as the config class gets loaded
Config.UTEST = process.env.UTEST;
@@ -91,6 +91,7 @@ export class AzureConfig extends Config {
JWT_AUDIENCE: process.env.JWT_AUDIENCE,
JWT_ENABLE_FEATURE: process.env.JWT_ENABLE_FEATURE ? process.env.JWT_ENABLE_FEATURE === 'true' : false,
TENANT_JOURNAL_ON_DATA_PARTITION: true,
CORRELATION_ID: 'correlation-id',
FEATURE_FLAG_AUTHORIZATION: process.env.FEATURE_FLAG_AUTHORIZATION !== undefined ?
process.env.FEATURE_FLAG_AUTHORIZATION !== 'false' : true,
FEATURE_FLAG_LEGALTAG: process.env.FEATURE_FLAG_LEGALTAG !== undefined ?
......
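The repeated ternary above implements a default-true flag: the feature stays enabled unless the variable is explicitly set to 'false'. A hypothetical helper capturing the same rule (not present in this commit):

function envFlag(name: string, defaultValue = true): boolean {
    const raw = process.env[name];
    // Any value other than the literal string 'false' keeps the flag on.
    return raw !== undefined ? raw !== 'false' : defaultValue;
}

// usage: FEATURE_FLAG_AUTHORIZATION: envFlag('FEATURE_FLAG_AUTHORIZATION'),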
@@ -75,12 +75,7 @@ export class AzureDataEcosystemServices extends AbstractDataEcosystemCore {
public static async getStorageAccountName(dataPartitionID: string): Promise<string> {
if (!this._storageConfigs) {
this._storageConfigs = new Cache<string>({
ADDRESS: AzureConfig.DES_REDIS_INSTANCE_ADDRESS,
PORT: AzureConfig.DES_REDIS_INSTANCE_PORT,
KEY: AzureConfig.DES_REDIS_INSTANCE_KEY,
DISABLE_TLS: AzureConfig.DES_REDIS_INSTANCE_TLS_DISABLE,
}, 'storage')
this._storageConfigs = new Cache<string>('storage')
}
const res = await this._storageConfigs.get(dataPartitionID);
@@ -101,12 +96,7 @@ export class AzureDataEcosystemServices extends AbstractDataEcosystemCore {
dataPartitionID: string): Promise<{ endpoint: string, key: string }> {
if (!this._cosmosConfigs) {
this._cosmosConfigs = new Cache<string>({
ADDRESS: AzureConfig.DES_REDIS_INSTANCE_ADDRESS,
PORT: AzureConfig.DES_REDIS_INSTANCE_PORT,
KEY: AzureConfig.DES_REDIS_INSTANCE_KEY,
DISABLE_TLS: AzureConfig.DES_REDIS_INSTANCE_TLS_DISABLE,
}, 'cosmos')
this._cosmosConfigs = new Cache<string>('cosmos')
}
const res = await this._cosmosConfigs.get(dataPartitionID);
......
@@ -32,7 +32,7 @@ export class ConfigGoogle extends Config {
// System admin role (tenant provisioning required role)
public static SEISTORE_SYSTEM_ADMIN_ROLE = 'seismic_store.system.admin';
// DE target audiance for service to service communication
// DE target audience for service to service communication
public static DES_SERVICE_TARGET_AUDIENCE: string;
// google cloud service project id
@@ -56,7 +56,7 @@ export class ConfigGoogle extends Config {
public async init(): Promise<void> {
// load des target audiance for service to service communication
// load des target audience for service to service communication
ConfigGoogle.DES_SERVICE_TARGET_AUDIENCE = process.env.SEISTORE_DES_TARGET_AUDIENCE;
Config.checkRequiredConfig(ConfigGoogle.DES_SERVICE_TARGET_AUDIENCE, 'DES_SERVICE_TARGET_AUDIENCE');
@@ -96,6 +96,8 @@ export class ConfigGoogle extends Config {
JWT_EXCLUDE_PATHS: process.env.JWT_EXCLUDE_PATHS,
JWT_AUDIENCE: process.env.JWT_AUDIENCE,
JWT_ENABLE_FEATURE: process.env.JWT_ENABLE_FEATURE ? process.env.JWT_ENABLE_FEATURE === 'true' : false,
ENFORCE_SCHEMA_BY_KEY: true,
CORRELATION_ID: 'correlation-id',
TENANT_JOURNAL_ON_DATA_PARTITION: false,
FEATURE_FLAG_AUTHORIZATION: process.env.FEATURE_FLAG_AUTHORIZATION !== undefined ?
process.env.FEATURE_FLAG_AUTHORIZATION !== 'false' : true,
......
@@ -68,6 +68,9 @@ export class DatastoreDAO extends AbstractJournal {
}
public createKey(specs: any): object {
if(specs.enforcedKey) {
specs.path.push(specs.enforcedKey)
}
return this.getDataStoreClient().key(specs);
}
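With enforcedKey, callers can address an entity by a deterministic, path-derived key instead of running a property query. A usage sketch consistent with the getByKey method added later in this commit:

// Build a key whose final element is the dataset's directory path plus its
// name, then fetch the entity directly by key.
const datasetKey = journalClient.createKey({
    namespace: Config.SEISMIC_STORE_NS + '-' + dataset.tenant + '-' + dataset.subproject,
    path: [Config.DATASETS_KIND],
    enforcedKey: dataset.path.slice(0, -1) + '/' + dataset.name,
});
const [entity] = await journalClient.get(datasetKey);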
......
@@ -17,6 +17,7 @@
import { Storage } from '@google-cloud/storage';
import { TenantModel } from '../../../services/tenant';
import { Config } from '../../config';
import { LoggerFactory } from '../../logger';
import { AbstractStorage, StorageFactory } from '../../storage';
import { ConfigGoogle } from './config';
@@ -34,7 +35,7 @@ export class GCS extends AbstractStorage {
this.projectID = tenant.gcpid
}
private getStorageclient(): Storage {
private getStorageClient(): Storage {
if (GCS.clientsCache[this.projectID]) {
return GCS.clientsCache[this.projectID];
} else {
@@ -57,7 +58,7 @@ export class GCS extends AbstractStorage {
// Create a new bucket
public async createBucket(
bucketName: string, location: string, storageClass: string): Promise<void> {
const bucket = this.getStorageclient().bucket(bucketName);
const bucket = this.getStorageClient().bucket(bucketName);
await bucket.create({ location, storageClass });
await bucket.setMetadata({
@@ -71,33 +72,36 @@ export class GCS extends AbstractStorage {
// Delete a bucket
public async deleteBucket(bucketName: string, force = false): Promise<void> {
await this.getStorageclient().bucket(bucketName).delete();
await this.getStorageClient().bucket(bucketName).delete();
}
// Delete all files in a bucket
public async deleteFiles(bucketName: string): Promise<void> {
await this.getStorageclient().bucket(bucketName).deleteFiles();
await this.getStorageClient().bucket(bucketName).deleteFiles();
}
// save an object/file to a bucket
public async saveObject(bucketName: string, objectName: string, data: string): Promise<void> {
// Create and save the file
await this.getStorageclient().bucket(bucketName).file(objectName).save(data);
await this.getStorageClient().bucket(bucketName).file(objectName).save(data);
}
// delete an object from a bucket
public async deleteObject(bucketName: string, objectName: string): Promise<void> {
await this.getStorageclient().bucket(bucketName).file(objectName).delete();
await this.getStorageClient().bucket(bucketName).file(objectName).delete();
}
// delete multiple objects
public async deleteObjects(bucketName: string, prefix: string, async: boolean = false): Promise<void> {
prefix = prefix ? (prefix + '/').replace('//', '/') : prefix;
if (async) {
await this.getStorageclient().bucket(bucketName).deleteFiles({ prefix, force: true });
await this.getStorageClient().bucket(bucketName).deleteFiles({ prefix, force: true });
} else {
// tslint:disable-next-line: no-floating-promises
this.getStorageclient().bucket(bucketName).deleteFiles({ prefix, force: true });
this.getStorageClient().bucket(bucketName).deleteFiles(
{ prefix, force: true }).catch(
// tslint:disable-next-line: no-console
(error)=>{ LoggerFactory.build(Config.CLOUDPROVIDER).error(JSON.stringify(error)); });
}
}
@@ -119,8 +123,8 @@ export class GCS extends AbstractStorage {
const rmPrefixIn = prefixIn ? prefixIn !== '/' : false;
const bucketFrom = this.getStorageclient().bucket(bucketIn);
const bucketTo = bucketIn === bucketOut ? undefined : this.getStorageclient().bucket(bucketOut);
const bucketFrom = this.getStorageClient().bucket(bucketIn);
const bucketTo = bucketIn === bucketOut ? undefined : this.getStorageClient().bucket(bucketOut);
const copyCalls = [];
let nextPageToken = '';
@@ -141,7 +145,7 @@
// check if a bucket exist
public async bucketExists(bucketName: string): Promise<boolean> {
const result = await this.getStorageclient().bucket(bucketName).exists();
const result = await this.getStorageClient().bucket(bucketName).exists();
return result[0];
}
......
@@ -8,6 +8,7 @@ import { Config } from '../../config';
import { Utils } from '../../../shared/utils'
import { IbmConfig } from './config';
import { logger } from './logger';
import { LoggerFactory } from '../../logger';
let docDb;
@JournalFactory.register('ibm')
@@ -19,7 +20,9 @@ export class DatastoreDAO extends AbstractJournal {
super();
logger.info('In datastore.constructor.');
this.dataPartition = tenant.esd.indexOf('.') !== -1 ? tenant.esd.split('.')[0] : tenant.esd;
this.initDb(this.dataPartition);
// tslint:disable-next-line: no-floating-promises no-console
this.initDb(this.dataPartition).catch((error)=>{
LoggerFactory.build(Config.CLOUDPROVIDER).error(JSON.stringify(error));});
}
public async initDb(dataPartition: string)
@@ -218,7 +221,7 @@ export class IbmDocDbTransactionOperation {
/**
* A wrapper class for datastore transactions
* ! Note: looks awefully close to datastore interface.
* ! Note: looks awfully close to the datastore interface.
*/
export class IbmDocDbTransactionDAO extends AbstractJournalTransaction {
@@ -374,7 +377,7 @@ export class IbmDocDbQuery implements IJournalQueryModel {
value = '';
}
logger.info('modifird values');
logger.info('modified values');
logger.debug(property);
logger.debug(operator);
logger.debug(value);
......
import Bull from 'bull';
// ============================================================================
// Copyright 2017-2019, Schlumberger
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ============================================================================
import { JournalFactoryTenantClient, StorageFactory } from '..';
import { DatasetDAO, DatasetModel } from '../../services/dataset';
import { Locker } from '../../services/dataset/locker';
import { SubProjectModel } from '../../services/subproject';
import { Config } from '../config';
import { LoggerFactory } from '../logger';
import Bull from 'bull';
export class StorageJobManager {
public static copyJobsQueue: Bull.Queue;
@@ -36,7 +52,9 @@ export class StorageJobManager {
// tslint:disable-next-line: no-floating-promises
StorageJobManager.copyJobsQueue.process(50, (input) => {
return StorageJobManager.copy(input);
});
}).catch(
// tslint:disable-next-line: no-console
(error)=>{ LoggerFactory.build(Config.CLOUDPROVIDER).error(JSON.stringify(error)); });
// setup handlers for job events
StorageJobManager.setupEventHandlers();
@@ -93,9 +111,19 @@ export class StorageJobManager {
throw err;
}
const results = await DatasetDAO.get(journalClient, input.data.datasetTo);
registeredDataset = results[0];
registeredDatasetKey = results[1];
// Retrieve the dataset metadata and key
if ((input.data.subproject as SubProjectModel).enforce_key ) {
registeredDataset = await DatasetDAO.getByKey(journalClient, input.data.datasetTo);
registeredDatasetKey = journalClient.createKey({
namespace: Config.SEISMIC_STORE_NS +
'-' + input.data.datasetTo.tenant + '-' + input.data.datasetTo.subproject,
path: [Config.DATASETS_KIND],
enforcedKey: input.data.datasetTo.path.slice(0,-1) + '/' + input.data.datasetTo.name});
} else {
const results = await DatasetDAO.get(journalClient, input.data.datasetTo);
registeredDataset = results[0];
registeredDatasetKey = results[1];
}
if (!registeredDataset) {
throw new Error('Dataset ' + datasetToPath + ' is not registered, aborting copy');
......
@@ -28,12 +28,7 @@ export class DESCompliance {
userToken: string, ltag: string, dataPartitionID: string, appkey: string): Promise<boolean> {
if (!this._cache) {
this._cache = new Cache<boolean>({
ADDRESS: Config.DES_REDIS_INSTANCE_ADDRESS,
PORT: Config.DES_REDIS_INSTANCE_PORT,
KEY: Config.DES_REDIS_INSTANCE_KEY,
DISABLE_TLS: Config.DES_REDIS_INSTANCE_TLS_DISABLE,
}, 'ltag')
this._cache = new Cache<boolean>('ltag')
}
const res = await this._cache.get(ltag);
......
@@ -14,10 +14,10 @@
// limitations under the License.
// ============================================================================
import { Config, ConfigFactory, TraceFactory } from '../cloud';
import { Config, ConfigFactory, LoggerFactory, TraceFactory } from '../cloud';
import { StorageJobManager } from '../cloud/shared/queue';
import { Locker } from '../services/dataset/locker';
import { Feature, FeatureFlags } from '../shared';
import { Feature, FeatureFlags, initSharedCache } from '../shared';
async function ServerStart() {
@@ -32,11 +32,15 @@ async function ServerStart() {
await ConfigFactory.build(Config.CLOUDPROVIDER).init();
// tslint:disable-next-line
console.log('- Initializing redis cache')
console.log('- Initializing redis locker cache')
await Locker.init();
// tslint:disable-next-line
console.log('- Initializing storage transfer deamon')
console.log('- Initializing redis shared cache')
initSharedCache();
// tslint:disable-next-line
console.log('- Initializing storage transfer daemon')
StorageJobManager.setup({
ADDRESS: Config.DES_REDIS_INSTANCE_ADDRESS,
PORT: Config.DES_REDIS_INSTANCE_PORT,
@@ -54,11 +58,11 @@ async function ServerStart() {
} catch (error) {
// tslint:disable-next-line
console.log(error);
LoggerFactory.build(Config.CLOUDPROVIDER).error(JSON.stringify(error));
process.exit(1);
}
}
// tslint:disable-next-line: no-floating-promises
ServerStart();
// tslint:disable-next-line: no-floating-promises no-console
ServerStart().catch((error)=>{ LoggerFactory.build(Config.CLOUDPROVIDER).error(JSON.stringify(error)); });
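A sketch of what initSharedCache() might do, reusing the same DES Redis settings already passed to StorageJobManager.setup above. The ioredis client and the exact option mapping are assumptions; it would create the sharedClient used by the simplified Cache constructor sketched near the top of this commit:

import IORedis from 'ioredis';
import { Config } from '../cloud';

export function initSharedCache(): void {
    // One process-wide connection for all Cache instances, instead of one per consumer.
    sharedClient = new IORedis({
        host: Config.DES_REDIS_INSTANCE_ADDRESS,
        port: Config.DES_REDIS_INSTANCE_PORT,
        password: Config.DES_REDIS_INSTANCE_KEY,
        tls: Config.DES_REDIS_INSTANCE_TLS_DISABLE ? undefined : {},
    });
}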
// ============================================================================
// Copyright 2017-2019, Schlumberger
// Copyright 2017-2021, Schlumberger
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,13 +14,13 @@
// limitations under the License.
// ============================================================================
import bodyparser from 'body-parser';
import cors from 'cors';
import express from 'express';
import jwtProxy, { JwtProxyOptions } from 'jwtproxy';
import { Config, LoggerFactory } from '../cloud';
import { ServiceRouter } from '../services';
import { Feature, FeatureFlags } from '../shared';
import { v4 as uuidv4 } from 'uuid';
import fs from 'fs';
import https from 'https';
@@ -94,8 +94,8 @@ export class Server {
}
this.app = express();
this.app.use(bodyparser.urlencoded({ extended: false }));
this.app.use(bodyparser.json());
this.app.use(express.urlencoded({ extended: false }));
this.app.use(express.json());
this.app.disable('x-powered-by');
this.app.use(cors(this.corsOptions));
this.app.options('*', cors());
@@ -127,6 +127,14 @@ export class Server {
req[Config.DE_FORWARD_APPKEY] =
req.headers['appkey'] !== req.headers['x-api-key'] ? req.headers['appkey'] : undefined
// set the header correlation id and keep a reference in the response locals
if(Config.CORRELATION_ID) {
if(!req.headers[Config.CORRELATION_ID]) {
req.headers[Config.CORRELATION_ID] = uuidv4();
}
res.locals[Config.CORRELATION_ID] = req.headers[Config.CORRELATION_ID];
}
next();
});
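Storing the id in res.locals lets later middleware and error handlers tag their output without re-reading request headers. An illustrative read side, assuming a standard Express error handler (not part of this diff):

this.app.use((error: Error, req: express.Request, res: express.Response, next: express.NextFunction) => {
    // Tag the log entry with the per-request correlation id set above.
    const correlationId = Config.CORRELATION_ID ? res.locals[Config.CORRELATION_ID] : undefined;
    LoggerFactory.build(Config.CLOUDPROVIDER).error(JSON.stringify({ correlationId, message: error.message }));
    res.status(500).send('Internal Server Error');
});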
@@ -145,13 +153,14 @@
}
public async start(port = Config.SERVICE_PORT) {
this.port = port;
// The timeout of the backend service should be greater than the timeout of the load balancer. This will
// prevent premature connection closures from the service
// Additionally, the headerstimeout needs to be greater than keepalivetimeout
// Additionally, the server's headersTimeout needs to be greater than its keepAliveTimeout (see the illustrative settings after this hunk)
// https://github.com/nodejs/node/issues/27363
// SSL
if (Config.SSL_ENABLED){
const privateKey = fs.readFileSync(Config.SSL_KEY_PATH, 'utf8');
......
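An illustrative application of the timeout guidance above; the 60-second load-balancer idle timeout is an assumed value, not taken from this commit (the SSL branch would set the same properties on its https server):

// Keep the service's timeouts above the load balancer's idle timeout so that
// the LB, not the service, closes idle connections, and keep headersTimeout
// above keepAliveTimeout (nodejs/node#27363).
const server = this.app.listen(this.port);
server.keepAliveTimeout = 65 * 1000; // > assumed 60s LB idle timeout
server.headersTimeout = 66 * 1000;   // > keepAliveTimeout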
@@ -28,6 +28,16 @@ export class DatasetDAO {
await journalClient.save(datasetEntity);
}
public static async getByKey(journalClient: IJournal, dataset: DatasetModel): Promise<DatasetModel> {
const datasetEntityKey = journalClient.createKey({
namespace: Config.SEISMIC_STORE_NS + '-' + dataset.tenant + '-' + dataset.subproject,
path: [Config.DATASETS_KIND],
enforcedKey: dataset.path.slice(0,-1) + '/' + dataset.name
});
const [entity] = await journalClient.get(datasetEntityKey);
return entity ? await this.fixOldModel(entity, dataset.tenant, dataset.subproject) : entity;
}
public static async get(
journalClient: IJournal | IJournalTransaction,
dataset: DatasetModel): Promise<[DatasetModel, any]> {
@@ -45,25 +55,10 @@ export class DatasetDAO {
dataset.tenant, dataset.subproject), entities[0][journalClient.KEY]];
}
public static async getKey(
journalClient: IJournal | IJournalTransaction, dataset: DatasetModel): Promise<[any]> {
const query = journalClient.createQuery(
Config.SEISMIC_STORE_NS + '-' + dataset.tenant + '-' +
dataset.subproject, Config.DATASETS_KIND).select('__key__')
.filter('name', dataset.name).filter('path', dataset.path);
const [entities] = await journalClient.runQuery(query);
return entities.length === 0 ? undefined : entities[0][journalClient.KEY];
}
public static async update(
journalClient: IJournal | IJournalTransaction, dataset: DatasetModel, datasetKey: any) {
dataset.ctag = Utils.makeID(16);
await journalClient.save({ key: datasetKey, data: dataset });
}
public static async updateAll(
@@ -109,12 +104,12 @@ export class DatasetDAO {
const [entities] = await journalClient.runQuery(query);
const todelete = [];
const datasetsToDelete = [];
for (const entity of entities) {
todelete.push(journalClient.delete(entity[journalClient.KEY]));
datasetsToDelete.push(journalClient.delete(entity[journalClient.KEY]));
}
await Promise.all(todelete);
await Promise.all(datasetsToDelete);
}
public static async delete(journalClient: IJournal | IJournalTransaction, dataset: DatasetModel) {
@@ -135,8 +130,9 @@ export class DatasetDAO {
Config.SEISMIC_STORE_NS + '-' + dataset.tenant + '-' + dataset.subproject, Config.DATASETS_KIND)
.select(['path']).groupBy('path').filter('path', '>', dataset.path).filter('path', '<', dataset.path + '\ufffd');
const [entitieshy] = await journalClient.runQuery(query);
output.datasets = entitieshy.map((entity) => ((entity.path || '') as string).substr(dataset.path.length));
const [hierarchicalEntities] = await journalClient.runQuery(query);
output.datasets = hierarchicalEntities.map(
(entity) => ((entity.path || '') as string).substr(dataset.path.length));
output.datasets = output.datasets.map(
(entity) => entity.substr(0, entity.indexOf('/') + 1)).filter(
(elem, index, self) => index === self.indexOf(elem));
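A worked example of the two-step transformation above, with hypothetical values:

// entity paths under dataset.path = 'r/':  ['r/a/b/', 'r/a/', 'r/b/']
// after substr(dataset.path.length):       ['a/b/', 'a/', 'b/']
// after the first-segment map:             ['a/', 'a/', 'b/']
// after the uniqueness filter:             ['a/', 'b/']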
@@ -155,9 +151,9 @@ export class DatasetDAO {
query = query.limit(pagination.limit);
}
const [entitiesds, info] = await journalClient.runQuery(query);
if (entitiesds.length !== 0) {
output.datasets = output.datasets.concat(entitiesds.map((item) => item.name));
const [datasetEntities, info] = await journalClient.runQuery(query);
if (datasetEntities.length !== 0) {
output.datasets = output.datasets.concat(datasetEntities.map((item) => item.name));
if (pagination) {
output.nextPageCursor = info.endCursor;
}
@@ -180,10 +176,11 @@ export class DatasetDAO {
if (pagination && pagination.cursor) query = query.start(pagination.cursor);
if (pagination && pagination.limit) query = query.limit(pagination.limit);
const [entitiesds, info] = (await journalClient.runQuery(query)) as [DatasetModel[], {endCursor?: string}];
const [datasetEntities, info] = (
await journalClient.runQuery(query)) as [DatasetModel[], {endCursor?: string}];
if (entitiesds.length !== 0) {
output.datasets = entitiesds.map((entity) => {
if (datasetEntities.length !== 0) {
output.datasets = datasetEntities.map((entity) => {
return {data: entity, key: entity[journalClient.KEY]};
})
if (pagination) {
@@ -206,9 +203,9 @@ export class DatasetDAO {
Config.SEISMIC_STORE_NS + '-' + dataset.tenant + '-' + dataset.subproject, Config.DATASETS_KIND)
.filter('path', dataset.path);
const [entitiesds] = await journalClient.runQuery(query);
const [datasetEntities] = await journalClient.runQuery(query);
if (entitiesds.length !== 0) { results.datasets = entitiesds.map((item) => item.name); }
if (datasetEntities.length !== 0) { results.datasets = datasetEntities.map((item) => item.name); }
}
// Extract the full directory structure and get the subdirectories for the required directory
...