Commit e36074c5 authored by Daniel Perez
Browse files

ci: updating templates for ci cd pipeline

parent fc4de63b
Pipeline #65301 failed with stages
in 2 minutes and 13 seconds
......@@ -4,13 +4,13 @@ sdms_compile-and-unit-test:
stage: build
artifacts:
reports:
junit: app/$SERVICE/test-results.xml
cobertura: app/$SERVICE/coverage/cobertura-coverage.xml
junit: app/$SDMS_SERVICE/test-results.xml
cobertura: app/$SDMS_SERVICE/coverage/cobertura-coverage.xml
paths:
- app/$SERVICE/coverage/
- app/$SDMS_SERVICE/coverage/
expire_in: 15 days
script:
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- chmod +x devops/scripts/utest_build_run.sh
- devops/scripts/utest_build_run.sh
only:
......
......@@ -28,7 +28,7 @@ sdms_aws-containerize:
docker build -f $BUILD_DIR/Dockerfile -t $LOCAL_IMAGE_TAG .;
else
echo Building BUILDER->RUNTIME containers;
cd app/$SERVICE;
cd app/$SDMS_SERVICE;
docker build -t builder:latest --file $AWS_BUILDER_DOCKERFILE_PATH .;
docker build -t $LOCAL_IMAGE_TAG --file $AWS_RUNTIME_DOCKERFILE_PATH --build-arg docker_builder_image=builder .;
fi
......@@ -60,7 +60,7 @@ sdms_aws-update-eks:
stage: deploy
needs: ['sdms_aws-containerize']
script:
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- export KUBECONFIG=/tmp/kubeconfig-${RANDOM}.yaml
- export EKS_CLUSTER_MGMT_ROLE=$(aws cloudformation describe-stacks --region $AWS_REGION --stack-name $AWS_INFRA_STACK_NAME --query "Stacks[0].Outputs[?OutputKey=='MainEKSClusterManagementRole'].OutputValue" --output text)
- export EKS_CLUSTER_NAME=$(aws cloudformation describe-stacks --region $AWS_REGION --stack-name $AWS_INFRA_STACK_NAME --query "Stacks[0].Outputs[?OutputKey=='MainEKSClusterName'].OutputValue" --output text)
......@@ -90,7 +90,7 @@ sdms_aws-test-newman:
image: $CI_REGISTRY/osdu/platform/deployment-and-operations/base-containers-aws/aws-node/aws-node:v1.0-node14
needs: [{ job: 'sdms_aws-update-eks', optional: true }]
script:
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- pip3 install -r devops/aws/requirements.txt
- svctoken=$(python3 devops/scripts/aws_jwt_client.py)
- npm install -g newman
......
......@@ -20,14 +20,14 @@ sdms_push_runtime_image_azure:
tags: ["osdu-medium"]
stage: containerize
variables:
SHA_IMAGE: ${PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:${CI_COMMIT_SHA}
LATEST_IMAGE: ${PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:latest
SHA_IMAGE: ${SDMS_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:${CI_COMMIT_SHA}
LATEST_IMAGE: ${SDMS_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:latest
before_script:
- az login --service-principal -u $AZURE_PRINCIPAL_ID -p $AZURE_PRINCIPAL_SECRET --tenant $AZURE_TENANT_ID
- az acr login -n $AZURE_REGISTRY
script:
# Azure Container Registry
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- docker build -t builder_sdms:latest --file docker/builder.Dockerfile .
- docker build -t $UTEST_RUNTIME_IMAGE --file docker/runtime.Dockerfile --build-arg docker_builder_image=builder_sdms .
- docker tag $UTEST_RUNTIME_IMAGE ${AZURE_REGISTRY}.azurecr.io/$SHA_IMAGE
......@@ -53,7 +53,7 @@ sdms_azure_deploy:
variables:
AZURE_KEYVAULT: osdu-svc-properties
DES_URL: ${AZURE_DNS_NAME}
IMAGE: ${AZURE_REGISTRY}.azurecr.io/${PROJECT_NAME}-${CI_COMMIT_REF_SLUG}
IMAGE: ${AZURE_REGISTRY}.azurecr.io/${SDMS_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}
SERVICE_NAME: ${AZURE_SERVICE}
TAG: ${CI_COMMIT_SHA}
before_script:
......@@ -61,7 +61,7 @@ sdms_azure_deploy:
- az aks get-credentials -g $AZURE_UNIQUE-rg -n $AZURE_UNIQUE-aks
script:
# Replace values in config file
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- cp ${CHART_PATH}/helm-config.yaml ${CHART_PATH}/values.yaml
- sed -i 's,#{CONTAINER_REGISTRY_NAME}#,'$IMAGE',' ${CHART_PATH}/values.yaml
- sed -i 's,#{DNS_HOST}#,'$DES_URL',' ${CHART_PATH}/values.yaml
......@@ -79,8 +79,8 @@ sdms_azure_deploy:
- helm upgrade $SERVICE_NAME ${CHART_PATH} --install --dry-run --values $CHART_PATH/values.yaml
- helm upgrade $SERVICE_NAME ${CHART_PATH} --install --values $CHART_PATH/values.yaml
# Wait for service to be running to start
- kubectl rollout status deployment.v1.apps/$SERVICE_NAME -n osdu --timeout=900s
- pod=$(kubectl get pod -n osdu|grep $PROJECT_NAME |tail -1 |awk '{print $1}')
- kubectl rollout status deployment.v1.apps/$SDMS_SERVICE_NAME -n osdu --timeout=900s
- pod=$(kubectl get pod -n osdu|grep $SDMS_PROJECT_NAME |tail -1 |awk '{print $1}')
- status=$(kubectl wait -n osdu --for=condition=Ready pod/$pod --timeout=600s)
- if [[ "$status" != *"met"* ]]; then echo "POD didn't start correctly" ; exit 1 ; fi
only:
......@@ -105,7 +105,7 @@ sdms_azure_test:
AZURE_TESTER_SERVICEPRINCIPAL_SECRET: $AZURE_PRINCIPAL_SECRET
INTEGRATION_TESTER: $AZURE_PRINCIPAL_ID
script:
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- apt-get update
- apt-get install -y python
- apt-get install -y python-pip
......
......@@ -48,14 +48,14 @@ sdms_push_runtime_image_gcp:
tags: ["osdu-medium"]
stage: containerize
variables:
SHA_IMAGE: ${PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:${CI_COMMIT_SHA}
LATEST_IMAGE: ${PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:latest
SHA_IMAGE: ${SDMS_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:${CI_COMMIT_SHA}
LATEST_IMAGE: ${SDMS_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:latest
before_script:
- gcloud auth activate-service-account --key-file="$GCP_DEPLOY_FILE"
- gcloud config set project $GCP_PROJECT
script:
# GCP Container Registry
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- gcloud builds submit --config provider/$GCP_VENDOR/cloudbuild/cloudbuild.yaml --substitutions=_GCP_SERVICE=$GCP_SERVICE,_APPLICATION_NAME=$GCP_APPLICATION_NAME,_PROVIDER_NAME=$GCP_VENDOR,_SHORT_SHA=$CI_COMMIT_SHORT_SHA,_PORT=$PORT
only:
variables:
......@@ -75,7 +75,7 @@ sdms_osdu-gcp-containerize-gcloud:
image: gcr.io/google.com/cloudsdktool/cloud-sdk
cache: {}
script:
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- gcloud auth activate-service-account --key-file $OSDU_GCP_DEPLOY_FILE
- gcloud config set project $OSDU_GCP_PROJECT
- touch .gcloudignore
......@@ -100,7 +100,7 @@ sdms_osdu-gcp-containerize-gitlab:
cache: {}
allow_failure: true
script:
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- export EXTRA_DOCKER_TAG=""; if [ "$CI_COMMIT_TAG" != "" ] ; then EXTRA_DOCKER_TAG="-t $CI_REGISTRY_IMAGE/osdu-gcp:$CI_COMMIT_TAG" ; elif [ "$CI_COMMIT_REF_NAME" = "master" ] ; then EXTRA_DOCKER_TAG="-t $CI_REGISTRY_IMAGE/osdu-gcp:latest" ; fi
- docker build -t $CI_REGISTRY_IMAGE/osdu-gcp:$CI_COMMIT_SHORT_SHA $EXTRA_DOCKER_TAG --file docker/runtime.Dockerfile .
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
......
......@@ -12,12 +12,12 @@ sdms_ibm-deploy:
stage: deploy
needs: ['sdms_compile-and-unit-test']
script:
- cd app/sdms
- cd app/$SDMS_SERVICE
- echo "In IBM deploy"
- oc project $IBM_OPENSHIFT_PROJECT
- oc get bc/$PROJECT_NAME 2> /dev/null || oc new-build --name $PROJECT_NAME --binary --strategy source --image-stream openshift/ubi8-nodejs-14
- oc start-build $PROJECT_NAME --from-dir=. --follow
- oc get service $PROJECT_NAME 2> /dev/null || oc new-app $PROJECT_NAME
- oc get bc/$SDMS_PROJECT_NAME 2> /dev/null || oc new-build --name $SDMS_PROJECT_NAME --binary --strategy source --image-stream openshift/ubi8-nodejs-14
- oc start-build $SDMS_PROJECT_NAME --from-dir=. --follow
- oc get service $SDMS_PROJECT_NAME 2> /dev/null || oc new-app $SDMS_PROJECT_NAME
only:
variables:
- $IBM == 'true'
......@@ -34,13 +34,13 @@ sdms_ibm-deploy-devpri:
stage: deploy
needs: ['sdms_compile-and-unit-test']
script:
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- echo "In IBM deploy"
- oc login --token=$IBM_PRIMARY_DEV_TOKEN --server=$IBM_PRIMARY_DEV_URL
- oc project $IBM_OPENSHIFT_DEVPRI_PROJECT
- oc get bc/$PROJECT_NAME 2> /dev/null || oc new-build --name $PROJECT_NAME --binary --strategy source --image-stream openshift/ubi8-nodejs-14
- oc start-build $PROJECT_NAME --from-dir=. --follow
- oc get service $PROJECT_NAME 2> /dev/null || oc new-app $PROJECT_NAME
- oc get bc/$SDMS_PROJECT_NAME 2> /dev/null || oc new-build --name $SDMS_PROJECT_NAME --binary --strategy source --image-stream openshift/ubi8-nodejs-14
- oc start-build $SDMS_PROJECT_NAME --from-dir=. --follow
- oc get service $SDMS_PROJECT_NAME 2> /dev/null || oc new-app $SDMS_PROJECT_NAME
rules:
- if: '$CI_COMMIT_BRANCH == "master"'
changes:
......@@ -56,7 +56,7 @@ sdms_ibm-test:
allow_failure: false
needs: ["sdms_ibm-deploy"]
script:
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- apt-get update
- apt-get install -y python
- apt-get install -y python-pip
......@@ -82,7 +82,7 @@ sdms_ibm-preship-promote:
stage: publish
needs: ['sdms_ibm-test']
script:
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- oc login --token=$IBM_PRESHIP_OPENSHIFT_TOKEN --server=$IBM_PRESHIP_OPENSHIFT_URL
- oc project $IBM_OPENSHIFT_PRESHIP_PROJECT
- oc start-build og-$PROJECT_NAME-ibm
......
......@@ -5,13 +5,13 @@ sdms_push_runtime_image:
tags: ["osdu-medium"]
stage: containerize
variables:
SHA_IMAGE: ${PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:${CI_COMMIT_SHA}
LATEST_IMAGE: ${PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:latest
SHA_IMAGE: ${SDMS_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:${CI_COMMIT_SHA}
LATEST_IMAGE: ${SDMS_PROJECT_NAME}-${CI_COMMIT_REF_SLUG}:latest
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
script:
# Gitlab Container Registry
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- docker build -t builder_sdms:latest --file docker/builder.Dockerfile .
- docker build -t $UTEST_RUNTIME_IMAGE --file docker/runtime.Dockerfile --build-arg docker_builder_image=builder_sdms .
- docker tag $UTEST_RUNTIME_IMAGE $CI_REGISTRY_IMAGE/$SHA_IMAGE
......
variables:
#seismic store service variables
SERVICE: sdms
PROJECT_TITLE: seismic-dms-service
PROJECT_NAME: seismic-store-service
SDMS_SERVICE: sdms
SDMS_PROJECT_TITLE: seismic-dms-service
SDMS_PROJECT_NAME: seismic-store-service
ENVIRONMENT: cloud
ISGITLAB: "true"
PORT: 80
......
......@@ -23,17 +23,17 @@ sdms_fossa-check-notice:
artifacts:
when: on_failure
paths:
- app/$SERVICE/public
- app/$SDMS_SERVICE/public
script:
# fossa-check-for-licensing-issues needs a CI_COMMIT_BRANCH defined to know how to parse the FOSSA API results
# When building tags, this isn't defined by GitLab. In that case, we use the tag name instead. If that's not defined
# then things will fail and we'll have to make this smarter
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- test -z "$CI_COMMIT_BRANCH" && export CI_COMMIT_BRANCH="$CI_COMMIT_TAG"
- mkdir -p public
- fossa report licenses --project "${PROJECT_TITLE}" --branch "${CI_COMMIT_BRANCH}" > public/NOTICE
- fossa report licenses --project "${SDMS_PROJECT_TITLE}" --branch "${CI_COMMIT_BRANCH}" > public/NOTICE
- mkdir committed-NOTICE generated-NOTICE
- |
python <<EOF
......@@ -135,7 +135,7 @@ sdms_fossa-report:
- merge_requests
variables:
FOSSA_REPORT_URL: https://app.fossa.com/api/revisions/custom%2B12773%2F${PROJECT_NAME}%24${CI_COMMIT_SHA}/attribution?access=${FOSSA_ACCESS}&includeProjectLicense=true&includeLicenseScan=true&includeDirectDependencies=true&includeDeepDependencies=true&includeLicenseHeaders=&includeLicenseList=true&format=HTML
FOSSA_REPORT_URL: https://app.fossa.com/api/revisions/custom%2B12773%2F${SDMS_PROJECT_NAME}%24${CI_COMMIT_SHA}/attribution?access=${FOSSA_ACCESS}&includeProjectLicense=true&includeLicenseScan=true&includeDirectDependencies=true&includeDeepDependencies=true&includeLicenseHeaders=&includeLicenseList=true&format=HTML
artifacts:
paths:
......
......@@ -12,10 +12,10 @@ sdms_fossa-analyze:
# fossa-check-for-licensing-issues needs a CI_COMMIT_BRANCH defined to know how to parse the FOSSA API results
# When building tags, this isn't defined by GitLab. In that case, we use the tag name instead. If that's not defined
# then things will fail and we'll have to make this smarter
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- test -z "$CI_COMMIT_BRANCH" && export CI_COMMIT_BRANCH="$CI_COMMIT_TAG"
- npm ci --production
- fossa analyze --project "${PROJECT_TITLE}" --project-url "${CI_PROJECT_URL}" --branch "${CI_COMMIT_BRANCH}"
- fossa analyze --project "${SDMS_PROJECT_TITLE}" --project-url "${CI_PROJECT_URL}" --branch "${CI_COMMIT_BRANCH}"
- fossa-check-for-licensing-issues
only:
variables:
......
......@@ -4,7 +4,7 @@ sdms_lint:
stage: scan
needs: ['sdms_compile-and-unit-test']
script:
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- npm install -g typescript
- npm install -g tslint
- npm run lint
......
......@@ -6,7 +6,7 @@ sdms_scan-for-secrets:
- job: sdms_compile-and-unit-test
artifacts: false
script:
- cd app/$SERVICE
- cd app/$SDMS_SERVICE
- detect-secrets-hook --exclude-files npm-shrinkwrap.json --exclude-files package.json --exclude-files devops/osdu/scanners/scan-for-secrets-node.yml --baseline devops/docker/detect_secrets/.secrets.baseline $(git ls-files)
only:
changes:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment