Skip to content
Snippets Groups Projects
Commit 06e55897 authored by Siarhei Poliak [EPAM / GCP]'s avatar Siarhei Poliak [EPAM / GCP] Committed by Oleksandr Kosse (EPAM)
Browse files

Gonrg 10453 create pipeline cimpl policy

parent 51adc260
No related branches found
No related tags found
1 merge request!564Gonrg 10453 create pipeline cimpl policy
Showing
with 519 additions and 1 deletion
......@@ -65,11 +65,15 @@ include:
- project: "osdu/platform/ci-cd-pipelines"
file: "cloud-providers/gc-global.yml"
- project: "osdu/platform/ci-cd-pipelines"
file: "cloud-providers/core-global.yml"
- local: "/devops/azure/override-stages.yml"
- local: "/devops/aws/override-stages.yaml"
- local: "/devops/aws/bootstrap.yaml"
- local: "/devops/ibm/bootstrap-bundle.yml"
- local: "/devops/gc/pipeline/override-stages.yml"
- local: "/devops/core-plus/pipeline/override-stages.yml"
- local: "/loadtest/pipeline-loadtest.yml"
- local: "/publish.yml"
......
......@@ -29,7 +29,7 @@ The following software have components provided under the terms of this license:
- google-cloud-storage (from https://github.com/GoogleCloudPlatform/google-cloud-python, https://github.com/googleapis/python-storage)
- google-crc32c (from https://github.com/googleapis/python-crc32c)
- google-resumable-media (from https://github.com/googleapis/google-resumable-media-python)
- googleapis-common-protos (from https://github.com/googleapis/python-api-common-protos)
- googleapis-common-protos (from https://github.com/googleapis/google-cloud-python/tree/main/packages/googleapis-common-protos)
- kubernetes (from https://github.com/kubernetes-client/python)
- packaging (from https://pypi.org/project/packaging/, https://pypi.org/project/packaging/22.0/)
- proto-plus (from https://pypi.org/project/proto-plus/)
......
# Runtime image for the policy service (uvicorn/FastAPI app under /opt).
FROM python:3.12.8-alpine

# set environment variables
# PYTHONDONTWRITEBYTECODE - Prevents Python from writing pyc files to disc (equivalent to python -B option)
ENV PYTHONDONTWRITEBYTECODE=1
# PYTHONUNBUFFERED - Prevents Python from buffering stdout and stderr (equivalent to python -u option)
ENV PYTHONUNBUFFERED=1

# Build dependencies for native wheels (cffi/cryptography etc.).
# --no-cache keeps the apk index out of the image layer.
RUN apk update && apk upgrade && \
    apk add --no-cache gcc python3-dev musl-dev linux-headers libffi-dev openssl-dev

EXPOSE 8080/tcp
WORKDIR /opt
COPY ./app /opt
RUN pip install --no-cache-dir --upgrade -r /opt/requirements.txt
# Explicitly pin/upgrade setuptools past the copy bundled with the base image
# (presumably to address advisories in that copy — TODO confirm rationale).
RUN pip install --no-cache-dir setuptools==75.7.0 --upgrade

# Create non-root user/group with numeric UID/GID
RUN addgroup -g 10001 -S nonroot && \
    adduser -u 10001 -S nonroot -G nonroot
# Use numeric UID explicitly so runAsNonRoot admission checks can validate it
USER 10001

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8080", "--workers", "4", "--proxy-headers"]
import os
import tarfile
from jinja2 import Environment, FileSystemLoader, select_autoescape
import argparse
class BootstrapDataPartitionBundles:
    """Render partition-scoped OPA policy files from Jinja2 templates and pack
    them into a gzipped tar bundle named ``bundle-<dp_id>.tar.gz``.
    """

    # Default location of the Jinja2 templates, relative to the working directory.
    DEFAULT_TEMPLATE_PATH = "devops/core-plus/bootstrap-osdu-module/templates/"

    @staticmethod
    def create_and_upload_dp_bundles(dp_id, template_path=DEFAULT_TEMPLATE_PATH):
        """Render dataauthz/search/.manifest for ``dp_id`` and archive them.

        Declared as a staticmethod because callers invoke it directly on the
        class (``BootstrapDataPartitionBundles.create_and_upload_dp_bundles``).

        Args:
            dp_id: Data partition id substituted into each template.
            template_path: Directory containing the ``*.rego``/``*.manifest``
                templates. Defaults to the in-repo location, but is now a
                parameter so the script can run from other working directories.

        Side effects: writes the three rendered files and the tar archive
        into the current working directory.
        """
        tar_name = "bundle-{dp}.tar.gz".format(dp=dp_id)

        # (template file, rendered output file) pairs.
        renders = [
            ("dataauthz_template.rego", "dataauthz.rego"),
            ("manifest_template.manifest", ".manifest"),
            ("search_template.rego", "search.rego"),
        ]

        env = Environment(
            loader=FileSystemLoader(template_path), autoescape=select_autoescape()
        )

        # Render every template with the partition id and write it to disk.
        # (The original used three copy-pasted blocks and reused the `f2`
        # handle name for two different files.)
        for template_name, out_name in renders:
            rendered = env.get_template(template_name).render(dp_id=dp_id)
            with open(out_name, "w") as out_file:
                out_file.write(rendered)

        # Bundle the rendered files; arcname keeps paths flat inside the tar.
        with tarfile.open(tar_name, "w:gz") as tar_handle:
            for _, out_name in renders:
                tar_handle.add(os.path.abspath(out_name), arcname=out_name)
# CLI entry point: render and archive the policy bundle for one data partition.
if __name__ == "__main__":
    cli = argparse.ArgumentParser()
    cli.add_argument("--partition", required=True, type=str)
    parsed = cli.parse_args()
    BootstrapDataPartitionBundles.create_and_upload_dp_bundles(parsed.partition)
# Bootstrap image: renders policy bundles and uploads them to MinIO/GCS.
FROM gcr.io/google.com/cloudsdktool/cloud-sdk:alpine

WORKDIR /opt

# Allow pip to install into system site-packages on PEP 668 "managed" distros.
# Use the key=value form; the legacy space-separated ENV form is deprecated.
ENV PIP_BREAK_SYSTEM_PACKAGES=1

COPY ./requirements_bootstrap.txt ./devops/core-plus/bootstrap-osdu-module/*.sh /opt/
COPY ./deployment/ /opt/deployment
COPY ./devops/core-plus/bootstrap-osdu-module /opt/devops/core-plus/bootstrap-osdu-module
RUN chmod 775 /opt/bootstrap_policy.sh

# --no-cache keeps the apk index out of the layer; --no-cache-dir does the same for pip.
RUN apk add --no-cache py3-pip wget jq
RUN pip3 install --no-cache-dir -r /opt/requirements_bootstrap.txt -r /opt/devops/core-plus/bootstrap-osdu-module/requirements.txt

# MinIO client used by bootstrap_policy.sh to upload bundles.
RUN wget --quiet https://dl.min.io/client/mc/release/linux-amd64/mc && chmod +x mc && mv mc /usr/bin/mc

# Run as a fixed non-root UID/GID.
RUN addgroup -g 10001 -S nonroot \
    && adduser -h /opt -G nonroot -S -u 10001 nonroot
RUN chown -R 10001:10001 /opt
USER 10001:10001

# Keep the container alive after bootstrap completes (see /tmp/bootstrap_ready marker).
CMD ["/bin/bash", "-c", "/opt/bootstrap_policy.sh && sleep 365d"]
# Prerequisites
* create a workload identity GKE service account (it is mandatory that the bootstrap script runs under this SA)
* the kubernetes job should be created in a namespace that is free from istio-injection (otherwise the job will stay in RUNNING state indefinitely because of the side-car container)
* set all required ENV variables, they are listed in TF bootstrap job.tf under env directive
#!/usr/bin/env bash
#
# The following script renders and archives bundles of policies for instance and partition level
# After that archives are uploaded to GCS bucket or MinIO bucket
set -ex

source ./validate-env.sh "PARTITION_BASE_URL"

create_instance_bundles() {
  # Renders and archives instance-level policies into /opt/policies/bundle.tar.gz
  echo "Archiving bundle of instance policies..."
  tar -czf bundle.tar.gz --directory='/opt/deployment/default-policies' --exclude='./bootstrap_sequence.json' . --verbose
  mkdir --parents /opt/policies
  # Use the explicit destination instead of "$_" (last argument of the previous
  # command) — same path, but robust to edits of the mkdir line above.
  mv bundle.tar.gz /opt/policies
  echo "Instance policies archive is ready"
}

create_partition_bundle() {
  # Renders and archives policies for data_partition
  # Creates archive named bundle-<data_partition>.tar.gz in /opt/policies
  # Args: $1 - data_partition_id
  local DATA_PARTITION=$1
  echo "Archiving bundle of policies for partition: ${DATA_PARTITION}..."
  python3 /opt/devops/core-plus/bootstrap-osdu-module/DataPartitionBundles.py --partition "${DATA_PARTITION}"
  mv /opt/bundle-"${DATA_PARTITION}".tar.gz /opt/policies
  echo "${DATA_PARTITION} partition archive is ready"
}

bootstrap_minio() {
  # Uploads every archive in /opt/policies to the MinIO policy bucket,
  # skipping objects that already exist there.
  echo "Configuring mc tool"
  mc alias set minio "${MINIO_HOST}":"${MINIO_PORT}" "${MINIO_ACCESS_KEY}" "${MINIO_SECRET_KEY}"
  echo "Pushing archives to Minio bucket"
  for file in /opt/policies/*; do
    echo "Processing $file:"
    file_name=${file##*/}
    # Check if file already exists
    if mc stat minio/"${POLICY_BUCKET}"/"$file_name" >/dev/null 2>&1; then
      echo "Skipping $file: already exists in bucket"
    else
      mc cp "$file" minio/"${POLICY_BUCKET}"/"$file_name"
    fi
  done
  echo "Bootstrap finished successfully"
}

# Main part
source ./validate-env.sh "POLICY_BUCKET"

# Creating instance bundles
create_instance_bundles

# Get all partitions (the reserved "system" partition is excluded)
PARTITIONS_LIST=$(curl --location "${PARTITION_BASE_URL}/api/partition/v1/partitions" | jq -r '[.[] | select(. != "system")] | join(",")')

# Check for partition bootstrap
if [ -z "$PARTITIONS_LIST" ]
then
  echo "Partition bootstrap is not finished"
  exit 1
fi

IFS=',' read -ra PARTITIONS <<< "${PARTITIONS_LIST}"
# Bug fix: an unquoted $PARTITIONS expands to the first array element only;
# expand the whole array so every partition is logged.
echo "${PARTITIONS[@]}"

# Creating partition bundles
for PARTITION in "${PARTITIONS[@]}"; do
  create_partition_bundle "${PARTITION}"
done

# Uploading bundles to gcs/minio bucket
source ./validate-env.sh "MINIO_HOST"
source ./validate-env.sh "MINIO_ACCESS_KEY"
source ./validate-env.sh "MINIO_SECRET_KEY"
source ./validate-env.sh "MINIO_PORT"
bootstrap_minio

# Readiness marker; presumably consumed by the container's probe — TODO confirm.
touch /tmp/bootstrap_ready
jinja2==3.1.2
# Partition-level dataauthz policy template; {{dp_id}} is substituted by
# Jinja2 during bootstrap (see DataPartitionBundles.py).
package osdu.partition["{{dp_id}}"].dataauthz
import data.osdu.instance.dataauthz as centralauthz
# Delegate record authorization to the shared instance-level policy.
records := centralauthz.records
{
"roots": ["osdu/partition/{{dp_id}}"]
}
# Partition-level search policy template; {{dp_id}} is substituted by
# Jinja2 during bootstrap (see DataPartitionBundles.py).
package osdu.partition["{{dp_id}}"].search
# Deny unless one of the allow rules below matches.
default allow = false
# Viewers: "view" is allowed when the caller shares a group with acl.viewers.
allow = true {
input.operation == "view"
# At least one user group needs to be in acl viewers
input.record.acl.viewers[_]==input.groups[_]
}
# Owners: any listed operation is allowed when the caller shares a group with acl.owners.
allow = true {
input.operation == ["view", "create", "update", "delete", "purge"][_]
# At least one user group needs to be in acl owners
input.record.acl.owners[_]==input.groups[_]
}
#!/usr/bin/env bash
# Fails fast when the environment variable named by $1 is unset or empty.
# Meant to be sourced by other bootstrap scripts: `source ./validate-env.sh VAR`.
{ set +x ;} 2> /dev/null # disable tracing so secret values are never echoed
set -e
ENV_VAR_NAME=$1
# ${!ENV_VAR_NAME} is bash indirect expansion: the value of the variable
# whose name is stored in ENV_VAR_NAME.
if [ -z "${!ENV_VAR_NAME}" ]
then
    echo "Missing environment variable '$ENV_VAR_NAME'. Please provide all variables and try again"
    { set -x ;} 2> /dev/null # restore tracing before exiting
    exit 1
fi
{ set -x ;} 2> /dev/null # restore tracing
apiVersion: v2
name: core-plus-policy-deploy
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.19.0"
<!--- Deploy -->
# CORE Policy service
## Introduction
This chart deploys policy service on a [Kubernetes](https://kubernetes.io) cluster using [Helm](https://helm.sh) package manager.
## Prerequisites
The code was tested on **Kubernetes cluster** (v1.23.12) with **Istio** (1.15)
> It is possible to use other versions, but they haven't been tested
### Operation system
The code works in Debian-based Linux (Debian 10 and Ubuntu 20.04) and Windows WSL 2. Also, it works but is not guaranteed in Google Cloud Shell. All other operating systems, including macOS, are not verified or supported.
### Packages
Packages are only needed for installation from a local computer.
- **HELM** (version: v3.7.1 or higher) [helm](https://helm.sh/docs/intro/install/)
- **Kubectl** (version: v1.23.12 or higher) [kubectl](https://kubernetes.io/docs/tasks/tools/#kubectl)
## Installation
First you need to set variables in **values.yaml** file using any code editor. Some of the values are prefilled, but you need to specify some values as well. You can find more information about them below.
### Global variables
| Name | Description | Type | Default |Required |
|------|-------------|------|---------|---------|
**global.domain** | your domain for the external endpoint, ex `example.com` | string | - | yes
**global.limitsEnabled** | whether CPU and memory limits are enabled | boolean | `true` | yes
**global.dataPartitionId** | data partition id | string | - | yes
**global.logLevel** | severity of logging level | string | `ERROR` | yes
### Common variables
| Name | Description | Type | Default |Required |
|------|-------------|------|---------|----------|
**data.logLevel** | logging severity level for this service only | string | - | yes, only if differs from the `global.logLevel`
**data.image** | policy image name | string | - | yes
**data.requestsCpu** | amount of requests CPU | string | `10m` | yes
**data.requestsMemory** | amount of requests memory| string | `200Mi` | yes
**data.limitsCpu** | CPU limit | string | `1` | only if `global.limitsEnabled` is true
**data.limitsMemory** | memory limit | string | `1G` | only if `global.limitsEnabled` is true
**data.serviceAccountName** | name of your service account | string | - | yes
**data.imagePullPolicy** | when to pull image | string | `IfNotPresent` | yes
**data.bucketName** | bucket name | string | - | yes
**data.scopes** | scope of OPA | string | `https://www.googleapis.com/auth/devstorage.read_only` | yes
**data.entitlementsHost** | Entitlements host | string | `http://entitlements` | yes
**data.entitlementsBasePath** | Entitlements path | string | `/api/entitlements/v2/groups` | yes
**data.useBundles** | use bundle or not | string | `yes` | yes
**data.legalHost** | Legal host | string | `http://legal` | yes
**data.partitionHost** | Partition host | string | `http://partition` | yes
### Baremetal variables
| Name | Description | Type | Default |Required |
|------|-------------|------|---------|---------|
**data.minioHost** | minio host | string | `http://minio:9000` | yes
**conf.minioSecretName** | secret name for the app | string | `policy-minio-secret` | yes
### Config variables
| Name | Description | Type | Default |Required |
|------|-------------|------|---------|---------|
**conf.appName** | name of the app | string | `policy` | yes
**conf.configmap** | configmap to be used | string | `policy-config` | yes
**conf.bootstrapSecretName** | secret name for the bootstrap | string | `minio-bootstrap-secret` | yes
**conf.minDelaySeconds** | min delay for bundle download | num | `6` | yes
**conf.maxDelaySeconds** | max delay for bundle download | num | `12` | yes
### Bootstrap variables
| Name | Description | Type | Default |Required |
|------|-------------|------|---------|---------|
**data.bootstrapImage** | image for bootstrap deployment | string | - | yes
**data.bootstrapServiceAccountName** | service account for bootstrap deployment | string | - | yes
### OPA variables
| Name | Description | Type | Default |Required |
|------|-------------|------|---------|---------|
**opa.conf.configmap** | configmap to be used | string | `opa-config` | yes
**opa.conf.envConfig** | configmap with env vars | string | `opa-env-config` | yes
**opa.conf.appName** | name of the app | string | `opa` | yes
**opa.data.serviceAccountName** | name of your service account | string | `opa-k8s` | yes
**opa.data.image** | image name | string | - | yes
### ISTIO variables
| Name | Description | Type | Default |Required |
|------|-------------|------|---------|---------|
**istio.proxyCPU** | CPU request for Envoy sidecars | string | `10m` | yes
**istio.proxyCPULimit** | CPU limit for Envoy sidecars | string | `500m` | yes
**istio.proxyMemory** | memory request for Envoy sidecars | string | `100Mi` | yes
**istio.proxyMemoryLimit** | memory limit for Envoy sidecars | string | `512Mi` | yes
**istio.bootstrapProxyCPU** | CPU request for Envoy sidecars | string | `10m` | yes
**istio.bootstrapProxyCPULimit** | CPU limit for Envoy sidecars | string | `100m` | yes
### Methodology for Parameter Calculation variables: **hpa.targetValue**, **limits.maxTokens** and **limits.tokensPerFill**
The parameters **hpa.targetValue**, **limits.maxTokens** and **limits.tokensPerFill** were determined through empirical testing during load testing. These tests were conducted using the N2D machine series, which can run on either AMD EPYC Milan or AMD EPYC Rome processors. The values were fine-tuned to ensure optimal performance under typical workloads.
### Recommendations for New Instance Types
When changing the instance type to a newer generation, such as the C3D series, it is essential to conduct new load testing. This ensures the parameters are recalibrated to match the performance characteristics of the new processor architecture, optimizing resource utilization and maintaining application stability.
### Install the helm chart
Run this command from within this directory:
```console
helm install core-plus-policy-deploy .
```
## Uninstalling the Chart
To uninstall the helm deployment:
```console
helm uninstall core-plus-policy-deploy
```
[Move-to-Top](#core-policy-service)
# OPA server Deployment: an init container bootstraps /config, then the main
# container runs `opa run --server` with the config mounted at /config/config.yaml.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: "{{ .Values.opa.conf.appName }}"
  labels:
    app: "{{ .Values.opa.conf.appName }}"
    type: core
  namespace: "{{ .Release.Namespace }}"
spec:
  replicas: {{ .Values.opa.conf.replicas }}
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: "{{ .Values.opa.conf.appName }}"
  template:
    metadata:
      labels:
        app: "{{ .Values.opa.conf.appName }}"
      annotations:
        # Random value forces a pod restart on every `helm upgrade`.
        rollme: {{ randAlphaNum 5 | quote }}
        sidecar.istio.io/proxyCPU: {{ .Values.istio.proxyCPU | quote }}
        sidecar.istio.io/proxyMemory: {{ .Values.istio.proxyMemory | quote }}
        sidecar.istio.io/proxyCPULimit: {{ .Values.istio.proxyCPULimit | quote }}
        sidecar.istio.io/proxyMemoryLimit: {{ .Values.istio.proxyMemoryLimit | quote }}
      name: "{{ .Values.opa.conf.appName }}"
    spec:
      initContainers:
        - name: "{{ .Values.opa.init.name }}"
          image: "{{ .Values.opa.init.image }}"
          securityContext:
            # 1337 is the Istio sidecar UID — presumably required for traffic
            # interception during init; TODO confirm.
            runAsUser: 1337
          volumeMounts:
            - name: config
              mountPath: /config
          envFrom:
            - configMapRef:
                name: {{ printf "%s-bootstrap" .Values.conf.configmap | quote }}
      containers:
        - name: "{{ .Values.opa.conf.appName }}"
          image: "{{ .Values.opa.data.image }}"
          imagePullPolicy: "{{ .Values.data.imagePullPolicy }}"
          ports:
            - containerPort: 8181
          args:
            - "run"
            - "--ignore=.*" # exclude hidden dirs created by Kubernetes
            - "--server"
            - "--config-file=/config/config.yaml"
          resources:
            # NOTE(review): requests/limits mix .Values.data.* with
            # .Values.opa.data.requestsMemory — confirm whether OPA-specific
            # values were intended for cpu and limits as well.
            requests:
              cpu: "{{ .Values.data.requestsCpu }}"
              memory: "{{ .Values.opa.data.requestsMemory }}"
            {{- if .Values.global.limitsEnabled }}
            limits:
              cpu: "{{ .Values.data.limitsCpu }}"
              memory: "{{ .Values.data.limitsMemory }}"
            {{- end }}
          volumeMounts:
            - name: config
              mountPath: /config
          envFrom:
            - configMapRef:
                name: "{{ .Values.opa.conf.envConfig }}"
          env:
            # MinIO credentials surfaced under the AWS names OPA's S3 bundle
            # downloader reads.
            - name: AWS_ACCESS_KEY_ID
              valueFrom:
                secretKeyRef:
                  name: "{{ .Values.conf.minioSecretName }}"
                  key: MINIO_ACCESS_KEY
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: "{{ .Values.conf.minioSecretName }}"
                  key: MINIO_SECRET_KEY
            - name: AWS_REGION
              valueFrom:
                secretKeyRef:
                  name: "{{ .Values.conf.minioSecretName }}"
                  key: AWS_REGION
          securityContext:
            allowPrivilegeEscalation: false
            runAsNonRoot: true
      volumes:
        - name: config
          emptyDir: {}
      serviceAccountName: "{{ .Values.opa.data.serviceAccountName }}"
# Environment ConfigMap for the OPA container (service endpoints).
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: "{{ .Values.opa.conf.appName }}"
  name: "{{ .Values.opa.conf.envConfig }}"
  namespace: "{{ .Release.Namespace }}"
data:
  LEGAL_BASE_URL: "{{ .Values.data.legalHost }}"
  ENTITLEMENTS_BASE_URL: "{{ .Values.data.entitlementsHost }}"
# ServiceAccount under which the OPA Deployment runs.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: "{{ .Values.opa.data.serviceAccountName }}"
  namespace: "{{ .Release.Namespace }}"
# ClusterIP Service exposing the OPA server (container port 8181) on port 80.
kind: Service
apiVersion: v1
metadata:
  name: "{{ .Values.opa.conf.appName }}"
  labels:
    app: "{{ .Values.opa.conf.appName }}"
  namespace: "{{ .Release.Namespace }}"
spec:
  type: ClusterIP
  ports:
    - protocol: TCP
      port: 80
      targetPort: 8181
      name: http
  selector:
    app: "{{ .Values.opa.conf.appName }}"
# ConfigMap consumed by the bootstrap job/init container ("<configmap>-bootstrap").
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: "{{ .Values.conf.appName }}"
  name: "{{ .Values.conf.configmap }}-bootstrap"
  namespace: "{{ .Release.Namespace }}"
data:
  POLICY_BUCKET: "{{ .Values.data.bucketName }}"
  PARTITION_BASE_URL: {{ .Values.data.partitionHost | quote }}
  ONPREM_ENABLED: "true"
# Main ConfigMap for the policy service (baremetal/MinIO flavour).
apiVersion: v1
kind: ConfigMap
metadata:
  labels:
    app: "{{ .Values.conf.appName }}"
  name: "{{ .Values.conf.configmap }}"
  namespace: "{{ .Release.Namespace }}"
data:
  LOG_LEVEL: {{ .Values.data.logLevel | default .Values.global.logLevel | quote }}
  OPA_URL: {{ printf "http://%s" .Values.opa.conf.appName | quote }}
  ENTITLEMENTS_BASE_URL: "{{ .Values.data.entitlementsHost }}"
  ENTITLEMENTS_BASE_PATH: "{{ .Values.data.entitlementsBasePath }}"
  LEGAL_BASE_URL: "{{ .Values.data.legalHost }}"
  POLICY_BUCKET: "{{ .Values.data.bucketName }}"
  USE_BUNDLES: "{{ .Values.data.useBundles }}"
  CLOUD_PROVIDER: "baremetal"
  MINIO_ENDPOINT: "{{ .Values.data.minioHost }}"
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment