Commit e04b8189 authored by Snehal Jagtap

initial commit

parent 64976ee6
Pipeline #62035 failed in 3 minutes and 7 seconds
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
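# Example (illustrative): "!notes.txt" would re-include a file excluded by an earlier pattern.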
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
apiVersion: v1
name: osdu-cloud-data-ingestion
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application.
appVersion: 1.16.0
env = "{{ .Values.pds.envTag }}"
cloud {
project {
name = "{{ .Values.googleProjectId }}"
}
global-project {
name = "{{ .Values.globalResourcesProjectId }}"
}
pubsub {
subscription {
name = "{{ .Values.pds.envTag }}-data-subscription-bigtable"
parallel-pull-count = 10
ack-deadline-seconds = 60
retain-acked-messages = true
}
topic {
name = "{{ .Values.pds.envTag }}-data-gateway"
}
cache-coherence-outbound {
outbound {
topic {
name = "buckets-gateway-{{ .Values.pds.envTag }}"
}
}
publishRetryCount = 3
}
cache-coherence-outbound-2 {
outbound {
topic {
name = "buckets-gateway-2-{{ .Values.pds.envTag }}"
}
}
publishRetryCount = 3
}
}
bigtable {
instance {
name = "{{ .Values.pds.bigtableInstance }}"
}
table {
data {
name = "ts-{{ .Values.pds.envTag }}-v2"
}
index {
name = "tsi-{{ .Values.pds.envTag }}-v2"
}
changelog {
name = "changelog-{{ .Values.pds.envTag }}-v2"
}
}
columnFamily {
names = ["d"]
}
user {
agent = "osdu-user-agent"
}
}
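# Redis (Cloud Memorystore) connection; the message-channel appears to carry
# unit-catalog cache-coherence notifications (see unitCatalogService below).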
memorystore {
host = "memorystore-redis-lb"
port = 6379
message-channel = "unit-catalog-channel"
}
}
bootloader {
class = "org.opengroup.osdu.production.PipelineApp"
}
quartz {
schedules {
probeSchedule {
description = "A cron job that runs every minute"
expression = "0 * * ? * * *"
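# Quartz cron fields are: seconds minutes hours day-of-month month day-of-week [year];
# "0 * * ? * * *" therefore fires at second 0 of every minute.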
}
}
}
batch{
dataPoints{
throttleCost = 20000,
segmentSize = 1000
}
}
modifyAckDeadlineJob{
jobIntervalMilliseconds = 100000
ackDeadlineMilliseconds = 600000
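# Presumably the job re-extends the ack deadline of still-in-flight messages every
# jobIntervalMilliseconds (100 s), pushing it out to ackDeadlineMilliseconds (600 s) each time.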
# This should be less than the Pub/Sub message ack deadline, which is 600 seconds.
oldPubSubMessageOffsetMilliseconds = 500000
}
unitCatalogService{
hostname = "unit-catalog",
port = 9090,
geturi = "/unit-catalog/api/v1/catalog"
secret = ""
}
metrics_config {
stack_driver {
kubernetesHostNameValue = "KUBERNETES_SERVICE_HOST",
monitoredResourceName = "k8s_cluster",
labels = {
clusterNameValue = "cluster_name",
locationNameValue = "location"
}
}
custom_metrics = [
{
"name": "sli_pss_writeback_api_data_ingestion_latency",
"description": "Metric to capture the data ingestion latency of writeback API (single + batch)",
"type": {
"count": false,
"sum": false,
"distribution": true
},
"unit": "ms",
"labels": {
"project": "project",
"tenant": "tenant",
"request_type":"request_type"
}
}
]
}
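# The distribution metric above records write-back ingestion latency histograms in
# milliseconds, labelled by project, tenant and request_type (single vs. batch).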
kind: Job
apiVersion: batch/v1
metadata:
name: timeseries-restore-data-job
namespace: default
spec:
parallelism: 400
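# Runs up to 400 restore pods concurrently; the <REPLACE_*> placeholders in this manifest
# must be substituted with real values before it is applied.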
template:
spec:
containers:
- name: timeseries-restore-data-job
image: gcr.io/<REPLACE_CONTAINER_REGISTRY_GOOGLE_PROJECT_NAME>/timeseries-restore:latest
imagePullPolicy: Always
resources:
requests:
memory: "6Gi"
command: ["/opt/osdu-cloud-data-ingestion/bin/restore-data-job.sh"]
env:
- name: GOOGLE_PROJECT_NAME
value: <REPLACE_GOOGLE_PROJECT_NAME>
- name: GLOBAL_GOOGLE_PROJECT_NAME
value: <REPLACE_GLOBAL_GOOGLE_PROJECT_NAME>
- name: BACKUP_TENANT_NAME
value: <REPLACE_BACKUP_TENANT_NAME>
- name: RESTORE_TENANT_NAME
value: <REPLACE_RESTORE_TENANT_NAME>
restartPolicy: Never
kind: Job
apiVersion: batch/v1
metadata:
name: timeseries-restore-metadata-publish-job
namespace: default
spec:
template:
spec:
containers:
- name: timeseries-restore-metadata-publish-job
image: gcr.io/<REPLACE_CONTAINER_REGISTRY_GOOGLE_PROJECT_NAME>/timeseries-restore:latest
command: ["/opt/osdu-cloud-data-ingestion/bin/restore-metadata-publish-job.sh"]
imagePullPolicy: Always
env:
- name: GOOGLE_PROJECT_NAME
  value: <REPLACE_GOOGLE_PROJECT_NAME>
- name: GLOBAL_GOOGLE_PROJECT_NAME
  value: <REPLACE_GLOBAL_GOOGLE_PROJECT_NAME>
- name: BACKUP_TENANT_NAME
value: <REPLACE_BACKUP_TENANT_NAME>
- name: RESTORE_TENANT_NAME
value: <REPLACE_RESTORE_TENANT_NAME>
restartPolicy: Never
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "osdu-cloud-data-ingestion.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
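{{/*
Illustrative examples (assuming no nameOverride/fullnameOverride): a release named "prod"
renders "prod-osdu-cloud-data-ingestion", while a release whose name already contains the
chart name is used as-is.
*/}}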
{{- define "osdu-cloud-data-ingestion.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "osdu-cloud-data-ingestion.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "osdu-cloud-data-ingestion.labels" -}}
app.kubernetes.io/name: {{ include "osdu-cloud-data-ingestion.name" . }}
helm.sh/chart: {{ include "osdu-cloud-data-ingestion.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ template "osdu-cloud-data-ingestion.fullname" . }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ template "osdu-cloud-data-ingestion.fullname" . }}
minReplicas: 1
maxReplicas: 4
metrics:
- type: Pods
pods:
metricName: network_packets_received_rate
targetAverageValue: 10000m
- type: Pods
pods:
metricName: network_packets_transmit_rate
targetAverageValue: 10000m
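# 10000m is Kubernetes quantity notation for 10, i.e. scale out when the average packet
# rate per pod exceeds roughly 10 (in whatever unit the custom metric is exported).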
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: timeseries-backup-job
spec:
schedule: "*/5 * * * *"
concurrencyPolicy: Forbid
startingDeadlineSeconds: 100
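# Backs up every 5 minutes; Forbid skips a run while the previous job is still active,
# and a missed run that cannot start within 100 s of its schedule is abandoned.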
jobTemplate:
spec:
template:
spec:
containers:
- name: timeseries-backup-job
image: "{{ .Values.pds.imageRepository }}/timeseries-backup:{{ .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
requests:
memory: "4Gi"
env:
- name: ENV-TAG-KEY
value: "{{ .Values.pds.envTag }}"
- name: GOOGLE-PROJECT-NAME-KEY
value: "{{ .Values.globalResourcesProjectId }}"
- name: BUCKET-NAME-KEY
value: "{{ .Values.pds.envTag }}-prodops-backup-bucket"
restartPolicy: Never
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "osdu-cloud-data-ingestion.fullname" . }}-config
data:
app-config.conf: |
{{ tpl (.Files.Get "files/config.conf") . | indent 4 }}
metrics.conf: |
{{ tpl (.Files.Get "files/metrics.conf") . | indent 4 }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "osdu-cloud-data-ingestion.fullname" . }}
labels:
{{ include "osdu-cloud-data-ingestion.labels" . | indent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "osdu-cloud-data-ingestion.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
annotations:
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
labels:
app.kubernetes.io/name: {{ include "osdu-cloud-data-ingestion.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.pds.imageRepository }}/{{ .Chart.Name }}:{{ .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
volumeMounts:
- name: application-config
mountPath: /opt/config-mount
livenessProbe:
exec:
command:
- rm
- /opt/osdu-cloud-data-ingestion/bin/alive
initialDelaySeconds: 90
periodSeconds: 120
failureThreshold: 3
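# The liveness check deletes the "alive" marker file; presumably the application recreates
# it periodically, so a hung process leaves no file, rm fails, and the pod is restarted.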
resources:
{{ toYaml .Values.resources | indent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 8 }}
{{- end }}
volumes:
- name: application-config
configMap:
name: {{ template "osdu-cloud-data-ingestion.fullname" . }}-config
apiVersion: v1
kind: Service
metadata:
name: {{ template "osdu-cloud-data-ingestion.fullname" . }}-metrics
annotations:
prometheus.io/port: "9091"
prometheus.io/scrape: "true"
spec:
type: NodePort
selector:
app.kubernetes.io/name: {{ include "osdu-cloud-data-ingestion.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
ports:
- protocol: TCP
port: 9091
targetPort: 9091
name: metrics
replicaCount: 1
image:
pullPolicy: IfNotPresent
nameOverride: ""
fullnameOverride: ""
service:
type: ClusterIP
port: 9090
resources:
requests:
memory: "1Gi"
nodeSelector: {}
tolerations: []
affinity: {}
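# Values referenced by the templates but not defined here (pds.*, googleProjectId,
# globalResourcesProjectId) are presumably supplied at install time, e.g. via --set or an
# environment-specific values file.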
sbt.version=1.2.8
import sbt.{Resolver, addSbtPlugin}
//format: OFF
resolvers += Classpaths.typesafeReleases
resolvers += Resolver.url("bintray-sbt-plugin-releases",url("https://dl.bintray.com/content/sbt/sbt-plugin-releases"))(Resolver.ivyStylePatterns)
resolvers += GCSResolver.forBucket("pdi-ivy-release-repo")
val typesafeRepo = "com.typesafe.sbt"
addSbtPlugin(typesafeRepo % "sbt-git" % "0.9.3")
addSbtPlugin(typesafeRepo % "sbt-license-report" % "1.2.0")
addSbtPlugin(typesafeRepo % "sbt-native-packager" % "1.3.2")
addSbtPlugin("org.scoverage" % "sbt-scoverage" % "1.6.1")
addSbtPlugin("org.scalariform" % "sbt-scalariform" % "1.8.0")
addSbtPlugin("org.scalastyle" %% "scalastyle-sbt-plugin" % "1.0.0")
addSbtPlugin("com.github.gseitz" % "sbt-release" % "1.0.7")
addSbtPlugin("net.virtual-void" % "sbt-dependency-graph" % "0.9.2")
addSbtPlugin("net.vonbuchholtz" % "sbt-dependency-check" % "2.0.0")
addSbtPlugin("com.thesamet" % "sbt-protoc" % "0.99.12")
addSbtPlugin("com.lightbend" % "sbt-google-cloud-storage" % "0.0.10")
addSbtPlugin("org.opengroup.osdu.production" % "osdu-common-deps" % "0.0.57")
import sbt.{Resolver, addSbtPlugin}
//format: OFF
addSbtPlugin("com.lightbend" % "sbt-google-cloud-storage" % "0.0.10")
## Description
Add your code change description here.
## Type of change (New Feature, Fix, Breaking Change)
- [ ] New Feature
- [ ] Fix
- [ ] Breaking Change
## Unit tests added in the build process and the results are:
- [ ] Success
- [ ] Failed
- [ ] Not Applicable
## Integration tests added in the build process and the results are:
- [ ] Success
- [ ] Failed
- [ ] Not Applicable
## Code coverage percentage is above the threshold.
- [ ] Yes
- [ ] No
- [ ] Not Applicable
## Automation tests are linked to Dev/QA/Performance stages.
- [ ] Yes
- [ ] No
- [ ] Not Applicable
## Automation test results are:
- [ ] Success
- [ ] Failed
## Dependent services have been deployed to all stages without any failure?
- [ ] Yes
- [ ] No
- [ ] Not Applicable
## Dependent infrastructure services have been deployed to all stages without any failure?
- [ ] Yes
- [ ] No
- [ ] Not Applicable
sonar.projectKey=PDS_INGESTION_PIPELINE
sonar.projectName=PDS_INGESTION_PIPELINE
sonar.analysis.mode=publish
sonar.projectVersion=PROJECT_BUILD_VERSION
sonar.sources=src/main/scala
sonar.scoverage.reportPath=target/scala-2.12/scoverage-report/scoverage.xml
sonar.projectBaseDir=.
sonar.java.binaries=target/scala-2.12/classes/
env = "local"
cloud {
global-project {
name = "GLOBAL_GOOGLE_PROJECT_NAME"
}
project {
name = "GOOGLE_PROJECT_NAME"
}
pubsub {
subscription {
name = "A0000001-integration-test-subscription"
ack-deadline-seconds = 60
parallel-pull-count = 10
retain-acked-messages = false
}
topic {
name = "A0000001-test"
}
cache-coherence-outbound {
outbound {
topic {
name = "buckets-gateway-it-test"
}
}
publishRetryCount = 3
}
cache-coherence-outbound-2 {
outbound {
topic {
name = "buckets-gateway-2-it-test"
}
}
publishRetryCount = 3
}
}
bigtable {
instance {
name = "timeseries"
}
table {
data {
name = "ts-it"
}
index {
name = "tsi-it"
}
changelog {
name = "changelog-it-test"
}
}
columnFamily {
names = ["d"]
}
user {
agent = "osdu-user-agent"
}
}
memorystore {
host = "localhost"
port = 6388
message-channel = "unit-catalog-channel"
}
}
quartz {
schedules {
probeSchedule {
description = "A cron job that runs every minute"
expression = "0 * * ? * * *"
}
}
}
bootloader.class = org.opengroup.osdu.production.TestApp