Skip to content
Snippets Groups Projects
Commit ca9b4f50 authored by Shane Hutchins's avatar Shane Hutchins
Browse files

Merge branch 'remove_admincli' into 'master'

Remove AdminCLI from PolicyService

See merge request !261
parents 16fd5599 7437cd97
No related branches found
No related tags found
3 merge requests!299Update Dev branch,!298Securityfix,!261Remove AdminCLI from PolicyService
Pipeline #148676 failed
Showing
with 1 addition and 2628 deletions
FROM python:3.9-slim
# Create a non-root user so the CLI does not run as root inside the container.
RUN useradd --create-home --shell /bin/bash app_user
WORKDIR /home/app_user
# Copy only requirements first so dependency install is cached across source edits.
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
# Drop privileges before copying in the application source.
USER app_user
COPY . .
# Interactive shell by default; the run targets pass env vars for pol.py.
CMD ["bash"]
# Image/tag used by the container targets below.
IMAGE_NAME=policy-service-admincli
TAG=latest

# All command-style targets are phony: none of them produce a file of the
# same name. (Previously only build/clean/build_docker/run/test were listed.)
.PHONY: default build clean build_docker run run_local scan tests test \
        test_eval test_storage test_other demo load_data

default: build

# Policy service on the docker host, used by run_local to override POLICY_URL.
POLICY_LOCAL_URL=http://host.docker.internal:8080

# BUGFIX: was `$(./pol.py --version)`, which expands a (nonexistent) make
# variable literally named "./pol.py --version" and always yields empty;
# $(shell ...) actually runs the command.
version := $(shell ./pol.py --version)

build:
	pyinstaller --onefile pol.py

clean:
	- rm -fr dist build pol.spec *.csv *.xlsx *.rego *.json *.las
	- docker rm -f $(IMAGE_NAME)

build_docker:
	docker build --network host -t $(IMAGE_NAME):$(TAG) -f Dockerfile .

# Run against a policy service on the docker host (POLICY_URL overridden).
run_local:
	docker run -it --rm \
	-e DATA_PARTITION=${DATA_PARTITION} \
	-e TOKEN="${TOKEN}" \
	-e POLICY_URL=$(POLICY_LOCAL_URL) \
	-e ENTITLEMENTS_URL=${ENTITLEMENTS_URL} \
	-e LEGAL_URL=${LEGAL_URL} \
	--name $(IMAGE_NAME) $(IMAGE_NAME):$(TAG)

run:
	docker run -it --rm \
	-e DATA_PARTITION=${DATA_PARTITION} \
	-e TOKEN="${TOKEN}" \
	-e POLICY_URL=${POLICY_URL} \
	-e ENTITLEMENTS_URL=${ENTITLEMENTS_URL} \
	-e LEGAL_URL=${LEGAL_URL} \
	--name $(IMAGE_NAME) $(IMAGE_NAME):$(TAG)

scan:
	docker scan $(IMAGE_NAME):$(TAG)

# requires PYTHONPATH to be set to admincli root directory
tests: test
test:
	python3.9 -m pytest -v
test_eval:
	python3.9 -m pytest -v -k "eval"
test_storage:
	python3.9 -m pytest -v -k "storage"
test_other:
	python3.9 -m pytest -v -k "not policy"

# Record a demo cast; `-` prefixes let individual steps fail without aborting.
demo: END_RECORDING := "Thanks for watching"
demo:
	- ./pol.py add -f ../../app/tests/templates/search2.rego search2 -t --force
	- ./pol.py add -f ../../app/tests/templates/search_deny.rego search_deny -t --force
	- ./pol.py opa-add -f ../../app/tests/instance_data/dataauthz.rego osdu/instance/dataauthz.rego --force
	- ./pol.py opa-add -f tests/example.rego --force
	- rm docs/admincli.cast
	asciinema-rec_script docs/admincli.asc --title="AdminCLI"

# Load the standard instance/partition policies into OPA for local testing.
load_data:
	python3.9 ./pol.py opa-add -f ../../app/tests/instance_data/dataauthz.rego osdu/instance/dataauthz.rego --force
	python3.9 ./pol.py opa-add -f ../../app/tests/instance_data/entitlements.rego osdu/instance/entitlements.rego --force
	python3.9 ./pol.py opa-add -f ../../app/tests/instance_data/legal.rego osdu/instance/legal.rego --force
	python3.9 ./pol.py opa-add -f ../../app/tests/templates/dataauthz.rego.template osdu/partition/osdu/dataauthz.rego -t --force
# Policy Service - Admin CLI
The Admin CLI is an easy to use full featured CLI.
This AdminCLI was new in OSDU Milestone 14. It was ready for production use then.
Now with M15 it's even better. As always, please report any issues.
##### For help:
* General help: `pol.py --help`
* Individual command help is also available, for example: `pol.py ls --help`
##### The main policy commands are:
* `add` for adding or updating policies. This is particularly useful for automation and loading policies into OSDU partitions,
* `eval` for evaluating policies,
* `ls` for listing and retrieving policies and
* `rm` for deleting policies
* `health` - retrieves health status of policy service
##### Additional Policy Developer Utils
* `compile` Use OPA's Compile API to partially evaluate a query.
* `config` Diagnostic config on Policy Service.
* `diff` Compare two policies, show the delta in a context diff format.
* `opa-add` Add or update a policy directly in OPA ✨ for LOCAL testing/development
* `opa-rm` delete a policy directly from OPA. 🔥 for LOCAL testing/development
* `translate` For helping testing translate which is used by search service via os-core-common
##### Additional Utils/commands include:
* `groups` - Showing groups related to your auth context,
* `info` - Retrieves info from services,
* `legal-tags` - Get legal tags from legal tag service,
* `search` - Search Utility
* `storage` - Storage and Dataset record retrieval utility
You will need to set the following environmental variables or provide details on command line:
* `TOKEN` or `--token`
* `DATA_PARTITION` or `--data-partition-id`
* `BASE_URL` or `--base-url` or `--url` or `--host`
See [setenv.sh](setenv.sh) for an example of these.
Please note: *command-line options will override environment variables.*
Command line completion is available:
* `--install-completion`
* `--show-completion`
##### Development / Testing Notes:
Individual services can be optionally redirected for development, testing or custom environments:
* `POLICY_URL` or `--policy-url`
* `ENTITLEMENTS_URL` or `--ent-url`
* `STORAGE_URL` or `--storage-url`
* `SEARCH_URL` or `--search-url`
* `LEGAL_URL` or `--legal-url`
* `DATASET_URL` or `--dataset-url`
These are not required. See [setenv.sh](setenv.sh) for an example of these.
Please note: *command-line options will override environment variables.*
Errors during collecting phase are generally issues with TOKEN.
##### Built-in Template Engine
Add, eval and translate support a templating engine that makes it super easy to automate. Hopefully you find this helpful.
When using --template option the strings in our file input will be replaced, for example:
* `${data_partition}` will be replaced by the data partition id you are using
* `${name}` will be replaced by the policy id you selected in the command
See the individual command's help for template support details.
In future releases a compiled version of the CLI may be made available.
##### Output options for commands:
Some commands support multiple output options, for example `pol.py ls --output=<>` or `pol.py search --output=<>`
* fancy (default with colors and formatting)
* simple
* excel (supported only on search currently)
* tree (tree output of policies)
If the policy admincli detects there isn't a tty simple should automatically be selected to make automation easier.
In addition many commands support feature to get raw json return from OSDU service using the `--raw` option.
##### Searching:
Search uses standard Regular Expressions (shortened as regex or regexp), sometimes referred to as rational expressions - a sequence of characters that specifies a search pattern in text. This is extremely powerful search facility.
Most letters and characters will simply match themselves. For example `test` will match the string `test` exactly. There are exceptions to this rule; some characters are special metacharacters, and don’t match themselves. Instead, they signal that some out-of-the-ordinary thing should be matched, or they affect other portions of the regex by repeating them or changing their meaning.
These include: `. ^ $ * + ? { } [ ] \ | ( )`
The `ls` command supports two kinds of searching for policies:
* `--search` which takes a REGEX search string. This will search in both policy name and the stored policy rego.
* `--name` which takes a REGEX search string will only search in policy names.
##### Force Option:
Some commands (`add`, `eval` and `translate`) will by default ask for confirmation when changing data or using templating. You can bypass this by using option `--force`, which is great for automation.
**The policy service team would appreciate any feedback and feature requests on the AdminCLI.**
##### Building executables:
`make build` should build executable for your architecture in dist directory. Be sure to use python 3.9.x
##### Building container and running container:
* `make build_docker`
* `make run` - you will still need to set the environment variables
* `make run_local` - you will still need to set the environment variables but this will override POLICY_URL to point to your local machine via docker host.
####
[![Demo](https://asciinema.org/a/4RMvvcjmReh1qZmoNu0FIfsra.svg)](https://asciinema.org/a/4RMvvcjmReh1qZmoNu0FIfsra?speed=0.75)
The AdminCLI has been moved to [its own repo](https://community.opengroup.org/osdu/ui/admincli)
import os
import requests
import subprocess
import tempfile
class OpaCheck:
    """Validate rego policy text by writing it to a temp file and running `opa check`."""

    # Path of the most recently written policy file (None until createfile()).
    fpath = None

    def __init__(self):
        # Private scratch directory; caller removes it via delete().
        self.tempdir = tempfile.mkdtemp()

    def createfile(self, rego: str, filename: str):
        """Write `rego` source to `filename` inside the temp directory."""
        self.fpath = os.path.join(self.tempdir, filename)
        # BUGFIX: use a context manager so the handle is closed even if the
        # write raises (original left the file object open on error).
        with open(self.fpath, "w") as f:
            f.write(rego)

    def opa_check_file(self):
        """Run `opa check` on the current file.

        Returns a (returncode, stderr) tuple; (-1, message) when the `opa`
        binary is not on PATH. The scratch file is always removed.
        """
        try:
            result = subprocess.run(['opa', 'check', self.fpath],
                                    capture_output=True, text=True)
        except FileNotFoundError:
            return (-1, "OPA Binary not found")
        finally:
            # Single cleanup path instead of duplicated os.remove() calls.
            if self.fpath and os.path.exists(self.fpath):
                os.remove(self.fpath)
        return (result.returncode, result.stderr)

    def check(self, rego: str, filename: str):
        """Convenience wrapper: write the rego text, then check it."""
        self.createfile(rego, filename)
        return self.opa_check_file()

    def delete(self):
        """Remove the temp directory (empty by now: each check removes its file)."""
        os.rmdir(self.tempdir)
def put_opa_policy_direct(policy_id, data, base_url, timeout=20):
    """
    Put a policy in OPA directly - for development/LOCAL use

    policy_id: id path segment under OPA's /v1/policies endpoint.
    data: raw rego policy text, sent as application/octet-stream.
    base_url: OPA base URL (no trailing slash).
    Returns the requests Response; exits the process (SystemExit) on any
    connection/timeout error.
    """
    url = base_url + '/v1/policies/' + policy_id
    try:
        rsp = requests.put(url, data=data, headers={'Content-Type': 'application/octet-stream'}, timeout=timeout)
    except requests.exceptions.RequestException as e:
        # CLI context: abort with the underlying error as the exit message.
        raise SystemExit(e)
    return rsp
def delete_opa_policy_direct(policy_id, base_url, timeout=20):
    """
    Delete a policy from OPA directly - for development/LOCAL use

    policy_id: id path segment under OPA's /v1/policies endpoint.
    Returns the requests Response; exits the process (SystemExit) on any
    connection/timeout error.
    """
    url = base_url + '/v1/policies/' + policy_id
    try:
        rsp = requests.delete(url, headers={'Content-Type': 'application/octet-stream'}, timeout=timeout)
    except requests.exceptions.RequestException as e:
        # CLI context: abort with the underlying error as the exit message.
        raise SystemExit(e)
    return rsp
\ No newline at end of file
# Integration tests for the AdminCLI against an AWS deployment.
aws-python-admincli-test:
  extends:
    - .aws
    - .aws_common_variables
    - .aws_variables
  image: $CI_REGISTRY/osdu/platform/deployment-and-operations/base-containers-aws/aws-python/aws-python:v1.0-py3.9
  stage: integration
  tags: ['osdu-small']
  needs: ['aws-test-python']
  script:
    - cd frontend/admincli
    - chmod +x ./run-integration-tests.sh
    - ./run-integration-tests.sh -c aws
  # Test failures do not fail the pipeline.
  allow_failure: true
  rules:
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
    - if: $CI_COMMIT_BRANCH == 'dev'
      when: manual
    # NOTE(review): rules are evaluated in order, so a scheduled pipeline on
    # the default branch matches the first rule and still runs — confirm the
    # "never" is intended to apply only to schedules on other branches.
    - if: $CI_PIPELINE_SOURCE == "schedule"
      when: never
This diff is collapsed.
# Rego
This directory contains code from from https://github.com/open-policy-agent/rego-python
Under the Apache License Version 2.0, January 2004
import json
class QuerySet(object):
    """Container for a list of Query nodes parsed from OPA output."""

    def __init__(self, queries):
        self.queries = queries

    @classmethod
    def from_data(cls, data):
        """Build a QuerySet from raw JSON data (a list of query bodies)."""
        parsed = []
        for raw_query in data:
            parsed.append(Query.from_data(raw_query))
        return cls(parsed)

    def __str__(self):
        rendered = [type(q).__name__ + "(" + str(q) + ")" for q in self.queries]
        return type(self).__name__ + "(" + ", ".join(rendered) + ")"
class Query(object):
    """A single query: an ordered list of Expr nodes."""

    def __init__(self, exprs):
        self.exprs = exprs

    @classmethod
    def from_data(cls, data):
        """Build a Query from raw JSON data (a list of expression objects)."""
        return cls(list(map(Expr.from_data, data)))

    def __str__(self):
        parts = [str(expr) for expr in self.exprs]
        return "; ".join(parts)
class Expr(object):
    """AST node for a rego expression: either a call or a single term."""
    def __init__(self, terms):
        # Either a list of Terms (call: operator followed by operands)
        # or a single Term (non-call expression) — see is_call().
        self.terms = terms
    @property
    def operator(self):
        # First term of a call expression is the operator.
        if not self.is_call():
            raise ValueError("not a call expr")
        return self.terms[0]
    @property
    def operands(self):
        # Remaining terms of a call expression are its arguments.
        if not self.is_call():
            raise ValueError("not a call expr")
        return self.terms[1:]
    def is_call(self):
        # A call stores a list of terms; a plain expression stores one Term.
        return not isinstance(self.terms, Term)
    def op(self):
        # Dotted operator name; assumes the operator term wraps a Ref of
        # Vars/Scalars — TODO confirm for all operator shapes.
        return ".".join([str(t.value.value) for t in self.operator.value.terms])
    @classmethod
    def from_data(cls, data):
        # A dict "terms" field means a single-term expression; a list means a call.
        terms = data["terms"]
        if isinstance(terms, dict):
            return cls(Term.from_data(terms))
        return cls([Term.from_data(t) for t in terms])
    def __str__(self):
        if self.is_call():
            return (
                str(self.operator)
                + "("
                + ", ".join(str(o) for o in self.operands)
                + ")"
            )
        return str(self.terms)
class Term(object):
    """Wrapper for one typed AST value (Scalar, Var, Ref, ...)."""
    def __init__(self, value):
        self.value = value
    @classmethod
    def from_data(cls, data):
        # _VALUE_MAP (module level) maps the JSON "type" field to the class
        # that parses the "value" field; "null" carries no "value" key.
        if data["type"] == "null":
            data["value"] = None
        return cls(_VALUE_MAP[data["type"]].from_data(data["value"]))
    def __str__(self):
        return str(self.value)
class Scalar(object):
    """Leaf node holding a JSON scalar (null, bool, number, or string)."""

    def __init__(self, value):
        self.value = value

    @classmethod
    def from_data(cls, data):
        # The raw JSON value is stored unchanged.
        return cls(data)

    def __str__(self):
        # JSON-encode so strings come out quoted and None renders as "null".
        return json.dumps(self.value)
class Var(object):
    """Leaf node holding a rego variable name."""

    def __init__(self, value):
        self.value = value

    @classmethod
    def from_data(cls, data):
        return cls(data)

    def __str__(self):
        # Variable names render unquoted, unlike Scalar strings.
        return str(self.value)
class Ref(object):
    """AST node for a reference, e.g. data.foo[i]; head term plus bracketed parts."""

    def __init__(self, terms):
        self.terms = terms

    def operand(self, idx):
        """Return the idx-th component term of the reference."""
        return self.terms[idx]

    @classmethod
    def from_data(cls, data):
        return cls([Term.from_data(item) for item in data])

    def __str__(self):
        head, *rest = self.terms
        return str(head) + "".join("[" + str(part) + "]" for part in rest)
class Array(object):
    """AST node for an array literal of Terms."""

    def __init__(self, terms):
        self.terms = terms

    @classmethod
    def from_data(cls, data):
        return cls([Term.from_data(item) for item in data])

    def __str__(self):
        inner = ",".join(map(str, self.terms))
        return "[" + inner + "]"
class Set(object):
    """AST node for a set literal of Terms."""

    def __init__(self, terms):
        self.terms = terms

    @classmethod
    def from_data(cls, data):
        return cls([Term.from_data(item) for item in data])

    def __str__(self):
        # An empty set can't be written as {} (that's an object), hence "set()".
        if not self.terms:
            return "set()"
        return "{" + ",".join(map(str, self.terms)) + "}"
class Object(object):
    """AST node for an object literal: ordered (key Term, value Term) pairs."""

    def __init__(self, *pairs):
        self.pairs = pairs

    @classmethod
    def from_data(cls, data):
        # data is a list of [key, value] raw term pairs.
        return cls(*[(Term.from_data(p[0]), Term.from_data(p[1])) for p in data])

    def __str__(self):
        # BUGFIX: the original joined a dict comprehension, and joining a dict
        # iterates only its KEYS — values were silently dropped from the output
        # (e.g. {1: 2} rendered as "{1}" instead of "{1:2}").
        return "{" + ",".join(str(k) + ":" + str(v) for (k, v) in self.pairs) + "}"
class Call(object):
    """AST node for a function call term: operator followed by operands."""
    def __init__(self, terms):
        self.terms = terms
    @classmethod
    def from_data(cls, data):
        return cls([Term.from_data(x) for x in data])
    @property
    def operator(self):
        # First term is the function being called.
        return self.terms[0]
    @property
    def operands(self):
        # Remaining terms are the call arguments.
        return self.terms[1:]
    def op(self):
        # Dotted operator name; assumes the operator term wraps a Ref whose
        # components have simple values — TODO confirm for all builtins.
        return ".".join([str(t.value.value) for t in self.operator.value.terms])
    def __str__(self):
        return str(self.operator) + "(" + ", ".join(str(o) for o in self.operands) + ")"
class ArrayComprehension(object):
    """AST node for an array comprehension: [term | body]."""

    def __init__(self, term, body):
        self.term = term
        self.body = body

    @classmethod
    def from_data(cls, data):
        return cls(Term.from_data(data["term"]), Query.from_data(data["body"]))

    def __str__(self):
        return "[%s | %s]" % (self.term, self.body)
class SetComprehension(object):
    """AST node for a set comprehension: {term | body}."""

    def __init__(self, term, body):
        self.term = term
        self.body = body

    @classmethod
    def from_data(cls, data):
        return cls(Term.from_data(data["term"]), Query.from_data(data["body"]))

    def __str__(self):
        return "{%s | %s}" % (self.term, self.body)
class ObjectComprehension(object):
    """AST node for an object comprehension: {key: value | body}."""

    def __init__(self, key, value, body):
        self.key = key
        self.value = value
        self.body = body

    @classmethod
    def from_data(cls, data):
        key = Term.from_data(data["key"])
        value = Term.from_data(data["value"])
        body = Query.from_data(data["body"])
        return cls(key, value, body)

    def __str__(self):
        return "{%s:%s | %s}" % (self.key, self.value, self.body)
def is_comprehension(x):
    """Return True when x is one of the three comprehension AST node types."""
    comprehension_types = (ArrayComprehension, SetComprehension, ObjectComprehension)
    return isinstance(x, comprehension_types)
# Maps the "type" field of a raw term JSON object to the class that parses
# its "value" field (consumed by Term.from_data).
_VALUE_MAP = {
    "null": Scalar,
    "boolean": Scalar,
    "number": Scalar,
    "string": Scalar,
    "var": Var,
    "ref": Ref,
    "array": Array,
    "set": Set,
    "object": Object,
    "call": Call,
    "objectcomprehension": ObjectComprehension,
    "setcomprehension": SetComprehension,
    "arraycomprehension": ArrayComprehension,
}
import json
from rich.console import Console
from rego import ast
console = Console()
def walk(node, vis):
    """Depth-first traversal of a rego AST.

    `vis` is called on each node; its return value becomes the visitor used
    for that node's children, or the subtree is skipped if it returns None.
    """
    next = vis(node)
    if next is None:
        return
    # Dispatch on node type to find the children to recurse into.
    if isinstance(node, ast.QuerySet):
        for q in node.queries:
            walk(q, next)
    elif isinstance(node, ast.Query):
        for e in node.exprs:
            walk(e, next)
    elif isinstance(node, ast.Expr):
        # Calls expose operator + operands; plain exprs hold a single Term.
        if node.is_call():
            walk(node.operator, next)
            for o in node.operands:
                walk(o, next)
        else:
            walk(node.terms, next)
    elif isinstance(node, ast.Term):
        walk(node.value, next)
    elif isinstance(node, (ast.Ref, ast.Array, ast.Set, ast.Call)):
        for t in node.terms:
            walk(t, next)
    elif isinstance(node, ast.Object):
        # Pairs are (key Term, value Term) tuples.
        for p in node.pairs:
            walk(p[0], next)
            walk(p[1], next)
    elif isinstance(node, ast.ObjectComprehension):
        walk(node.key, next)
        walk(node.value, next)
        walk(node.body, next)
    elif isinstance(node, (ast.SetComprehension, ast.ArrayComprehension)):
        walk(node.term, next)
        walk(node.body, next)
def pretty_print(node):
    """Print an indented tree rendering of a rego AST to the rich console."""
    class printer(object):
        # walk() visitor: prints one node, then returns a visitor indented
        # two more spaces for that node's children.
        def __init__(self, indent):
            self.indent = indent
        def __call__(self, node):
            # Leaf values get their payload shown inline; other nodes show
            # only the class name.
            if isinstance(node, ast.Scalar):
                name = node.__class__.__name__ + "(" + json.dumps(node.value) + ")"
            elif isinstance(node, ast.Var):
                name = node.__class__.__name__ + "(" + node.value + ")"
            else:
                name = node.__class__.__name__
            console.print(" " * self.indent + name)
            return printer(self.indent + 2)
    vis = printer(0)
    walk(node, vis)
XlsxWriter == 3.0.3
pytest == 7.1.3
requests == 2.25.1
rich == 12.6.0
typer == 0.6.1
uuid7 == 0.1.0
boto3
jwt
# Automatically generated by https://github.com/damnever/pigar.
XlsxWriter == 3.0.3
requests == 2.25.1
rich == 12.6.0
typer == 0.6.1
uuid7 == 0.1.0
#!/usr/bin/env bash
# Run the AdminCLI integration tests against a cloud provider deployment.
#
# USAGE:
#   run-integration-tests.sh -c CLOUD_PROVIDER    (aws|gcp|azure|ibm)
while getopts c: flag
do
    case "${flag}" in
        c) CLOUD_PROVIDER=${OPTARG};;
    esac
done

# Fresh virtualenv with runtime + dev dependencies.
python3 --version
python3 -m venv env
source env/bin/activate
python3 -m pip install --upgrade pip
python3 -m pip install -r requirements.txt
python3 -m pip install -r requirements-dev.txt

# Select BASE_URL/TOKEN per provider.
# BUGFIX: comparisons are now quoted and use the portable `=` operator —
# previously an unset CLOUD_PROVIDER turned `[ ${CLOUD_PROVIDER} == "aws" ]`
# into a shell syntax error instead of reaching the error branch.
if [ "${CLOUD_PROVIDER}" = "aws" ]; then
    export BASE_URL=$AWS_BASE_URL
    export TOKEN=$(python3 ../../app/tests/aws/aws_jwt_client.py)
elif [ "${CLOUD_PROVIDER}" = "gcp" ]; then
    export BASE_URL=$HOST
    export TOKEN=$(python3 ../../app/tests/anthos/anthos_jwt_client.py)
elif [ "${CLOUD_PROVIDER}" = "azure" ]; then
    export BASE_URL=$AZURE_DNS_NAME
    export TOKEN=$(curl -ks -XPOST "https://login.microsoftonline.com/${AZURE_TENANT_ID}/oauth2/token" -d "grant_type=client_credentials&client_id=${AZURE_PRINCIPAL_ID}&client_secret=${AZURE_PRINCIPAL_SECRET}&resource=${AZURE_APP_ID}" | jq --raw-output '.access_token')
elif [ "${CLOUD_PROVIDER}" = "ibm" ]; then
    export BASE_URL=$IBM_POLICY_BASE_URL
    export TOKEN=$svctoken
else
    echo "Not supported cloud provider ${CLOUD_PROVIDER}"
    exit 1
fi

# BUGFIX: quote $TOKEN — unquoted, an empty token expanded to `[ -z ]`
# (always true only by accident) and a token with spaces broke the test.
if [ -z "$TOKEN" ]; then
    echo "[ERROR] Not able to get token (TOKEN)"
    exit 1
fi

echo DATA_PARTITION "$DATA_PARTITION"
echo CLOUD_PROVIDER: "$CLOUD_PROVIDER"
echo BASE_URL: "$BASE_URL"
python3 pol.py setup
python3 -m pytest -v
from rich.table import Table
from rich.console import Console
import xlsxwriter
import os, sys, subprocess
import typer
import random
console = Console()
error_console = Console(stderr=True, style="bold red")
def display_search_results_excel(data,
                                 show_kind = False,
                                 excelfile = "search.xlsx",
                                 worksheetname = "Search Results"):
    """Write search results to an Excel workbook.

    data: search response dict; each entry of data["results"] must provide
    id/kind/authority/source/type/createTime/createUser keys.
    NOTE(review): show_kind is accepted but unused here — the Kind column is
    always written; confirm whether it should be honored.
    """
    # Workbook() takes one, non-optional, argument
    # which is the filename that we want to create.
    workbook = xlsxwriter.Workbook(excelfile)
    # The workbook object is then used to add new
    # worksheet via the add_worksheet() method.
    worksheet = workbook.add_worksheet(worksheetname)
    # Create a new Format object to formats cells
    # in worksheets using add_format() method.
    # here we create bold format object.
    bold = workbook.add_format({'bold': 1})
    headings = ['ID', "Kind", "Authority", "Source", "Type", "Create Time", "Create User"]
    # Write a row of data starting from 'A1' with bold format
    worksheet.write_row('A1', headings, bold)
    # Start from the first cell below the headers.
    row = 1
    col = 0
    for item in data["results"]:
        worksheet.write_row(row=row, col=0, data=(
            item['id'], item["kind"], item["authority"], item["source"], item["type"], item["createTime"], item["createUser"]
        ))
        row += 1
    worksheet.set_column(0, 0, 50)  # Column A width set to 50.
    worksheet.set_column(1, 1, 30)  # Column B width set to 30.
    worksheet.set_column(2, 3, 8)  # Columns C-D width set to 8.
    worksheet.set_column(4, 4, 20)  # Column E width set to 20.
    worksheet.set_column(5, 6, 25)  # Columns F-G width set to 25.
    # Finally, close the Excel file
    workbook.close()
    console.print(f"Search results ({row-1} records) saved as '{excelfile}'")
def display_search_results_simple(data):
    """Print one plain-text line per result: id, kind, authority, source, type, createTime."""
    for record in data["results"]:
        fields = (record['id'], record["kind"], record["authority"],
                  record["source"], record["type"], record["createTime"])
        print(*fields)
def display_search_results_idonly(data):
    """Print only the record id of each search result, one per line."""
    for record in data["results"]:
        print(record['id'])
def display_search_results_idonly_random(data):
    """Pick one result uniformly at random and print its id."""
    chosen = random.choice(data["results"])
    print(chosen['id'])
def display_search_results_fancy(data, show_kind=False):
    """Render search results as a rich table; prints nothing when there are no results."""
    count=len(data["results"])
    if not count:
        return
    table = Table(title=f"Search Results ({count})")
    table.add_column("ID", justify="full", style="cyan", no_wrap=True, min_width=25)
    # The Kind column is only present when requested.
    if show_kind:
        table.add_column("Kind", justify="full", style="cyan", no_wrap=True, min_width=25)
    table.add_column("Authority", justify="center", style="green", no_wrap=True, max_width=10)
    table.add_column("Source", justify="center", style="green", no_wrap=True, max_width=8)
    table.add_column("Type", justify="center", style="green", no_wrap=True, min_width=15)
    table.add_column("Created", justify="center", style="green", no_wrap=True, max_width=11)
    for item in data["results"]:
        created = item["createTime"]
        # Row shape must match the column set chosen above.
        if show_kind:
            table.add_row(item['id'], item["kind"], item["authority"], item["source"], item["type"], created)
        else:
            table.add_row(item['id'], item["authority"], item["source"], item["type"], created)
    console.print(table)
def open_file(filename):
    """Open `filename` with the platform's default application."""
    if sys.platform == "win32":
        os.startfile(filename)
        return
    # macOS ships `open`; assume xdg-open on other Unix-like desktops.
    launcher = "open" if sys.platform == "darwin" else "xdg-open"
    subprocess.call([launcher, filename])
def search_order_callback(ctx: typer.Context, value: str):
    """Typer option callback: accept only "ASC" or "DESC" as a sort order."""
    if value not in ("ASC", "DESC"):
        raise typer.BadParameter("Only ASC or DESC is allowed")
    return value
\ No newline at end of file
# Example env var settings
# load by . ./setenv
export DATA_PARTITION=osdu
# Token from the caller's gcloud user credentials.
export TOKEN="$(gcloud auth print-access-token)"
export BASE_URL="https://policy-dev.osdu.lol"
# Point policy calls at a locally running service.
export POLICY_URL="http://localhost:8080"
# Optional per-service overrides — uncomment to redirect individual services:
#export ENTITLEMENTS_URL="https://policy-dev.osdu.lol/api/entitlements/v2"
#export LEGAL_URL="https://policy-dev.osdu.lol/api/legal/v1"
#export STORAGE_URL="https://policy-dev.osdu.lol/api/storage/v2"
#export SEARCH_URL="https://policy-dev.osdu.lol/api/search/v2"
# Example env var settings using service account in gcp
# load by . ./setenv
# NOTE: SERVICE_ACCOUNT and GCP_BASE_URL must already be set in the environment.
export DATA_PARTITION=osdu
# Token impersonating the given service account.
export TOKEN="$(gcloud auth print-access-token --impersonate-service-account=$SERVICE_ACCOUNT)"
export POLICY_URL=$GCP_BASE_URL
export ENTITLEMENTS_URL=$GCP_BASE_URL/api/entitlements/v2/groups
export LEGAL_URL=$GCP_BASE_URL/api/legal/v1/legaltags
# conftest.py
import pytest
import os
"""
"""
def pytest_addoption(parser):
    # Allow `pytest --data_partition=<id>` to choose the partition under test.
    parser.addoption("--data_partition", action="store", help="data-partition-id to use")
@pytest.fixture(scope='session')
def data_partition(request):
    # Resolution order: --data_partition CLI option, then DATA_PARTITION env
    # var, then the default partition "osdu".
    data_partition_val = request.config.option.data_partition
    if data_partition_val is None:
        data_partition_val = os.getenv('DATA_PARTITION')
    if data_partition_val is None:
        data_partition_val = "osdu"
    return data_partition_val
"""
# NUM_SEARCH_STORAGE_DATASET_TESTS
Number of times to go thru workflow -
Call to search, get a random ID from that
Look up that random ID in search
Look up that random ID in dataset
Attempt to download the file in that random ID
"""
NUM_SEARCH_STORAGE_DATASET_TESTS=25
"""
# RANDOM_SEARCH_LMIT
When calling search to get a random ID,
How many records should we pull before
selecting one of them.
"""
RANDOM_SEARCH_LMIT=250
{
"name": "opendes-dz-test",
"description": "Legal Tag added for Well - updated",
"properties": {
"countryOfOrigin": [
"US",
"CA"
],
"contractId": "123457",
"expirationDate": "2025-12-26",
"originator": "Schlumberger",
"dataType": "Third Party Data",
"securityClassification": "Private",
"personalData": "No Personal Data",
"exportClassification": "EAR99",
"extensionProperties": {
"AgreementIdentifier": "dz-test",
"EffectiveDate": "2022-06-01T00:00:00",
"TerminationDate": "2099-12-31T00:00:00",
"AffiliateEnablementIndicator": true,
"AgreementParties": [
{
"AgreementPartyType": "EnabledAffiliate",
"AgreementParty": "Shell RDS"
}
]
}
}
}
{
"input": {
"operation": "update",
"records": [
{
"id":"${data_partition}:test:1.4.1654807204111",
"kind":"${data_partition}:bulkupdate:test:1.1.1654807204111",
"legal":{
"legaltags":[
"${legal_tag}"
],
"otherRelevantDataCountries":["US"],
"status":"compliant"
},
"acls":{
"viewers":["data.default.viewers@${data_partition}.${domain}"],
"owners":["data.default.owners@${data_partition}.${domain}"]
}
}
]
}
}
# Template policy for AdminCLI testing: ${data_partition} and ${name} are
# substituted by the CLI's template engine before upload.
package osdu.partition["${data_partition}"].${name}
# Allow when the subject's clearance level is at least some report's level.
allow {
    input.subject.clearance_level >= data.reports[_].clearance_level
}
{
"name": "${name}",
"description": "A legal tag for testing AdminCLI",
"properties": {
"countryOfOrigin": [
"US"
],
"contractId": "A1234",
"expirationDate": "${expiration_date}",
"originator": "Default",
"dataType": "Public Domain Data",
"securityClassification": "Public",
"personalData": "No Personal Data",
"exportClassification": "EAR99"
}
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment