Merge pull request #723 from HumairAK/stable
Update stable to v2.7.0
HumairAK authored Oct 17, 2024
2 parents 3ded8a1 + 998859f commit 53336ab
Showing 44 changed files with 690 additions and 158 deletions.
15 changes: 15 additions & 0 deletions .github/scripts/python_package_upload/Dockerfile
@@ -0,0 +1,15 @@
FROM docker.io/python:3.9

# Set working directory
WORKDIR /app

# Copy the script into the container
COPY package_upload.sh /app/package_upload.sh

# Make sure the script is executable
RUN chmod +x /app/package_upload.sh

# Declare a volume where the downloaded packages are stored
VOLUME /app/packages

ENTRYPOINT ["/app/package_upload.sh"]
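
As a quick local sanity check, the image can be built and run with a host directory mounted over the declared volume so the downloaded packages land on the host. A minimal sketch (directory and tag names are illustrative, mirroring package_upload_run.sh below):

```bash
docker build -t package_upload .
# Mount a host directory over /app/packages so the downloads persist
docker run --rm -v "$(pwd)/packages:/app/packages" package_upload
ls packages/   # the downloaded wheels/sdists should appear here
```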
37 changes: 8 additions & 29 deletions .github/scripts/python_package_upload/package_upload.sh
@@ -1,33 +1,12 @@
#!/usr/bin/env bash
#!/bin/bash

set -ex

kfp_directory=kfp
boto_directory=boto3

mkdir -p "$kfp_directory"
mkdir -p "$boto_directory"

# Download kfp package
pip download kfp==2.7.0 -d "$kfp_directory"

# Download boto3 package
pip download boto3 -d "$boto_directory"


# Print the pods in the namespace
oc -n test-pypiserver get pods

pod_name=$(oc -n test-pypiserver get pod | grep pypi | awk '{print $1}')

# Copy kfp packages
for kfp_entry in "$kfp_directory"/*; do
echo oc -n test-pypiserver cp "$kfp_entry" $pod_name:/opt/app-root/packages
oc -n test-pypiserver cp "$kfp_entry" $pod_name:/opt/app-root/packages
done

# Copy boto3 packages
for boto_entry in "$boto_directory"/*; do
echo oc -n test-pypiserver cp "$boto_entry" $pod_name:/opt/app-root/packages
oc -n test-pypiserver cp "$boto_entry" $pod_name:/opt/app-root/packages
# Download packages
for python_version in "3.9" "3.10" "3.11" "3.12"; do
for package in "kfp" "numpy"; do
    # Pass --python explicitly; otherwise pip downloads for the local interpreter's
    # version, which may not match the Python version the pipeline runs with
pip download $package -d packages --only-binary=:none: --python $python_version
done
done
18 changes: 18 additions & 0 deletions .github/scripts/python_package_upload/package_upload_run.sh
@@ -0,0 +1,18 @@
#!/bin/bash

set -ex

mkdir -p /tmp/packages
docker rm package_upload_run || true
docker build -t package_upload .
docker run --name package_upload_run -v /tmp/packages:/app/packages package_upload

# Print the pods in the namespace
oc -n test-pypiserver get pods

pod_name=$(oc -n test-pypiserver get pod | grep pypi | awk '{print $1}')

# Copy packages
for entry in /tmp/packages/*; do
oc -n test-pypiserver cp "$entry" $pod_name:/opt/app-root/packages
done
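
One way to verify that the copied packages are actually being served is to query the pypi server's PEP 503 "simple" index. A sketch, assuming the service is named `pypiserver` and listens on port 8080 (both are assumptions; list the services in the namespace first if unsure):

```bash
# Service name and port are assumptions; check what actually exists
oc -n test-pypiserver get svc
oc -n test-pypiserver port-forward svc/pypiserver 8080:8080 &
curl -s http://localhost:8080/simple/kfp/   # should list the uploaded kfp files
```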
44 changes: 34 additions & 10 deletions .github/scripts/tests/tests.sh
@@ -12,7 +12,7 @@ if [ "$GIT_WORKSPACE" = "" ]; then
echo "GIT_WORKSPACE variable not defined. Should be the root of the source code. Example GIT_WORKSPACE=/home/dev/git/data-science-pipelines-operator" && exit 1
fi

CLEANUP=false
CLEAN_INFRA=false
K8SAPISERVERHOST=""
DSPA_NAMESPACE="test-dspa"
DSPA_EXTERNAL_NAMESPACE="dspa-ext"
@@ -27,6 +27,7 @@ CONFIG_DIR="${GIT_WORKSPACE}/config"
RESOURCES_DIR_CRD="${GIT_WORKSPACE}/.github/resources"
OPENDATAHUB_NAMESPACE="opendatahub"
RESOURCES_DIR_PYPI="${GIT_WORKSPACE}/.github/resources/pypiserver/base"
ENDPOINT_TYPE="service"

get_dspo_image() {
if [ "$REGISTRY_ADDRESS" = "" ]; then
@@ -128,7 +129,7 @@ upload_python_packages_to_pypi_server() {
echo "---------------------------------"
echo "Upload Python Packages to pypi-server"
echo "---------------------------------"
( cd "${GIT_WORKSPACE}/.github/scripts/python_package_upload" && sh package_upload.sh )
( cd "${GIT_WORKSPACE}/.github/scripts/python_package_upload" && sh package_upload_run.sh )
}

create_dspa_namespace() {
@@ -163,14 +164,14 @@ run_tests() {
echo "---------------------------------"
echo "Run tests"
echo "---------------------------------"
( cd $GIT_WORKSPACE && make integrationtest K8SAPISERVERHOST=${K8SAPISERVERHOST} DSPANAMESPACE=${DSPA_NAMESPACE} DSPAPATH=${DSPA_PATH} )
( cd $GIT_WORKSPACE && make integrationtest K8SAPISERVERHOST=${K8SAPISERVERHOST} DSPANAMESPACE=${DSPA_NAMESPACE} DSPAPATH=${DSPA_PATH} ENDPOINT_TYPE=${ENDPOINT_TYPE} )
}

run_tests_dspa_external_connections() {
echo "---------------------------------"
echo "Run tests for DSPA with External Connections"
echo "---------------------------------"
( cd $GIT_WORKSPACE && make integrationtest K8SAPISERVERHOST=${K8SAPISERVERHOST} DSPANAMESPACE=${DSPA_EXTERNAL_NAMESPACE} DSPAPATH=${DSPA_EXTERNAL_PATH} )
( cd $GIT_WORKSPACE && make integrationtest K8SAPISERVERHOST=${K8SAPISERVERHOST} DSPANAMESPACE=${DSPA_EXTERNAL_NAMESPACE} DSPAPATH=${DSPA_EXTERNAL_PATH} ENDPOINT_TYPE=${ENDPOINT_TYPE} )
}

undeploy_kind_resources() {
@@ -232,8 +233,11 @@ while [ "$#" -gt 0 ]; do
TARGET="rhoai"
shift
;;
--cleanup)
CLEANUP=true
    # The --clean-infra option is useful when rerunning tests against the same target
    # environment: it removes the need to delete the test infrastructure manually.
    # It defaults to false. When true, the existing infrastructure is deleted before the tests run.
--clean-infra)
CLEAN_INFRA=true
shift
;;
--k8s-api-server-host)
@@ -249,7 +253,7 @@ while [ "$#" -gt 0 ]; do
--dspa-namespace)
shift
if [[ -n "$1" ]]; then
DSPANAMESPACE="$1"
DSPA_NAMESPACE="$1"
shift
else
echo "Error: --dspa-namespace requires a value"
@@ -269,13 +273,23 @@ while [ "$#" -gt 0 ]; do
--dspa-path)
shift
if [[ -n "$1" ]]; then
DSPAPATH="$1"
DSPA_PATH="$1"
shift
else
echo "Error: --dspa-path requires a value"
exit 1
fi
;;
--external-dspa-path)
shift
if [[ -n "$1" ]]; then
DSPA_EXTERNAL_PATH="$1"
shift
else
echo "Error: --external-dspa-path requires a value"
exit 1
fi
;;
--kube-config)
shift
if [[ -n "$1" ]]; then
@@ -286,6 +300,16 @@ while [ "$#" -gt 0 ]; do
exit 1
fi
;;
--endpoint-type)
shift
if [[ -n "$1" ]]; then
ENDPOINT_TYPE="$1"
shift
else
echo "Error: --endpoint-type requires a value [service, route]"
exit 1
fi
;;
*)
echo "Unknown command line switch: $1"
exit 1
@@ -299,12 +323,12 @@ if [ "$K8SAPISERVERHOST" = "" ]; then
fi

if [ "$TARGET" = "kind" ]; then
if [ "$CLEANUP" = true ] ; then
if [ "$CLEAN_INFRA" = true ] ; then
undeploy_kind_resources
fi
setup_kind_requirements
elif [ "$TARGET" = "rhoai" ]; then
if [ "$CLEANUP" = true ] ; then
if [ "$CLEAN_INFRA" = true ] ; then
remove_namespace_created_for_rhoai
fi
setup_rhoai_requirements
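
Putting the renamed and newly added flags together, a full test run might look like the sketch below (the workspace path and API server URL are illustrative; `--clean-infra` and `--endpoint-type` are the options added in this diff, and the default kind target is assumed):

```bash
export GIT_WORKSPACE=/home/dev/git/data-science-pipelines-operator
# Re-run against the default kind target, tearing down leftover infra first
"$GIT_WORKSPACE"/.github/scripts/tests/tests.sh \
  --clean-infra \
  --k8s-api-server-host https://localhost:6443 \
  --endpoint-type service
```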
2 changes: 1 addition & 1 deletion .github/workflows/build-prs-trigger.yaml
@@ -29,7 +29,7 @@ jobs:
echo ${{ github.event.pull_request.state }} >> ./pr/pr_state
echo ${{ github.event.pull_request.head.sha }} >> ./pr/head_sha
echo ${{ github.event.action }} >> ./pr/event_action
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3
with:
name: pr
path: pr/
15 changes: 0 additions & 15 deletions .github/workflows/release_tests.yaml

This file was deleted.

2 changes: 1 addition & 1 deletion .github/workflows/release_trigger.yaml
@@ -22,7 +22,7 @@ jobs:
PR_STATE: ${{ github.event.pull_request.state }}
PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
run: ./.github/scripts/release_trigger/upload-data.sh
- uses: actions/upload-artifact@v4
- uses: actions/upload-artifact@v3
with:
name: pr
path: pr/
1 change: 0 additions & 1 deletion .gitignore
@@ -34,4 +34,3 @@ Dockerfile.cross
__pycache__/
*.py[cod]
*$py.class
.github/scripts/python_package_upload/
3 changes: 2 additions & 1 deletion Makefile
@@ -62,6 +62,7 @@ KUBECONFIGPATH ?= $(HOME)/.kube/config
K8SAPISERVERHOST ?= http://localhost:6443
DSPANAMESPACE ?= default
DSPAPATH ?= resources/dspa-lite.yaml
ENDPOINT_TYPE ?= service

# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
@@ -130,7 +131,7 @@ functest: manifests generate fmt vet envtest ## Run tests.
.PHONY: integrationtest
integrationtest: ## Run integration tests
cd tests && \
go test ./... --tags=test_integration -v -kubeconfig=${KUBECONFIGPATH} -k8sApiServerHost=${K8SAPISERVERHOST} -DSPANamespace=${DSPANAMESPACE} -DSPAPath=${DSPAPATH}
go test ./... --tags=test_integration -v -kubeconfig=${KUBECONFIGPATH} -k8sApiServerHost=${K8SAPISERVERHOST} -DSPANamespace=${DSPANAMESPACE} -DSPAPath=${DSPAPATH} -endpointType=${ENDPOINT_TYPE}

##@ Build

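
The new `ENDPOINT_TYPE` variable flows from the Makefile into `go test` as `-endpointType`, so it can also be set directly on the make invocation. For example (hostname and namespace are illustrative; per the test script, valid values are `service` and `route`):

```bash
# Run the integration tests against routes instead of services
make integrationtest \
  K8SAPISERVERHOST=https://api.example.com:6443 \
  DSPANAMESPACE=test-dspa \
  ENDPOINT_TYPE=route
```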
12 changes: 8 additions & 4 deletions README.md
@@ -12,7 +12,7 @@ Data Science Pipeline stacks onto individual OCP namespaces.
- [Pre-requisites](#pre-requisites)
- [Deploy the Operator via ODH](#deploy-the-operator-via-odh)
- [Using a development image](#using-a-development-image)
- [Deploy the Operator standalone](#deploy-the-operator-standalone)
- [Deploy the standalone Operator](#deploy-the-standalone-operator)
- [Deploy DSP instance](#deploy-dsp-instance)
- [Deploy another DSP instance](#deploy-another-dsp-instance)
- [Deploy a DSP with custom credentials](#deploy-a-dsp-with-custom-credentials)
@@ -64,13 +64,15 @@ To get started you will first need to satisfy the following pre-requisites:
3. Depending on which DSP version you install, you will need to do the following:
1. For DSPv1: The OpenShift Cluster must have OpenShift Pipelines 1.8 or higher installed. We recommend channel pipelines-1.8
on OCP 4.10 and pipelines-1.9 or pipelines-1.10 for OCP 4.11, 4.12 and 4.13. Instructions [here][OCP Pipelines Operator].
2. For DSPv2: The OpenShift Cluster must have Argo Workflows installed. You can follow the steps listed in the standalone deployment section [here](#deploy-the-operator-standalone).
2. For DSPv2: The DSPO will install a namespace-scoped Argo Workflow Controller, so ensure the OpenShift Cluster does not have a cluster-scoped Argo Workflows installation already present.
4. Based on installation type you will need one of the following:
1. For Standalone method: You will need to have [Kustomize] version 4.5+ installed
2. For ODH method: The Open Data Hub operator needs to be installed. You can install it via [OperatorHub][installodh].

## Deploy the Operator via ODH

Use this method to deploy DSPO using the OpenDataHub operator.

On a cluster with ODH installed, create a namespace where you would like to install DSPO:
Deploy the following `DataScienceCluster`:

@@ -140,7 +142,9 @@ spec:
sourcePath: base
```
## Deploy the Operator standalone
## Deploy the standalone Operator
Use this method to deploy DSPO **without** the OpenDataHub operator.
First clone this repository:
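
The clone step itself is collapsed in this view; assuming the upstream opendatadub-io repository location (substitute your fork as needed), it would look like:

```bash
git clone https://github.com/opendatahub-io/data-science-pipelines-operator.git
cd data-science-pipelines-operator
```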
@@ -604,7 +608,7 @@ The specific tests that are executed when you run `make test` can include unit tests

**To deploy DSPO as a developer :**

Follow the instructions from [here](#deploy-the-operator-standalone) to deploy the operator standalone.
Follow the instructions from [here](#deploy-the-standalone-operator) to deploy the standalone operator.

Follow the instructions from [here](#deploy-the-operator-via-odh) to deploy the operator via ODH.

2 changes: 2 additions & 0 deletions config/argo/clusterrole.argo-aggregate-to-admin.yaml
@@ -5,6 +5,8 @@ metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: argo-aggregate-to-admin
annotations:
internal.kpt.dev/upstream-identifier: "rbac.authorization.k8s.io|ClusterRole|default|argo-aggregate-to-admin"
rules:
- apiGroups:
- argoproj.io
2 changes: 2 additions & 0 deletions config/argo/clusterrole.argo-aggregate-to-edit.yaml
@@ -5,6 +5,8 @@ metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
name: argo-aggregate-to-edit
annotations:
internal.kpt.dev/upstream-identifier: "rbac.authorization.k8s.io|ClusterRole|default|argo-aggregate-to-edit"
rules:
- apiGroups:
- argoproj.io
2 changes: 2 additions & 0 deletions config/argo/clusterrole.argo-aggregate-to-view.yaml
@@ -5,6 +5,8 @@ metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: argo-aggregate-to-view
annotations:
internal.kpt.dev/upstream-identifier: "rbac.authorization.k8s.io|ClusterRole|default|argo-aggregate-to-view"
rules:
- apiGroups:
- argoproj.io
3 changes: 2 additions & 1 deletion config/argo/clusterrole.argo-cluster-role.yaml
@@ -1,8 +1,9 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: argo-cluster-role
annotations:
internal.kpt.dev/upstream-identifier: "rbac.authorization.k8s.io|ClusterRole|default|argo-cluster-role"
rules:
- apiGroups:
- ""
41 changes: 41 additions & 0 deletions config/argo/crd.workflowartifactgctasks.yaml
@@ -0,0 +1,41 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: workflowartifactgctasks.argoproj.io
spec:
group: argoproj.io
names:
kind: WorkflowArtifactGCTask
listKind: WorkflowArtifactGCTaskList
plural: workflowartifactgctasks
shortNames:
- wfat
singular: workflowartifactgctask
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
properties:
apiVersion:
type: string
kind:
type: string
metadata:
type: object
spec:
type: object
x-kubernetes-map-type: atomic
x-kubernetes-preserve-unknown-fields: true
status:
type: object
x-kubernetes-map-type: atomic
x-kubernetes-preserve-unknown-fields: true
required:
- metadata
- spec
type: object
served: true
storage: true
subresources:
status: {}
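
Once the operator applies this CRD, its registration can be sanity-checked from the CLI using the short name `wfat` declared above (a quick check, not part of the commit):

```bash
oc get crd workflowartifactgctasks.argoproj.io
oc get wfat -A   # WorkflowArtifactGCTask is namespaced, so list across all namespaces
```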