diff --git a/.github/actions/kind/action.yml b/.github/actions/kind/action.yml new file mode 100644 index 00000000..19623952 --- /dev/null +++ b/.github/actions/kind/action.yml @@ -0,0 +1,58 @@ +name: "Set up KinD" +description: "Step to start and configure KinD cluster" + +runs: + using: "composite" + steps: + - name: Init directories + shell: bash + run: | + TEMP_DIR="$(pwd)/tmp" + mkdir -p "${TEMP_DIR}" + echo "TEMP_DIR=${TEMP_DIR}" >> $GITHUB_ENV + + mkdir -p "$(pwd)/bin" + echo "$(pwd)/bin" >> $GITHUB_PATH + + - name: Container image registry + shell: bash + run: | + podman run -d -p 5000:5000 --name registry registry:2.8.1 + + export REGISTRY_ADDRESS=$(hostname -i):5000 + echo "REGISTRY_ADDRESS=${REGISTRY_ADDRESS}" >> $GITHUB_ENV + echo "Container image registry started at ${REGISTRY_ADDRESS}" + + KIND_CONFIG_FILE=${{ env.TEMP_DIR }}/kind.yaml + echo "KIND_CONFIG_FILE=${KIND_CONFIG_FILE}" >> $GITHUB_ENV + envsubst < .github/resources/kind/kind.yaml > ${KIND_CONFIG_FILE} + + sudo --preserve-env=REGISTRY_ADDRESS sh -c 'cat > /etc/containers/registries.conf.d/local.conf < ${{ env.TEMP_DIR }}/catalogsource.yaml + envsubst < ${{ env.RESOURCES_DIR }}/olm/subscription.yaml > ${{ env.TEMP_DIR }}/subscription.yaml + + kubectl create -f ${{ env.TEMP_DIR }}/catalogsource.yaml + + echo Wait for CatalogSource ${{ env.CATALOG_SOURCE_NAME }} to start + kubectl wait -n ${{ env.CATALOG_SOURCE_NAMESPACE }} catalogsource/${{ env.CATALOG_SOURCE_NAME }} --for=jsonpath='{.status.connectionState.lastObservedState}'=READY --timeout=300s + + kubectl create -f ${{ env.TEMP_DIR }}/subscription.yaml + + echo Waiting for Subscription to be ready + kubectl wait -n ${{ env.SUBSCRIPTION_NAMESPACE }} subscription/${{ env.SUBSCRIPTION_NAME }} --for=jsonpath='{.status.state}'=AtLatestKnown --timeout=180s + + echo Waiting for Deployment to be ready + timeout 60 bash -c 'until [[ $(kubectl get deployment/opendatahub-operator-controller-manager -n '${{ env.SUBSCRIPTION_NAMESPACE }}') ]]; do sleep 5 && echo "$(kubectl get deployment/opendatahub-operator-controller-manager -n '${{ env.SUBSCRIPTION_NAMESPACE }}')"; done' + kubectl wait -n ${{ env.SUBSCRIPTION_NAMESPACE }} deployment/opendatahub-operator-controller-manager --for=condition=Available=true --timeout=60s + env: + CATALOG_SOURCE_NAME: "odh-olm-test" + CATALOG_SOURCE_NAMESPACE: "olm" + SUBSCRIPTION_NAME: "opendatahub-operator" + SUBSCRIPTION_NAMESPACE: "openshift-operators" + + - name: Create Namespace for DSPO + run: | + kubectl create namespace ${{ env.DSPO_NS }} + kubectl config set-context --current --namespace=${{ env.DSPO_NS }} + env: + DSPO_NS: data-science-pipelines-operator + + - name: Prepare for Upgrade Testing + run: | + # Update the KfDef manifest with the latest released version + sed -i "s/main/${{ inputs.released-version }}/" ${{ env.RESOURCES_DIR }}/kfdef/kfdef.yaml + working-directory: ${{ env.RESOURCES_DIR }}/kfdef + + - name: Print KfDef Manifest Contents + run: cat ${{ env.RESOURCES_DIR }}/kfdef/kfdef.yaml + + - name: Deploy DSPO KfDef + run: | + kubectl apply -f ${{ env.RESOURCES_DIR }}/kfdef/kfdef.yaml -n ${{ env.DSPO_NS }} + env: + DSPO_NS: "data-science-pipelines-operator" + + - name: Print ODH Operator Pod Logs + run: kubectl get pods -n openshift-operators -o jsonpath='{.items[*].metadata.name}' | xargs -I {} kubectl logs -n openshift-operators {} + + - name: Wait for DSPO deployment to be ready + run: | + kubectl get deployments -n ${{ env.DSPO_NS }} + timeout 300 bash -c 'until [[ $(kubectl get 
deployment/data-science-pipelines-operator-controller-manager -n '${{ env.DSPO_NS }}') ]]; do sleep 5 && echo "$(kubectl get deployment/data-science-pipelines-operator-controller-manager -n '${{ env.DSPO_NS }}')"; done' + env: + DSPO_NS: data-science-pipelines-operator + + - name: Run pre-upgrade tests + run: | + ./tests/upgrades/main.sh + env: + DSPA_NS: test-ds-project-1 + WORKING_DIR: ${{ github.workspace }} + + - name: Prepare for Upgrade Testing + run: | + # Update the KfDef manifest with the candidate version + sed -i "s/${{ inputs.released-version }}/${{ inputs.candidate-version }}/" ${{ env.RESOURCES_DIR }}/kfdef/kfdef.yaml + working-directory: ${{ env.RESOURCES_DIR }}/kfdef + + - name: Print KfDef Manifest Contents + run: cat ${{ env.RESOURCES_DIR }}/kfdef/kfdef.yaml + + - name: Deploy KfDef Core for the candidate DSP Version + run: | + kubectl apply -f ${{ env.RESOURCES_DIR }}/kfdef/kfdef.yaml -n ${{ env.DSPO_NS }} + env: + DSPO_NS: data-science-pipelines-operator + + - name: Run upgrade tests + run: | + ./tests/upgrades/main.sh + env: + DSPA_NS: test-ds-project-2 diff --git a/.gitignore b/.gitignore index 6a4ec217..5b64e0f9 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,8 @@ Dockerfile.cross *.code-workspace *.vscode +.DS_Store + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 16a78d2a..ba68a441 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -25,6 +25,5 @@ repos: hooks: - id: go-fmt - id: golangci-lint - - id: go-unit-tests - id: go-build - id: go-mod-tidy diff --git a/Dockerfile b/Dockerfile index 2a2ae627..dd9b9b24 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM registry.access.redhat.com/ubi8/go-toolset:1.18.9-8 as builder +FROM registry.access.redhat.com/ubi8/go-toolset:1.19.10 as builder ARG TARGETOS ARG TARGETARCH diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index ce211113..cd7e9f74 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -80,8 +80,24 @@ type APIServer struct { EnableSamplePipeline bool `json:"enableSamplePipeline"` // +kubebuilder:default:=true // +kubebuilder:validation:Optional - AutoUpdatePipelineDefaultVersion bool `json:"autoUpdatePipelineDefaultVersion"` - Resources *ResourceRequirements `json:"resources,omitempty"` + AutoUpdatePipelineDefaultVersion bool `json:"autoUpdatePipelineDefaultVersion"` + Resources *ResourceRequirements `json:"resources,omitempty"` + + // If the Object store/DB is behind a TLS secured connection that is + // unrecognized by the host OpenShift/K8s cluster, then you can + // provide a PEM formatted CA bundle to be injected into the DSP + // server pod to trust this connection. CA Bundle should be provided + // as values within configmaps, mapped to keys. + CABundle *CABundle `json:"cABundle,omitempty"` +} + +type CABundle struct { + // +kubebuilder:validation:Required + ConfigMapName string `json:"configMapName"` + // Key should map to a CA bundle. The key is also used to name + // the CA bundle file (e.g. ca-bundle.crt) + // +kubebuilder:validation:Required + ConfigMapKey string `json:"configMapKey"` } type ArtifactScriptConfigMap struct { @@ -132,10 +148,14 @@ type MariaDB struct { // +kubebuilder:validation:Optional Deploy bool `json:"deploy"` Image string `json:"image,omitempty"` + // The MariadB username that will be created. 
Should match `^[a-zA-Z0-9_]+` // +kubebuilder:default:=mlpipeline + // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9_]+$` Username string `json:"username,omitempty"` PasswordSecret *SecretKeyValue `json:"passwordSecret,omitempty"` // +kubebuilder:default:=mlpipeline + // The database name that will be created. Should match `^[a-zA-Z0-9_]+` + // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9_]+$` DBName string `json:"pipelineDBName,omitempty"` // +kubebuilder:default:="10Gi" PVCSize resource.Quantity `json:"pvcSize,omitempty"` diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index babfd004..864bddb5 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -39,6 +39,11 @@ func (in *APIServer) DeepCopyInto(out *APIServer) { *out = new(ResourceRequirements) (*in).DeepCopyInto(*out) } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(CABundle) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServer. @@ -66,6 +71,21 @@ func (in *ArtifactScriptConfigMap) DeepCopy() *ArtifactScriptConfigMap { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CABundle) DeepCopyInto(out *CABundle) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CABundle. +func (in *CABundle) DeepCopy() *CABundle { + if in == nil { + return nil + } + out := new(CABundle) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DSPASpec) DeepCopyInto(out *DSPASpec) { *out = *in diff --git a/config/base/params.env b/config/base/params.env index cba78664..4716798a 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -1,12 +1,12 @@ -IMAGES_APISERVER=quay.io/opendatahub/ds-pipelines-api-server@sha256:c8e4e667654b58f15ab62f7247f566b66a6550d328f61f342b5fa5cfcdf2abd7 -IMAGES_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager@sha256:1faf2562d81dcfcadb0073cd297dcab9a4e5a3b30c402c4740f0916c1008436b -IMAGES_PERSISTENTAGENT=quay.io/opendatahub/ds-pipelines-persistenceagent@sha256:98bcd663fd5bf82b99059a9a6faa3f9fedc3b6097cc266d10f1c1d7954850607 -IMAGES_SCHEDULEDWORKFLOW=quay.io/opendatahub/ds-pipelines-scheduledworkflow@sha256:24cb35cce3aefec6462131d43b04ed0a5e98412199dae063cb7b6ea088b1fb07 +IMAGES_APISERVER=quay.io/opendatahub/ds-pipelines-api-server@sha256:49d2f183e97c9944ed83fc0c6f3f8d4a1f4561639107892cc23e088d2f26d699 +IMAGES_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager@sha256:ddcad41425181ee796f318e7c3f8970c086995f3fb01c93ff4601745381fb15b +IMAGES_PERSISTENTAGENT=quay.io/opendatahub/ds-pipelines-persistenceagent@sha256:6942324993f37f4ff43d8703a88ed221dbe320643f6b8335c899f8efd3f1e608 +IMAGES_SCHEDULEDWORKFLOW=quay.io/opendatahub/ds-pipelines-scheduledworkflow@sha256:22dc3eeac00782a02afbd6195c5d698170c5c84eb399c3b780e1a84706ebd987 IMAGES_MLMDENVOY=quay.io/opendatahub/ds-pipelines-metadata-envoy@sha256:c491e63c8885c7d59005f9305b77cd1fa776b50e63db90c4f8ccdee963759630 IMAGES_MLMDGRPC=quay.io/opendatahub/ds-pipelines-metadata-grpc@sha256:4af88c246d77cce33099489090508734978aafa83a0a5745408ae8d139d5378a -IMAGES_MLMDWRITER=quay.io/opendatahub/ds-pipelines-metadata-writer@sha256:0987335a44fadd140d52b5bae37463f4b8dcbe5d59becf94e866975d1b8f1a30 
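A minimal sketch of what the new `^[a-zA-Z0-9_]+$` validation on the MariaDB username and database name accepts; the isValidMariaDBIdentifier helper below is hypothetical — the real enforcement happens in the Kubernetes API server via the CRD pattern added above.

package main

import (
	"fmt"
	"regexp"
)

// mariaDBIdentifierPattern mirrors the kubebuilder validation pattern added to
// the MariaDB Username and DBName fields in dspipeline_types.go.
var mariaDBIdentifierPattern = regexp.MustCompile(`^[a-zA-Z0-9_]+$`)

// isValidMariaDBIdentifier is a hypothetical helper; in the operator the check
// is performed by the API server against the CRD pattern, not in Go code.
func isValidMariaDBIdentifier(s string) bool {
	return mariaDBIdentifierPattern.MatchString(s)
}

func main() {
	fmt.Println(isValidMariaDBIdentifier("mlpipeline"))       // true: the default value
	fmt.Println(isValidMariaDBIdentifier("ml-pipeline;drop")) // false: '-' and ';' are rejected
}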
-IMAGES_DSPO=quay.io/opendatahub/data-science-pipelines-operator@sha256:4bfb9b5591e40943bec23a729e9c6a176e4ac790ac9cf9efee781f832ad00242 -IMAGES_CACHE=registry.access.redhat.com/ubi8/ubi-minimal@sha256:7394c071ed74ace08cfd51f881c94067fa7a570e7f7e4a0ef0aff1b4f6a2a949 -IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro@sha256:98f8ddc69b6210001351a5fd07993b3a758bc6af3702319493f7a5582dd65a9a -IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103@sha256:d0eea30ae4fc8c5bb06d0e4d61d92fba9c0ae40b8023f72702301b70a7537faa +IMAGES_MLMDWRITER=quay.io/opendatahub/ds-pipelines-metadata-writer@sha256:612c4992e27aa6c21d4bf94f07e227c8e260e8009097002813ad39fab34ebb70 +IMAGES_DSPO=quay.io/opendatahub/data-science-pipelines-operator@sha256:c767076abbd106932a56893bfe419ddfb0af89415664d5572758324db9a96cd6 +IMAGES_CACHE=registry.access.redhat.com/ubi8/ubi-minimal@sha256:a49924d9d685a35b2d0817ffe9c84f3429d97e9ad29ed3816c389f45564c9e19 +IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro@sha256:396baed3d689157d96aa7d8988fdfea7eb36684c8335eb391cf1952573e689c1 +IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103@sha256:2370ca7fd91c0d35910006c8ba40bb9651244b2a42050cb9b2dd9d5163155e15 IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy@sha256:ab112105ac37352a2a4916a39d6736f5db6ab4c29bad4467de8d613e80e9bb33 diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index 2f4bb903..1e5136c7 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -58,6 +58,23 @@ spec: autoUpdatePipelineDefaultVersion: default: true type: boolean + cABundle: + description: If the Object store/DB is behind a TLS secured connection + that is unrecognized by the host OpenShift/K8s cluster, then + you can provide a PEM formatted CA bundle to be injected into + the DSP server pod to trust this connection. CA Bundle should + be provided as values within configmaps, mapped to keys. + properties: + configMapKey: + description: Key should map to a CA bundle. The key is also + used to name the CA bundle file (e.g. ca-bundle.crt) + type: string + configMapName: + type: string + required: + - configMapKey + - configMapName + type: object cacheImage: type: string collectMetrics: @@ -187,6 +204,9 @@ spec: type: object pipelineDBName: default: mlpipeline + description: The database name that will be created. Should + match `^[a-zA-Z0-9_]+` + pattern: ^[a-zA-Z0-9_]+$ type: string pvcSize: anyOf: @@ -235,6 +255,9 @@ spec: type: object username: default: mlpipeline + description: The MariadB username that will be created. 
Should + match `^[a-zA-Z0-9_]+` + pattern: ^[a-zA-Z0-9_]+$ type: string type: object type: object diff --git a/config/internal/apiserver/artifact_script.yaml.tmpl b/config/internal/apiserver/artifact_script.yaml.tmpl index ed2d9f7d..731cea1d 100644 --- a/config/internal/apiserver/artifact_script.yaml.tmpl +++ b/config/internal/apiserver/artifact_script.yaml.tmpl @@ -6,13 +6,22 @@ data: workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) artifact_name=$(basename $2) + + aws_cp() { +{{ if .APIServer.CABundle }} + aws s3 --endpoint {{.ObjectStorageConnection.Endpoint}} --ca-bundle {{ .PiplinesCABundleMountPath }}/{{ .APIServer.CABundle.ConfigMapKey }} cp $1.tgz s3://{{.ObjectStorageConnection.Bucket}}/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz +{{ else }} + aws s3 --endpoint {{.ObjectStorageConnection.Endpoint}} cp $1.tgz s3://{{.ObjectStorageConnection.Bucket}}/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz +{{ end }} + } + if [ -f "$workspace_dest/$artifact_name" ]; then echo sending to: ${workspace_dest}/${artifact_name} tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} - aws s3 --endpoint {{.ObjectStorageConnection.Endpoint}} cp $1.tgz s3://{{.ObjectStorageConnection.Bucket}}/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 elif [ -f "$2" ]; then tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} - aws s3 --endpoint {{.ObjectStorageConnection.Endpoint}} cp $1.tgz s3://{{.ObjectStorageConnection.Bucket}}/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 else echo "$2 file does not exist. Skip artifact tracking for $1" fi diff --git a/config/internal/apiserver/deployment.yaml.tmpl b/config/internal/apiserver/deployment.yaml.tmpl index 2e42d702..f9fe2e72 100644 --- a/config/internal/apiserver/deployment.yaml.tmpl +++ b/config/internal/apiserver/deployment.yaml.tmpl @@ -50,6 +50,14 @@ spec: value: "{{.APIServer.ArtifactImage}}" - name: ARCHIVE_LOGS value: "{{.APIServer.ArchiveLogs}}" + {{ if .APIServer.CABundle }} + - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_NAME + value: "{{.APIServer.CABundle.ConfigMapName}}" + - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_KEY + value: "{{.APIServer.CABundle.ConfigMapKey}}" + - name: ARTIFACT_COPY_STEP_CABUNDLE_MOUNTPATH + value: {{ .PiplinesCABundleMountPath }} + {{ end }} - name: TRACK_ARTIFACTS value: "{{.APIServer.TrackArtifacts}}" - name: STRIP_EOF @@ -145,13 +153,19 @@ spec: memory: {{.APIServer.Resources.Limits.Memory}} {{ end }} {{ end }} - {{ if .APIServer.EnableSamplePipeline }} + {{ if or .APIServer.EnableSamplePipeline .APIServer.CABundle }} volumeMounts: + {{ if .APIServer.EnableSamplePipeline }} - name: sample-config mountPath: /config/sample_config.json subPath: sample_config.json - name: sample-pipeline mountPath: /samples/ + {{ end }} + {{ if .APIServer.CABundle }} + - mountPath: {{ .APIServerPiplinesCABundleMountPath }} + name: ca-bundle + {{ end }} {{ end }} {{ if .APIServer.EnableRoute }} - name: oauth-proxy @@ -206,6 +220,14 @@ spec: - name: proxy-tls secret: secretName: ds-pipelines-proxy-tls-{{.Name}} + {{ if .APIServer.CABundle }} + - name: ca-bundle + configMap: + name: {{ .APIServer.CABundle.ConfigMapName }} + items: + - key: {{ .APIServer.CABundle.ConfigMapKey }} + path: {{ .APIServer.CABundle.ConfigMapKey }} + {{ end }} {{ if .APIServer.EnableSamplePipeline }} - name: sample-config configMap: diff --git a/controllers/config/defaults.go b/controllers/config/defaults.go 
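A minimal, standalone sketch of how the `{{ if .APIServer.CABundle }}` blocks in the templates above render, assuming trimmed-down stand-ins for the real DSPAParams and CABundle types (the actual structs live in controllers/dspipeline_params.go and api/v1alpha1); the ConfigMap name and key values are illustrative only.

package main

import (
	"os"
	"text/template"
)

// Stand-ins for the fields the deployment template reads; only the names used
// by the snippet below are reproduced here.
type CABundle struct {
	ConfigMapName string
	ConfigMapKey  string
}

type params struct {
	APIServer                 struct{ CABundle *CABundle }
	PiplinesCABundleMountPath string
}

const envSnippet = `{{ if .APIServer.CABundle }}
- name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_NAME
  value: "{{ .APIServer.CABundle.ConfigMapName }}"
- name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_KEY
  value: "{{ .APIServer.CABundle.ConfigMapKey }}"
- name: ARTIFACT_COPY_STEP_CABUNDLE_MOUNTPATH
  value: {{ .PiplinesCABundleMountPath }}
{{ end }}`

func main() {
	tmpl := template.Must(template.New("env").Parse(envSnippet))

	// With CABundle unset the whole block renders empty, so DSPAs that do not
	// set spec.apiServer.cABundle are unchanged by this feature.
	p := params{PiplinesCABundleMountPath: "/etc/pki/tls/certs"}
	_ = tmpl.Execute(os.Stdout, p)

	// With it set, the extra env vars appear in the generated manifest.
	p.APIServer.CABundle = &CABundle{ConfigMapName: "my-ca", ConfigMapKey: "ca-bundle.crt"}
	_ = tmpl.Execute(os.Stdout, p)
}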
index 4abfdb23..b13ef3c7 100644 --- a/controllers/config/defaults.go +++ b/controllers/config/defaults.go @@ -24,12 +24,13 @@ import ( ) const ( - DefaultImageValue = "MustSetInConfig" - - MLPipelineUIConfigMapPrefix = "ds-pipeline-ui-configmap-" - ArtifactScriptConfigMapNamePrefix = "ds-pipeline-artifact-script-" - ArtifactScriptConfigMapKey = "artifact_script" - DSPServicePrefix = "ds-pipeline" + DefaultImageValue = "MustSetInConfig" + APIServerPiplinesCABundleMountPath = "/etc/pki/tls/certs" + PiplinesCABundleMountPath = "/etc/pki/tls/certs" + MLPipelineUIConfigMapPrefix = "ds-pipeline-ui-configmap-" + ArtifactScriptConfigMapNamePrefix = "ds-pipeline-artifact-script-" + ArtifactScriptConfigMapKey = "artifact_script" + DSPServicePrefix = "ds-pipeline" DBSecretNamePrefix = "ds-pipeline-db-" DBSecretKey = "password" diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 1315ae83..a19b2446 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -223,7 +223,7 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. // Get Prereq Status (DB and ObjStore Ready) dbAvailable := r.isDatabaseAccessible(ctx, dspa, params) objStoreAvailable := r.isObjectStorageAccessible(ctx, dspa, params) - dspaPrereqsReady := (dbAvailable && objStoreAvailable) + dspaPrereqsReady := dbAvailable && objStoreAvailable if dspaPrereqsReady { // Manage Common Manifests diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index b4ef158d..c5101cd0 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -36,18 +36,21 @@ import ( ) type DSPAParams struct { - Name string - Namespace string - Owner mf.Owner - APIServer *dspa.APIServer - APIServerServiceName string - OAuthProxy string - ScheduledWorkflow *dspa.ScheduledWorkflow - PersistenceAgent *dspa.PersistenceAgent - MlPipelineUI *dspa.MlPipelineUI - MariaDB *dspa.MariaDB - Minio *dspa.Minio - MLMD *dspa.MLMD + Name string + Namespace string + Owner mf.Owner + APIServer *dspa.APIServer + APIServerPiplinesCABundleMountPath string + PiplinesCABundleMountPath string + APIServerServiceName string + APICustomPemCerts []byte + OAuthProxy string + ScheduledWorkflow *dspa.ScheduledWorkflow + PersistenceAgent *dspa.PersistenceAgent + MlPipelineUI *dspa.MlPipelineUI + MariaDB *dspa.MariaDB + Minio *dspa.Minio + MLMD *dspa.MLMD DBConnection ObjectStorageConnection } @@ -435,8 +438,8 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip p.Minio = dsp.Spec.ObjectStorage.Minio.DeepCopy() p.OAuthProxy = config.GetStringConfigWithDefault(config.OAuthProxyImagePath, config.DefaultImageValue) p.MLMD = dsp.Spec.MLMD.DeepCopy() - - // TODO: If p. 
is nil we should create defaults + p.APIServerPiplinesCABundleMountPath = config.APIServerPiplinesCABundleMountPath + p.PiplinesCABundleMountPath = config.PiplinesCABundleMountPath if p.APIServer != nil { @@ -458,7 +461,20 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip Key: config.ArtifactScriptConfigMapKey, } } + + // If a Custom CA Bundle is specified for injection into DSP API Server Pod + // then retrieve the bundle to utilize during storage health check + if p.APIServer.CABundle != nil { + cfgKey, cfgName := p.APIServer.CABundle.ConfigMapKey, p.APIServer.CABundle.ConfigMapName + err, val := util.GetConfigMapValue(ctx, cfgKey, cfgName, p.Namespace, client, log) + if err != nil { + log.Error(err, "Encountered error when attempting to retrieve CABundle from configmap") + return err + } + p.APICustomPemCerts = []byte(val) + } } + if p.PersistenceAgent != nil { persistenceAgentImageFromConfig := config.GetStringConfigWithDefault(config.PersistenceAgentImagePath, config.DefaultImageValue) setStringDefault(persistenceAgentImageFromConfig, &p.PersistenceAgent.Image) diff --git a/controllers/storage.go b/controllers/storage.go index b06a19db..fedc3a2b 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -18,13 +18,16 @@ package controllers import ( "context" + "crypto/x509" "encoding/base64" + "errors" "fmt" "github.com/go-logr/logr" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" "github.com/opendatahub-io/data-science-pipelines-operator/controllers/config" + "github.com/opendatahub-io/data-science-pipelines-operator/controllers/util" "net/http" ) @@ -38,11 +41,14 @@ var storageTemplates = []string{ storageSecret, } -func joinHostPort(host, port string) string { +func joinHostPort(host, port string) (string, error) { + if host == "" { + return "", errors.New("Object Storage Connection missing host") + } if port == "" { - return host + return host, nil } - return fmt.Sprintf("%s:%s", host, port) + return fmt.Sprintf("%s:%s", host, port), nil } func createCredentialProvidersChain(accessKey, secretKey string) *credentials.Credentials { @@ -63,12 +69,46 @@ func createCredentialProvidersChain(accessKey, secretKey string) *credentials.Cr return credentials.New(&credentials.Chain{Providers: providers}) } -var ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool) bool { +func getHttpsTransportWithCACert(log logr.Logger, pemCerts []byte) (*http.Transport, error) { + transport, err := minio.DefaultTransport(true) + if err != nil { + return nil, fmt.Errorf("Error creating default transport : %s", err) + } + + if transport.TLSClientConfig.RootCAs == nil { + pool, err := x509.SystemCertPool() + if err != nil { + log.Error(err, "error initializing TLS Pool: %s") + transport.TLSClientConfig.RootCAs = x509.NewCertPool() + } else { + transport.TLSClientConfig.RootCAs = pool + } + } + + if ok := transport.TLSClientConfig.RootCAs.AppendCertsFromPEM(pemCerts); !ok { + return nil, fmt.Errorf("error parsing CA Certificate, ensure provided certs are in valid PEM format") + } + return transport, nil +} + +var ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { cred := createCredentialProvidersChain(string(accesskey), string(secretkey)) - minioClient, err := 
minio.New(endpoint, &minio.Options{ + + opts := &minio.Options{ Creds: cred, Secure: secure, - }) + } + + if len(pemCerts) != 0 { + tr, err := getHttpsTransportWithCACert(log, pemCerts) + if err != nil { + log.Error(err, "Encountered error when processing custom ca bundle.") + return false + } + opts.Transport = tr + } + + minioClient, err := minio.New(endpoint, opts) if err != nil { log.Info(fmt.Sprintf("Could not connect to object storage endpoint: %s", endpoint)) return false @@ -88,6 +128,15 @@ var ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoin return true } } + + if util.IsX509UnknownAuthorityError(err) { + log.Error(err, "Encountered x509 UnknownAuthorityError when connecting to ObjectStore. "+ + "If using an tls S3 connection with self-signed certs, you may specify a custom CABundle "+ + "to mount on the DSP API Server via the DSPA cr under the spec.cABundle field. If you have already "+ + "provided a CABundle, verify the validity of the provided CABundle.") + return false + } + // Every other error means the endpoint in inaccessible, or the credentials provided do not have, at a minimum GetObject, permissions log.Info(fmt.Sprintf("Could not connect to (%s), Error: %s", endpoint, err.Error())) return false @@ -107,7 +156,12 @@ func (r *DSPAReconciler) isObjectStorageAccessible(ctx context.Context, dsp *dsp log.Info("Performing Object Storage Health Check") - endpoint := joinHostPort(params.ObjectStorageConnection.Host, params.ObjectStorageConnection.Port) + endpoint, err := joinHostPort(params.ObjectStorageConnection.Host, params.ObjectStorageConnection.Port) + if err != nil { + log.Error(err, "Could not determine Object Storage Endpoint") + return false + } + accesskey, err := base64.StdEncoding.DecodeString(params.ObjectStorageConnection.AccessKeyID) if err != nil { log.Error(err, "Could not decode Object Storage Access Key ID") @@ -120,7 +174,7 @@ func (r *DSPAReconciler) isObjectStorageAccessible(ctx context.Context, dsp *dsp return false } - verified := ConnectAndQueryObjStore(ctx, log, endpoint, params.ObjectStorageConnection.Bucket, accesskey, secretkey, *params.ObjectStorageConnection.Secure) + verified := ConnectAndQueryObjStore(ctx, log, endpoint, params.ObjectStorageConnection.Bucket, accesskey, secretkey, *params.ObjectStorageConnection.Secure, params.APICustomPemCerts) if verified { log.Info("Object Storage Health Check Successful") } else { diff --git a/controllers/storage_test.go b/controllers/storage_test.go index db625a8e..f2dfed10 100644 --- a/controllers/storage_test.go +++ b/controllers/storage_test.go @@ -18,8 +18,12 @@ limitations under the License. 
package controllers import ( + "context" + "encoding/base64" "testing" + "github.com/go-logr/logr" + "github.com/minio/minio-go/v7/pkg/credentials" dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" "github.com/stretchr/testify/assert" @@ -86,6 +90,7 @@ func TestDeployStorage(t *testing.T) { assert.True(t, created) assert.Nil(t, err) } + func TestDontDeployStorage(t *testing.T) { testNamespace := "testnamespace" testDSPAName := "testdspa" @@ -181,3 +186,293 @@ func TestDefaultDeployBehaviorStorage(t *testing.T) { assert.False(t, created) assert.Nil(t, err) } + +func TestIsDatabaseAccessibleTrue(t *testing.T) { + // Override the live connection function with a mock version + ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { + return true + } + + testNamespace := "testnamespace" + testDSPAName := "testdspa" + + // Minimal Inputs + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + ObjectStorage: &dspav1alpha1.ObjectStorage{ + DisableHealthCheck: false, + }, + }, + } + dspa.Name = testDSPAName + dspa.Namespace = testNamespace + + // Create Context, Fake Controller and Params (unused) + ctx, _, reconciler := CreateNewTestObjects() + + SecureConnection := false + params := &DSPAParams{ + ObjectStorageConnection: ObjectStorageConnection{ + Host: "foo", + Port: "1337", + Secure: &SecureConnection, + AccessKeyID: base64.StdEncoding.EncodeToString([]byte("fooaccesskey")), + SecretAccessKey: base64.StdEncoding.EncodeToString([]byte("foosecretkey")), + }, + } + + verified := reconciler.isObjectStorageAccessible(ctx, dspa, params) + assert.True(t, verified) +} + +func TestIsDatabaseNotAccessibleFalse(t *testing.T) { + // Override the live connection function with a mock version + ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { + return false + } + + testNamespace := "testnamespace" + testDSPAName := "testdspa" + + // Minimal Inputs + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + ObjectStorage: &dspav1alpha1.ObjectStorage{ + DisableHealthCheck: false, + }, + }, + } + dspa.Name = testDSPAName + dspa.Namespace = testNamespace + + // Create Context, Fake Controller and Params (unused) + ctx, _, reconciler := CreateNewTestObjects() + + SecureConnection := false + params := &DSPAParams{ + ObjectStorageConnection: ObjectStorageConnection{ + Host: "foo", + Port: "1337", + Secure: &SecureConnection, + AccessKeyID: base64.StdEncoding.EncodeToString([]byte("fooaccesskey")), + SecretAccessKey: base64.StdEncoding.EncodeToString([]byte("foosecretkey")), + }, + } + + verified := reconciler.isObjectStorageAccessible(ctx, dspa, params) + assert.False(t, verified) +} + +func TestDisabledHealthCheckReturnsTrue(t *testing.T) { + // Override the live connection function with a mock version that would always return false if called + ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { + return false + } + + testNamespace := "testnamespace" + testDSPAName := "testdspa" + + // Minimal Inputs + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + ObjectStorage: &dspav1alpha1.ObjectStorage{ + DisableHealthCheck: true, + }, + }, + } + dspa.Name = testDSPAName 
+ dspa.Namespace = testNamespace + + // Create Context, Fake Controller and Params (unused) + ctx, _, reconciler := CreateNewTestObjects() + + SecureConnection := false + params := &DSPAParams{ + ObjectStorageConnection: ObjectStorageConnection{ + Host: "foo", + Port: "1337", + Secure: &SecureConnection, + AccessKeyID: base64.StdEncoding.EncodeToString([]byte("fooaccesskey")), + SecretAccessKey: base64.StdEncoding.EncodeToString([]byte("foosecretkey")), + }, + } + + verified := reconciler.isObjectStorageAccessible(ctx, dspa, params) + // if health check is disabled this should always return True + // even thought the mock connection function would return false if called + assert.True(t, verified) +} + +func TestIsDatabaseAccessibleBadAccessKey(t *testing.T) { + // Override the live connection function with a mock version + ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { + return true + } + + testNamespace := "testnamespace" + testDSPAName := "testdspa" + + // Minimal Inputs + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + ObjectStorage: &dspav1alpha1.ObjectStorage{ + DisableHealthCheck: false, + }, + }, + } + dspa.Name = testDSPAName + dspa.Namespace = testNamespace + + // Create Context, Fake Controller and Params (unused) + ctx, _, reconciler := CreateNewTestObjects() + + SecureConnection := false + params := &DSPAParams{ + ObjectStorageConnection: ObjectStorageConnection{ + Host: "foo", + Port: "1337", + Secure: &SecureConnection, + AccessKeyID: "this-is-not-a-base64-encoded-string", + SecretAccessKey: base64.StdEncoding.EncodeToString([]byte("foosecretkey")), + }, + } + + verified := reconciler.isObjectStorageAccessible(ctx, dspa, params) + assert.False(t, verified) +} + +func TestIsDatabaseAccessibleBadSecretKey(t *testing.T) { + // Override the live connection function with a mock version + ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { + return true + } + + testNamespace := "testnamespace" + testDSPAName := "testdspa" + + // Minimal Inputs + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + ObjectStorage: &dspav1alpha1.ObjectStorage{ + DisableHealthCheck: false, + }, + }, + } + dspa.Name = testDSPAName + dspa.Namespace = testNamespace + + // Create Context, Fake Controller and Params (unused) + ctx, _, reconciler := CreateNewTestObjects() + + SecureConnection := false + params := &DSPAParams{ + ObjectStorageConnection: ObjectStorageConnection{ + Host: "foo", + Port: "1337", + Secure: &SecureConnection, + AccessKeyID: base64.StdEncoding.EncodeToString([]byte("fooaccesskey")), + SecretAccessKey: "this-is-not-a-base64-encoded-string", + }, + } + + verified := reconciler.isObjectStorageAccessible(ctx, dspa, params) + assert.False(t, verified) +} + +func TestJoinHostPort(t *testing.T) { + tests := map[string]struct { + host string + port string + expectedResult string + expectedError bool + }{ + "host and port defined": {host: "somehost", port: "1234", expectedResult: "somehost:1234", expectedError: false}, + "empty port": {host: "somehost", port: "", expectedResult: "somehost", expectedError: false}, + "empty host": {host: "", port: "1234", expectedResult: "", expectedError: true}, + "both empty": {host: "", port: "", expectedResult: "", expectedError: true}, + } + + for _, test := range 
tests { + actualResult, actualError := joinHostPort(test.host, test.port) + if test.expectedError { + assert.NotNil(t, actualError) + } else { + assert.Equal(t, test.expectedResult, actualResult) + assert.Nil(t, actualError) + } + } +} + +func TestCreateCredentialProvidersChain(t *testing.T) { + tests := map[string]struct { + accesskey string + secretkey string + expectedSigType credentials.SignatureType + }{ + "both keys defined": { + accesskey: "fakeaccesskey", + secretkey: "fakesecretkey", + expectedSigType: credentials.SignatureV4, + }, + "no access key": { + accesskey: "", + secretkey: "fakesecretkey", + expectedSigType: credentials.SignatureAnonymous, + }, + "no secret key": { + accesskey: "fakeaccesskey", + secretkey: "", + expectedSigType: credentials.SignatureAnonymous, + }, + "both keys empty": { + accesskey: "", + secretkey: "", + expectedSigType: credentials.SignatureAnonymous, + }, + } + + // Run Tests + for _, test := range tests { + actual := createCredentialProvidersChain(test.accesskey, test.secretkey) + actualCreds, err := actual.Get() + assert.Nil(t, err) + + actualSigType := actualCreds.SignerType + assert.Equal(t, test.expectedSigType, actualSigType) + } +} + +func TestGetHttpsTransportWithCACert(t *testing.T) { + validCert := ` +-----BEGIN CERTIFICATE----- +MIIDUTCCAjmgAwIBAgIINk8kYK1jtAYwDQYJKoZIhvcNAQELBQAwNjE0MDIGA1UE +Awwrb3BlbnNoaWZ0LXNlcnZpY2Utc2VydmluZy1zaWduZXJAMTY5NzQ4MDY4NjAe +Fw0yMzEwMTYxODI0NDVaFw0yNTEyMTQxODI0NDZaMDYxNDAyBgNVBAMMK29wZW5z +aGlmdC1zZXJ2aWNlLXNlcnZpbmctc2lnbmVyQDE2OTc0ODA2ODYwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDzSg9LmRucYyv9OUbMjbTGlvLFXl9+vsKd +rdZEq+jR5jr+lhxvU06rezHcTn7hXmm9g66YQhjfJ239VSh/YkQFqlaGY89lEtfr +fJzAkxpX0xmPhjAQ4fpsBs6LfkgC2v846oR2+gsI5hh5VuWNRS6BJlgRIQYUHBqM +p/d8QghkST1mheZKQZh4V9L1aB4Hgo4SCPNVGa/t0Q5sBZmlvC+6JqxsZW8miF/v +rs0oqm9dwhyAsTuLdDAD4bnLPXBQD7z+aq87uBNWcOrl0p/TdJy85lhE0dmbVKS6 +c21lQ4Va5JNje25fJmtEviFDAVXc/akMWSHf94ZfbWN8eah29oHNAgMBAAGjYzBh +MA4GA1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRxBglm +SbrHzzijOCr6EQ2LOTZi5jAfBgNVHSMEGDAWgBRxBglmSbrHzzijOCr6EQ2LOTZi +5jANBgkqhkiG9w0BAQsFAAOCAQEAeENDkKOUebgjb5Jg3d0WLHjqF+xMofXo1Gvg +wkfrZ35hQTMOiFUAffyPRoMfZOJ5x4zUVPkXN1qjVe/oIc19EFgb7ppDXUTJDndu +4RfZCF/yim5C6vUFmPPHjbFxnJIo85pKWGLwGg79iTnExDYMUUg5pRfK1uNgfro9 +jEtEoP3F3YVZ8g75TF70Ad9AHPWD2c1D8xOI4XwFvyi5BJJ+jsChl1e3v8D07ohj +Em/2fyF49JL+vAPFMWRFpaExUr3gMbELo4YABQGg024d623LK0ienEF0p4jMVNbP +S9IA40yOaVHMI51Fr1i1EIWvP8oJY8rAPWq45JnfFen3tOqKfw== +-----END CERTIFICATE----- +` + _, _, reconciler := CreateNewTestObjects() + + transport, err := getHttpsTransportWithCACert(reconciler.Log, []byte(validCert)) + assert.Nil(t, err) + assert.NotNil(t, transport) + + invalidCert := "invalidCert" + transport, err = getHttpsTransportWithCACert(reconciler.Log, []byte(invalidCert)) + assert.NotNil(t, err) + assert.Nil(t, transport) +} diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 16853901..c8e9c280 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -76,7 +76,7 @@ var _ = BeforeEach(func() { ConnectAndQueryDatabase = func(host string, port string, username string, password string, dbname string) bool { return true } - ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool) bool { + ConnectAndQueryObjStore = func(ctx context.Context, log logr.Logger, endpoint, bucket string, accesskey, secretkey []byte, secure bool, pemCerts []byte) bool { return true } }) diff --git 
a/controllers/testdata/declarative/case_0/expected/created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_0/expected/created/configmap_artifact_script.yaml index 5863fd2d..cc7a02b1 100644 --- a/controllers/testdata/declarative/case_0/expected/created/configmap_artifact_script.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/configmap_artifact_script.yaml @@ -6,13 +6,20 @@ data: workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp0.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + if [ -f "$workspace_dest/$artifact_name" ]; then echo sending to: ${workspace_dest}/${artifact_name} tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} - aws s3 --endpoint http://minio-testdsp0.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 elif [ -f "$2" ]; then tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} - aws s3 --endpoint http://minio-testdsp0.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 else echo "$2 file does not exist. Skip artifact tracking for $1" fi diff --git a/controllers/testdata/declarative/case_2/expected/created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_2/expected/created/configmap_artifact_script.yaml index beb35896..ad0f15ce 100644 --- a/controllers/testdata/declarative/case_2/expected/created/configmap_artifact_script.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/configmap_artifact_script.yaml @@ -6,13 +6,20 @@ data: workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp2.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + if [ -f "$workspace_dest/$artifact_name" ]; then echo sending to: ${workspace_dest}/${artifact_name} tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} - aws s3 --endpoint http://minio-testdsp2.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 elif [ -f "$2" ]; then tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} - aws s3 --endpoint http://minio-testdsp2.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 else echo "$2 file does not exist. 
Skip artifact tracking for $1" fi diff --git a/controllers/testdata/declarative/case_3/expected/not_created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_3/expected/not_created/configmap_artifact_script.yaml index 2c1b87ec..3c41745d 100644 --- a/controllers/testdata/declarative/case_3/expected/not_created/configmap_artifact_script.yaml +++ b/controllers/testdata/declarative/case_3/expected/not_created/configmap_artifact_script.yaml @@ -3,9 +3,23 @@ data: somekey: |- #!/usr/bin/env sh push_artifact() { - if [ -f "$2" ]; then - tar -cvzf $1.tgz $2 - aws s3 --endpoint http://minio-testdsp3.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") + workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) + artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp3.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + + if [ -f "$workspace_dest/$artifact_name" ]; then + echo sending to: ${workspace_dest}/${artifact_name} + tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} + aws_cp $1 + elif [ -f "$2" ]; then + tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} + aws_cp $1 else echo "$2 file does not exist. Skip artifact tracking for $1" fi diff --git a/controllers/testdata/declarative/case_4/expected/created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_4/expected/created/configmap_artifact_script.yaml index cc4ba319..e0bddf31 100644 --- a/controllers/testdata/declarative/case_4/expected/created/configmap_artifact_script.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/configmap_artifact_script.yaml @@ -6,13 +6,20 @@ data: workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp4.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + if [ -f "$workspace_dest/$artifact_name" ]; then echo sending to: ${workspace_dest}/${artifact_name} tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} - aws s3 --endpoint http://minio-testdsp4.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 elif [ -f "$2" ]; then tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} - aws s3 --endpoint http://minio-testdsp4.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 else echo "$2 file does not exist. 
Skip artifact tracking for $1" fi diff --git a/controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml index e384c59c..33aebad0 100644 --- a/controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/configmap_artifact_script.yaml @@ -6,13 +6,20 @@ data: workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp5.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + if [ -f "$workspace_dest/$artifact_name" ]; then echo sending to: ${workspace_dest}/${artifact_name} tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} - aws s3 --endpoint http://minio-testdsp5.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 elif [ -f "$2" ]; then tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} - aws s3 --endpoint http://minio-testdsp5.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 else echo "$2 file does not exist. Skip artifact tracking for $1" fi diff --git a/controllers/testdata/declarative/case_6/config.yaml b/controllers/testdata/declarative/case_6/config.yaml new file mode 100644 index 00000000..35a3506f --- /dev/null +++ b/controllers/testdata/declarative/case_6/config.yaml @@ -0,0 +1,12 @@ +# When a minimal DSPA is deployed +Images: + ApiServer: api-server:test6 + Artifact: artifact-manager:test6 + PersistentAgent: persistenceagent:test6 + ScheduledWorkflow: scheduledworkflow:test6 + Cache: ubi-minimal:test6 + MoveResultsImage: busybox:test6 + MlPipelineUI: frontend:test6 + MariaDB: mariadb:test6 + Minio: minio:test6 + OAuthProxy: oauth-proxy:test6 diff --git a/controllers/testdata/declarative/case_6/deploy/00_configmap.yaml b/controllers/testdata/declarative/case_6/deploy/00_configmap.yaml new file mode 100644 index 00000000..0b36acba --- /dev/null +++ b/controllers/testdata/declarative/case_6/deploy/00_configmap.yaml @@ -0,0 +1,6 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: testcabundleconfigmap6 +data: + testcabundleconfigmapkey6.crt: testcabundleconfigmapvalue6 diff --git a/controllers/testdata/declarative/case_6/deploy/01_cr.yaml b/controllers/testdata/declarative/case_6/deploy/01_cr.yaml new file mode 100644 index 00000000..a3e909ec --- /dev/null +++ b/controllers/testdata/declarative/case_6/deploy/01_cr.yaml @@ -0,0 +1,27 @@ +apiVersion: datasciencepipelinesapplications.opendatahub.io/v1alpha1 +kind: DataSciencePipelinesApplication +metadata: + name: testdsp6 +spec: + apiServer: + deploy: true + enableSamplePipeline: false + cABundle: + configMapName: testcabundleconfigmap6 + configMapKey: testcabundleconfigmapkey6.crt + persistenceAgent: + deploy: false + scheduledWorkflow: + deploy: false + mlpipelineUI: + deploy: false + image: frontend:test0 + database: + mariaDB: + deploy: false + objectStorage: + minio: + deploy: false + image: minio:test0 + mlmd: + deploy: false diff --git a/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml new file mode 100644 
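A hedged sketch of how the reconciler resolves the cABundle reference in the case_6 CR above into PEM text, assuming the util.GetConfigMapValue helper added later in this patch and a controller-runtime fake client; this is not an excerpt from the operator's test suite.

package main

import (
	"context"
	"fmt"

	"github.com/go-logr/logr"
	"github.com/opendatahub-io/data-science-pipelines-operator/controllers/util"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	// ConfigMap mirroring case_6's 00_configmap.yaml; the DSPA's
	// spec.apiServer.cABundle.{configMapName,configMapKey} point at it.
	cm := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "testcabundleconfigmap6", Namespace: "default"},
		Data:       map[string]string{"testcabundleconfigmapkey6.crt": "testcabundleconfigmapvalue6"},
	}

	cl := fake.NewClientBuilder().WithObjects(cm).Build()

	// GetConfigMapValue is the helper added in controllers/util/util.go; the
	// reconciler stores the returned value in params.APICustomPemCerts.
	err, pem := util.GetConfigMapValue(context.Background(),
		"testcabundleconfigmapkey6.crt", "testcabundleconfigmap6", "default", cl, logr.Discard())
	fmt.Println(err, pem) // <nil> testcabundleconfigmapvalue6
}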
index 00000000..96c5967e --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml @@ -0,0 +1,206 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-testdsp6 + namespace: default + labels: + app: ds-pipeline-testdsp6 + component: data-science-pipelines + dspa: testdsp6 +spec: + selector: + matchLabels: + app: ds-pipeline-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + template: + metadata: + labels: + app: ds-pipeline-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + spec: + containers: + - env: + - name: POD_NAMESPACE + value: "default" + - name: DBCONFIG_USER + value: "mlpipeline" + - name: DBCONFIG_PASSWORD + valueFrom: + secretKeyRef: + key: "password" + name: "ds-pipeline-db-testdsp6" + - name: DBCONFIG_DBNAME + value: "mlpipeline" + - name: DBCONFIG_HOST + value: "mariadb-testdsp6.default.svc.cluster.local" + - name: DBCONFIG_PORT + value: "3306" + - name: ARTIFACT_BUCKET + value: "mlpipeline" + - name: ARTIFACT_ENDPOINT + value: "http://minio-testdsp6.default.svc.cluster.local:9000" + - name: ARTIFACT_SCRIPT + valueFrom: + configMapKeyRef: + key: "artifact_script" + name: "ds-pipeline-artifact-script-testdsp6" + - name: ARTIFACT_IMAGE + value: "artifact-manager:test6" + - name: ARCHIVE_LOGS + value: "false" + - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_NAME + value: testcabundleconfigmap6 + - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_KEY + value: testcabundleconfigmapkey6.crt + - name: ARTIFACT_COPY_STEP_CABUNDLE_MOUNTPATH + value: /etc/pki/tls/certs + - name: TRACK_ARTIFACTS + value: "true" + - name: STRIP_EOF + value: "true" + - name: PIPELINE_RUNTIME + value: "tekton" + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-testdsp6" + - name: INJECT_DEFAULT_SCRIPT + value: "true" + - name: APPLY_TEKTON_CUSTOM_RESOURCE + value: "true" + - name: TERMINATE_STATUS + value: "Cancelled" + - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION + value: "true" + - name: DBCONFIG_CONMAXLIFETIMESEC + value: "120" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: "ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" + - name: OBJECTSTORECONFIG_BUCKETNAME + value: "mlpipeline" + - name: OBJECTSTORECONFIG_ACCESSKEY + valueFrom: + secretKeyRef: + key: "accesskey" + name: "mlpipeline-minio-artifact" + - name: OBJECTSTORECONFIG_SECRETACCESSKEY + valueFrom: + secretKeyRef: + key: "secretkey" + name: "mlpipeline-minio-artifact" + - name: OBJECTSTORECONFIG_SECURE + value: "false" + - name: MINIO_SERVICE_SERVICE_HOST + value: "minio-testdsp6.default.svc.cluster.local" + - name: MINIO_SERVICE_SERVICE_PORT + value: "9000" + - name: CACHE_IMAGE + value: "ubi-minimal:test6" + - name: MOVERESULTS_IMAGE + value: "busybox:test6" + image: api-server:test6 + imagePullPolicy: Always + name: ds-pipeline-api-server + ports: + - containerPort: 8888 + name: http + protocol: TCP + - containerPort: 8887 + name: grpc + protocol: TCP + livenessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + requests: + cpu: 250m + memory: 500Mi + limits: + cpu: 500m + memory: 1Gi + volumeMounts: + - name: ca-bundle + mountPath: 
/etc/pki/tls/certs + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-testdsp6 + - --upstream=http://localhost:8888 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp6","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp6","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test6 + ports: + - containerPort: 8443 + name: oauth + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + volumes: + - name: proxy-tls + secret: + secretName: ds-pipelines-proxy-tls-testdsp6 + defaultMode: 420 + - name: ca-bundle + configMap: + name: testcabundleconfigmap6 + items: + - key: testcabundleconfigmapkey6.crt + path: testcabundleconfigmapkey6.crt + defaultMode: 420 + serviceAccountName: ds-pipeline-testdsp6 diff --git a/controllers/testdata/declarative/case_6/expected/created/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_6/expected/created/configmap_artifact_script.yaml new file mode 100644 index 00000000..2cbb8402 --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/created/configmap_artifact_script.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +data: + artifact_script: |- + #!/usr/bin/env sh + push_artifact() { + workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") + workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) + artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp6.default.svc.cluster.local:9000 --ca-bundle /etc/pki/tls/certs/testcabundleconfigmapkey6.crt cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + + if [ -f "$workspace_dest/$artifact_name" ]; then + echo sending to: ${workspace_dest}/${artifact_name} + tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} + aws_cp $1 + elif [ -f "$2" ]; then + tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} + aws_cp $1 + else + echo "$2 file does not exist. Skip artifact tracking for $1" + fi + } + push_log() { + cat /var/log/containers/$PODNAME*$NAMESPACE*step-main*.log > step-main.log + push_artifact main-log step-main.log + } + strip_eof() { + if [ -f "$2" ]; then + awk 'NF' $2 | head -c -1 > $1_temp_save && cp $1_temp_save $2 + fi + } +kind: ConfigMap +metadata: + name: ds-pipeline-artifact-script-testdsp6 + namespace: default + labels: + app: ds-pipeline-testdsp5 + component: data-science-pipelines diff --git a/controllers/util/util.go b/controllers/util/util.go index 4e86338f..555ce093 100644 --- a/controllers/util/util.go +++ b/controllers/util/util.go @@ -17,8 +17,17 @@ limitations under the License. 
package util import ( + "context" + "crypto/x509" + "fmt" + "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "net/url" + "sigs.k8s.io/controller-runtime/pkg/client" ) // GetConditionByType returns condition of type condType if it exists in conditions, otherwise @@ -44,3 +53,35 @@ func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.Depl func BoolPointer(b bool) *bool { return &b } + +// IsX509UnknownAuthorityError checks whether an error is of type x509.UnknownAuthorityError. +func IsX509UnknownAuthorityError(err error) bool { + urlErr, ok := err.(*url.Error) + if !ok { + return false + } + _, ok = urlErr.Err.(x509.UnknownAuthorityError) + return ok +} + +// GetConfigMapValue fetches the value for the provided configmap mapped to a given key +func GetConfigMapValue(ctx context.Context, cfgKey, cfgName, ns string, client client.Client, log logr.Logger) (error, string) { + cfgMap := &v1.ConfigMap{} + namespacedName := types.NamespacedName{ + Name: cfgName, + Namespace: ns, + } + err := client.Get(ctx, namespacedName, cfgMap) + if err != nil && apierrs.IsNotFound(err) { + log.Error(err, fmt.Sprintf("ConfigMap [%s] was not found in namespace [%s]", cfgName, ns)) + return err, "" + } else if err != nil { + log.Error(err, fmt.Sprintf("Encountered error when attempting to fetch ConfigMap. [%s]..", cfgName)) + return err, "" + } + if val, ok := cfgMap.Data[cfgKey]; ok { + return nil, val + } else { + return fmt.Errorf("ConfigMap %s sdoes not contain specified key %s", cfgName, cfgKey), "" + } +} diff --git a/docs/example_pipelines/condition.py b/docs/example_pipelines/condition.py index 5408a05d..971295fa 100644 --- a/docs/example_pipelines/condition.py +++ b/docs/example_pipelines/condition.py @@ -24,11 +24,11 @@ def print_msg(msg: str): flip_coin_op = components.create_component_from_func( - flip_coin, base_image='quay.io/hukhan/python:alpine3.6') + flip_coin, base_image='registry.access.redhat.com/ubi8/python-39') print_op = components.create_component_from_func( - print_msg, base_image='quay.io/hukhan/python:alpine3.6') + print_msg, base_image='registry.access.redhat.com/ubi8/python-39') random_num_op = components.create_component_from_func( - random_num, base_image='quay.io/hukhan/python:alpine3.6') + random_num, base_image='registry.access.redhat.com/ubi8/python-39') @dsl.pipeline( @@ -54,4 +54,9 @@ def flipcoin_pipeline(): if __name__ == '__main__': from kfp_tekton.compiler import TektonCompiler - TektonCompiler().compile(flipcoin_pipeline, __file__.replace('.py', '.yaml')) + from kfp_tekton.compiler.pipeline_utils import TektonPipelineConf + config = TektonPipelineConf() + config.set_condition_image_name("registry.access.redhat.com/ubi8/python-39") + compiler = TektonCompiler() + compiler._set_pipeline_conf(config) + compiler.compile(flipcoin_pipeline, __file__.replace('.py', '.yaml')) diff --git a/docs/example_pipelines/condition.yaml b/docs/example_pipelines/condition.yaml index 4b601d13..77a0f6e2 100644 --- a/docs/example_pipelines/condition.yaml +++ b/docs/example_pipelines/condition.yaml @@ -81,7 +81,7 @@ spec: pass with open(output_file, 'w') as f: f.write(_output_serializers[idx](_outputs[idx])) - image: quay.io/hukhan/python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 results: - name: Output type: string @@ -91,7 +91,7 @@ spec: pipelines.kubeflow.org/cache_enabled: "true" annotations: 
pipelines.kubeflow.org/component_spec_digest: '{"name": "Flip coin", "outputs": - [{"name": "Output", "type": "String"}], "version": "Flip coin@sha256=2895da7ed0b52f45e6d6a4d8837e4c1ee4ba99e14e5877320155243c67e4610e"}' + [{"name": "Output", "type": "String"}], "version": "Flip coin@sha256=32d5bd05b9fa18850505b73d6fb8489cc61f83033306230c8e4da12bdd8890e0"}' - name: random-num taskSpec: steps: @@ -151,7 +151,7 @@ spec: pass with open(output_file, 'w') as f: f.write(_output_serializers[idx](_outputs[idx])) - image: quay.io/hukhan/python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 results: - name: Output type: string @@ -162,7 +162,7 @@ spec: annotations: pipelines.kubeflow.org/component_spec_digest: '{"name": "Random num", "outputs": [{"name": "Output", "type": "Integer"}], "version": "Random - num@sha256=28d899c9492aeb0e45072bedfa35313fbe788233de0fc7a14a81ff8ee071d627"}' + num@sha256=053403c9d093bbdb07a6da42e22012e69fa5132e38cc179dae5f3a629543650c"}' when: - input: $(tasks.condition-1.results.outcome) operator: in @@ -196,7 +196,7 @@ spec: _parsed_args = vars(_parser.parse_args()) _outputs = print_msg(**_parsed_args) - image: quay.io/hukhan/python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 params: - name: random-num-Output metadata: @@ -204,7 +204,7 @@ spec: pipelines.kubeflow.org/cache_enabled: "true" annotations: pipelines.kubeflow.org/component_spec_digest: '{"name": "Print msg", "outputs": - [], "version": "Print msg@sha256=c8484b77b0a3f045a812bd6b570ef17df03d4fde0ef50480243561a339f7b024"}' + [], "version": "Print msg@sha256=1d475b025fa0e9910c3c2827a8280bb0fb85abeba446658a944570e1de7f0f98"}' when: - input: $(tasks.condition-2.results.outcome) operator: in @@ -238,7 +238,7 @@ spec: _parsed_args = vars(_parser.parse_args()) _outputs = print_msg(**_parsed_args) - image: quay.io/hukhan/python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 params: - name: random-num-Output metadata: @@ -246,7 +246,7 @@ spec: pipelines.kubeflow.org/cache_enabled: "true" annotations: pipelines.kubeflow.org/component_spec_digest: '{"name": "Print msg", "outputs": - [], "version": "Print msg@sha256=c8484b77b0a3f045a812bd6b570ef17df03d4fde0ef50480243561a339f7b024"}' + [], "version": "Print msg@sha256=1d475b025fa0e9910c3c2827a8280bb0fb85abeba446658a944570e1de7f0f98"}' when: - input: $(tasks.condition-3.results.outcome) operator: in @@ -311,7 +311,7 @@ spec: pass with open(output_file, 'w') as f: f.write(_output_serializers[idx](_outputs[idx])) - image: quay.io/hukhan/python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 results: - name: Output type: string @@ -322,7 +322,7 @@ spec: annotations: pipelines.kubeflow.org/component_spec_digest: '{"name": "Random num", "outputs": [{"name": "Output", "type": "Integer"}], "version": "Random - num@sha256=28d899c9492aeb0e45072bedfa35313fbe788233de0fc7a14a81ff8ee071d627"}' + num@sha256=053403c9d093bbdb07a6da42e22012e69fa5132e38cc179dae5f3a629543650c"}' when: - input: $(tasks.condition-4.results.outcome) operator: in @@ -356,7 +356,7 @@ spec: _parsed_args = vars(_parser.parse_args()) _outputs = print_msg(**_parsed_args) - image: quay.io/hukhan/python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 params: - name: random-num-2-Output metadata: @@ -364,7 +364,7 @@ spec: pipelines.kubeflow.org/cache_enabled: "true" annotations: pipelines.kubeflow.org/component_spec_digest: '{"name": "Print msg", "outputs": - [], "version": "Print 
msg@sha256=c8484b77b0a3f045a812bd6b570ef17df03d4fde0ef50480243561a339f7b024"}' + [], "version": "Print msg@sha256=1d475b025fa0e9910c3c2827a8280bb0fb85abeba446658a944570e1de7f0f98"}' when: - input: $(tasks.condition-5.results.outcome) operator: in @@ -398,7 +398,7 @@ spec: _parsed_args = vars(_parser.parse_args()) _outputs = print_msg(**_parsed_args) - image: quay.io/hukhan/python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 params: - name: random-num-2-Output metadata: @@ -406,7 +406,7 @@ spec: pipelines.kubeflow.org/cache_enabled: "true" annotations: pipelines.kubeflow.org/component_spec_digest: '{"name": "Print msg", "outputs": - [], "version": "Print msg@sha256=c8484b77b0a3f045a812bd6b570ef17df03d4fde0ef50480243561a339f7b024"}' + [], "version": "Print msg@sha256=1d475b025fa0e9910c3c2827a8280bb0fb85abeba446658a944570e1de7f0f98"}' when: - input: $(tasks.condition-6.results.outcome) operator: in @@ -452,7 +452,7 @@ spec: f.close() - $(inputs.params.operand1) - $(inputs.params.operand2) - image: python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 - name: condition-2 params: - name: operand1 @@ -493,7 +493,7 @@ spec: f.close() - $(inputs.params.operand1) - $(inputs.params.operand2) - image: python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 when: - input: $(tasks.condition-1.results.outcome) operator: in @@ -539,7 +539,7 @@ spec: f.close() - $(inputs.params.operand1) - $(inputs.params.operand2) - image: python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 when: - input: $(tasks.condition-1.results.outcome) operator: in @@ -585,7 +585,7 @@ spec: f.close() - $(inputs.params.operand1) - $(inputs.params.operand2) - image: python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 - name: condition-5 params: - name: operand1 @@ -626,7 +626,7 @@ spec: f.close() - $(inputs.params.operand1) - $(inputs.params.operand2) - image: python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 when: - input: $(tasks.condition-4.results.outcome) operator: in @@ -672,7 +672,7 @@ spec: f.close() - $(inputs.params.operand1) - $(inputs.params.operand2) - image: python:alpine3.6 + image: registry.access.redhat.com/ubi8/python-39 when: - input: $(tasks.condition-4.results.outcome) operator: in diff --git a/docs/release/compatibility.md b/docs/release/compatibility.md index 86b9d95f..8e1ebd69 100644 --- a/docs/release/compatibility.md +++ b/docs/release/compatibility.md @@ -22,10 +22,11 @@ registries, this is true for the following: | dsp | kfp-tekton | ml-metadata | envoy | ocp-pipelines | oauth-proxy | mariadb-103 | ubi-minimal | ubi-micro | openshift | |-----|-----|-----|-----|-----|-----|-----|-----|-----|-----| -| 1.0.x | 1.5.1 | 1.5.0 | 1.8.4 | v4.10 | v4.12 | 1 | 8.8 | 8.8 | 4.10,4.11,4.12 | -| 1.1.x | 1.5.1 | 1.5.0 | 1.8.4 | v4.10 | v4.12 | 1 | 8.8 | 8.8 | 4.10,4.11,4.12 | -| 1.2.x | 1.5.1 | 1.5.0 | 1.8.4 | v4.10 | v4.10 | 1 | 8.8 | 8.8 | 4.10,4.11,4.12 | -| 1.3.x | 1.5.1 | 1.5.0 | 1.8.4 | v4.10 | v4.10 | 1 | 8.8 | 8.8 | 4.10,4.11,4.12 | +| 1.0.x | 1.5.1 | 1.5.0 | 1.8.4 | 1.8 | v4.12 | 1 | 8.8 | 8.8 | 4.10,4.11,4.12 | +| 1.1.x | 1.5.1 | 1.5.0 | 1.8.4 | 1.8 | v4.12 | 1 | 8.8 | 8.8 | 4.10,4.11,4.12 | +| 1.2.x | 1.5.1 | 1.5.0 | 1.8.4 | 1.8 | v4.10 | 1 | 8.8 | 8.8 | 4.10,4.11,4.12 | +| 1.3.x | 1.5.1 | 1.5.0 | 1.8.4 | 1.8 | v4.10 | 1 | 8.8 | 8.8 | 4.10,4.11,4.12 | +| 1.4.x | 1.5.1 | 1.5.0 | 1.8.4 | 1.8 | v4.10 | 1 | 8.8 | 8.8 | 4.11,4.12,4.13 | diff --git a/docs/release/compatibility.yaml b/docs/release/compatibility.yaml index 
35fd3ecc..64b1cc44 100644 --- a/docs/release/compatibility.yaml +++ b/docs/release/compatibility.yaml @@ -2,7 +2,7 @@ kfp-tekton: 1.5.1 ml-metadata: 1.5.0 envoy: 1.8.4 - ocp-pipelines: v4.10 + ocp-pipelines: 1.8 oauth-proxy: v4.12 mariadb-103: 1 ubi-minimal: 8.8 @@ -12,7 +12,7 @@ kfp-tekton: 1.5.1 ml-metadata: 1.5.0 envoy: 1.8.4 - ocp-pipelines: v4.10 + ocp-pipelines: 1.8 oauth-proxy: v4.12 mariadb-103: 1 ubi-minimal: 8.8 @@ -22,7 +22,7 @@ kfp-tekton: 1.5.1 ml-metadata: 1.5.0 envoy: 1.8.4 - ocp-pipelines: v4.10 + ocp-pipelines: 1.8 oauth-proxy: v4.10 mariadb-103: 1 ubi-minimal: 8.8 @@ -32,9 +32,19 @@ kfp-tekton: 1.5.1 ml-metadata: 1.5.0 envoy: 1.8.4 - ocp-pipelines: v4.10 + ocp-pipelines: 1.8 oauth-proxy: v4.10 mariadb-103: 1 ubi-minimal: 8.8 ubi-micro: 8.8 openshift: 4.10,4.11,4.12 +- dsp: 1.4.x + kfp-tekton: 1.5.1 + ml-metadata: 1.5.0 + envoy: 1.8.4 + ocp-pipelines: 1.8 + oauth-proxy: v4.10 + mariadb-103: 1 + ubi-minimal: 8.8 + ubi-micro: 8.8 + openshift: 4.11,4.12,4.13 diff --git a/go.mod b/go.mod index 1b3e2f56..86cf5eac 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/opendatahub-io/data-science-pipelines-operator -go 1.18 +go 1.19 require ( github.com/fsnotify/fsnotify v1.5.4 diff --git a/tests/upgrades/main.sh b/tests/upgrades/main.sh new file mode 100755 index 00000000..1024ecf9 --- /dev/null +++ b/tests/upgrades/main.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -e +kubectl create namespace "${DSPA_NS}" +cd "${GITHUB_WORKSPACE}/config/samples" +kustomize build . | kubectl -n "${DSPA_NS}" apply -f -
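For reference, a minimal usage sketch of the GetConfigMapValue and IsX509UnknownAuthorityError helpers added earlier in this patch. This is illustrative only: the util import path, the ConfigMap name/key, the namespace, and the probed endpoint are placeholder assumptions, not values taken from the patch.

package example

import (
	"context"
	"net/http"

	"github.com/go-logr/logr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	// Assumed import path for the util package shown above.
	"github.com/opendatahub-io/data-science-pipelines-operator/controllers/util"
)

// fetchCABundle is an illustrative sketch: it reads a PEM bundle out of a
// ConfigMap with GetConfigMapValue, then probes an HTTPS endpoint and uses
// IsX509UnknownAuthorityError to detect a certificate signed by an authority
// the cluster does not trust. All string literals are placeholders.
func fetchCABundle(ctx context.Context, c client.Client, log logr.Logger) (string, error) {
	// Note the (error, string) return order used by GetConfigMapValue.
	err, bundle := util.GetConfigMapValue(ctx, "ca-bundle.crt", "custom-ca-bundle", "example-namespace", c, log)
	if err != nil {
		return "", err
	}

	resp, probeErr := http.Get("https://objectstore.example.com") // placeholder endpoint
	if probeErr != nil {
		if util.IsX509UnknownAuthorityError(probeErr) {
			log.Info("endpoint certificate is signed by an unrecognized authority; a CA bundle is required")
		}
		return bundle, nil
	}
	defer resp.Body.Close()
	return bundle, nil
}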