diff --git a/Dockerfiles/Dockerfile b/Dockerfiles/Dockerfile index 990c5e6055a..bc9e7ed65ab 100644 --- a/Dockerfiles/Dockerfile +++ b/Dockerfiles/Dockerfile @@ -37,6 +37,7 @@ COPY components/ components/ COPY controllers/ controllers/ COPY main.go main.go COPY pkg/ pkg/ +COPY infrastructure/ infrastructure/ # Build RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go @@ -51,4 +52,4 @@ RUN chown -R 1001:0 /opt/manifests &&\ chmod -R a+r /opt/manifests USER 1001 -ENTRYPOINT ["/manager"] \ No newline at end of file +ENTRYPOINT ["/manager"] diff --git a/apis/datasciencecluster/v1/zz_generated.deepcopy.go b/apis/datasciencecluster/v1/zz_generated.deepcopy.go index b8e57ccaca7..d141eafc099 100644 --- a/apis/datasciencecluster/v1/zz_generated.deepcopy.go +++ b/apis/datasciencecluster/v1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2023. diff --git a/apis/dscinitialization/v1/dscinitialization_types.go b/apis/dscinitialization/v1/dscinitialization_types.go index 5c98aeb4c08..3c5055f5c95 100644 --- a/apis/dscinitialization/v1/dscinitialization_types.go +++ b/apis/dscinitialization/v1/dscinitialization_types.go @@ -21,6 +21,8 @@ import ( conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" ) // +operator-sdk:csv:customresourcedefinitions:order=1 @@ -35,9 +37,17 @@ type DSCInitializationSpec struct { // +operator-sdk:csv:customresourcedefinitions:type=spec,order=2 // +optional Monitoring Monitoring `json:"monitoring,omitempty"` + // Configures Service Mesh as networking layer for Data Science Clusters components. + // The Service Mesh is a mandatory prerequisite for single model serving (KServe) and + // you should review this configuration if you are planning to use KServe. 
+ // For other components, it enhances user experience; e.g. it provides unified + // authentication giving a Single Sign On experience. + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=3 + // +optional + ServiceMesh infrav1.ServiceMeshSpec `json:"serviceMesh,omitempty"` // Internal development useful field to test customizations. // This is not recommended to be used in production environment. - // +operator-sdk:csv:customresourcedefinitions:type=spec,order=3 + // +operator-sdk:csv:customresourcedefinitions:type=spec,order=4 // +optional DevFlags DevFlags `json:"devFlags,omitempty"` } @@ -107,50 +117,9 @@ type DSCInitializationList struct { Items []DSCInitialization `json:"items"` } -// FeatureTracker is a cluster-scoped resource for tracking objects -// created through Features API for Data Science Platform. -// It's primarily used as owner reference for resources created across namespaces so that they can be -// garbage collected by Kubernetes when they're not needed anymore. -// +kubebuilder:object:root=true -// +kubebuilder:resource:scope=Cluster -type FeatureTracker struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - Spec FeatureTrackerSpec `json:"spec,omitempty"` - Status FeatureTrackerStatus `json:"status,omitempty"` -} - -func (s *FeatureTracker) ToOwnerReference() metav1.OwnerReference { - return metav1.OwnerReference{ - APIVersion: s.APIVersion, - Kind: s.Kind, - Name: s.Name, - UID: s.UID, - } -} - -// FeatureTrackerSpec defines the desired state of FeatureTracker. -type FeatureTrackerSpec struct { -} - -// FeatureTrackerStatus defines the observed state of FeatureTracker. -type FeatureTrackerStatus struct { -} - -// +kubebuilder:object:root=true - -// FeatureTrackerList contains a list of FeatureTracker. 
-type FeatureTrackerList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []FeatureTracker `json:"items"` -} - func init() { SchemeBuilder.Register( &DSCInitialization{}, &DSCInitializationList{}, - &FeatureTracker{}, - &FeatureTrackerList{}, ) } diff --git a/apis/dscinitialization/v1/zz_generated.deepcopy.go b/apis/dscinitialization/v1/zz_generated.deepcopy.go index ea929384365..32af21a674e 100644 --- a/apis/dscinitialization/v1/zz_generated.deepcopy.go +++ b/apis/dscinitialization/v1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2023. @@ -90,6 +89,7 @@ func (in *DSCInitializationList) DeepCopyObject() runtime.Object { func (in *DSCInitializationSpec) DeepCopyInto(out *DSCInitializationSpec) { *out = *in out.Monitoring = in.Monitoring + out.ServiceMesh = in.ServiceMesh out.DevFlags = in.DevFlags } @@ -145,95 +145,6 @@ func (in *DevFlags) DeepCopy() *DevFlags { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureTracker) DeepCopyInto(out *FeatureTracker) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureTracker. -func (in *FeatureTracker) DeepCopy() *FeatureTracker { - if in == nil { - return nil - } - out := new(FeatureTracker) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FeatureTracker) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *FeatureTrackerList) DeepCopyInto(out *FeatureTrackerList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]FeatureTracker, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureTrackerList. -func (in *FeatureTrackerList) DeepCopy() *FeatureTrackerList { - if in == nil { - return nil - } - out := new(FeatureTrackerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FeatureTrackerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureTrackerSpec) DeepCopyInto(out *FeatureTrackerSpec) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureTrackerSpec. -func (in *FeatureTrackerSpec) DeepCopy() *FeatureTrackerSpec { - if in == nil { - return nil - } - out := new(FeatureTrackerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureTrackerStatus) DeepCopyInto(out *FeatureTrackerStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureTrackerStatus. -func (in *FeatureTrackerStatus) DeepCopy() *FeatureTrackerStatus { - if in == nil { - return nil - } - out := new(FeatureTrackerStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Monitoring) DeepCopyInto(out *Monitoring) { *out = *in diff --git a/apis/features/v1/features_types.go b/apis/features/v1/features_types.go new file mode 100644 index 00000000000..fbe6e4bab27 --- /dev/null +++ b/apis/features/v1/features_types.go @@ -0,0 +1,52 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// FeatureTracker represents a cluster-scoped resource in the Data Science Cluster, +// specifically designed for monitoring and managing objects created via the internal Features API. +// This resource serves a crucial role in cross-namespace resource management, acting as +// an owner reference for various resources. The primary purpose of the FeatureTracker +// is to enable efficient garbage collection by Kubernetes. This is essential for +// ensuring that resources are automatically cleaned up and reclaimed when they are +// no longer required. +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster +type FeatureTracker struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + Spec FeatureTrackerSpec `json:"spec,omitempty"` + Status FeatureTrackerStatus `json:"status,omitempty"` +} + +func (s *FeatureTracker) ToOwnerReference() metav1.OwnerReference { + return metav1.OwnerReference{ + APIVersion: s.APIVersion, + Kind: s.Kind, + Name: s.Name, + UID: s.UID, + } +} + +// FeatureTrackerSpec defines the desired state of FeatureTracker. +type FeatureTrackerSpec struct { +} + +// FeatureTrackerStatus defines the observed state of FeatureTracker. +type FeatureTrackerStatus struct { +} + +// +kubebuilder:object:root=true + +// FeatureTrackerList contains a list of FeatureTracker. 
+type FeatureTrackerList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FeatureTracker `json:"items"` +} + +func init() { + SchemeBuilder.Register( + &FeatureTracker{}, + &FeatureTrackerList{}, + ) +} diff --git a/apis/features/v1/groupversion_info.go b/apis/features/v1/groupversion_info.go new file mode 100644 index 00000000000..f976be504b6 --- /dev/null +++ b/apis/features/v1/groupversion_info.go @@ -0,0 +1,37 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +kubebuilder:object:generate=true +// +groupName=features.opendatahub.io + +// Package v1 contains API Schema definitions for the datasciencecluster v1 API group +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "features.opendatahub.io", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. 
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/features/v1/zz_generated.deepcopy.go b/apis/features/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..33e184d64f8 --- /dev/null +++ b/apis/features/v1/zz_generated.deepcopy.go @@ -0,0 +1,114 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureTracker) DeepCopyInto(out *FeatureTracker) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureTracker. +func (in *FeatureTracker) DeepCopy() *FeatureTracker { + if in == nil { + return nil + } + out := new(FeatureTracker) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureTracker) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FeatureTrackerList) DeepCopyInto(out *FeatureTrackerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FeatureTracker, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureTrackerList. +func (in *FeatureTrackerList) DeepCopy() *FeatureTrackerList { + if in == nil { + return nil + } + out := new(FeatureTrackerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureTrackerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureTrackerSpec) DeepCopyInto(out *FeatureTrackerSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureTrackerSpec. +func (in *FeatureTrackerSpec) DeepCopy() *FeatureTrackerSpec { + if in == nil { + return nil + } + out := new(FeatureTrackerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureTrackerStatus) DeepCopyInto(out *FeatureTrackerStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureTrackerStatus. 
+func (in *FeatureTrackerStatus) DeepCopy() *FeatureTrackerStatus { + if in == nil { + return nil + } + out := new(FeatureTrackerStatus) + in.DeepCopyInto(out) + return out +} diff --git a/bundle/manifests/datasciencecluster.opendatahub.io_datascienceclusters.yaml b/bundle/manifests/datasciencecluster.opendatahub.io_datascienceclusters.yaml index 7dce236c805..1997bc3da93 100644 --- a/bundle/manifests/datasciencecluster.opendatahub.io_datascienceclusters.yaml +++ b/bundle/manifests/datasciencecluster.opendatahub.io_datascienceclusters.yaml @@ -216,6 +216,65 @@ spec: - Removed pattern: ^(Managed|Unmanaged|Force|Removed)$ type: string + serving: + description: Serving configures the KNative-Serving stack + used for model serving. A Service Mesh (Istio) is prerequisite, + since it is used as networking layer. + properties: + ingressGateway: + description: IngressGateway allows to customize some parameters + for the Istio Ingress Gateway that is bound to KNative-Serving. + properties: + certificate: + description: Certificate specifies configuration of + the TLS certificate securing communications of the + for Ingress Gateway. + properties: + secretName: + description: SecretName specifies the name of + the Kubernetes Secret resource that contains + a TLS certificate secure HTTP communications + for the KNative network. + type: string + type: + default: SelfSigned + description: 'Type specifies if the TLS certificate + should be generated automatically, or if the + certificate is provided by the user. Allowed + values are: * SelfSigned: A certificate is going + to be generated using an own private key. * + Provided: Pre-existence of the TLS Secret (see + SecretName) with a valid certificate is assumed.' + enum: + - SelfSigned + - Provided + type: string + type: object + domain: + description: Domain specifies the DNS name for intercepting + ingress requests coming from outside the cluster. + Most likely, you will want to use a wildcard name, + like *.example.com. 
If not set, the domain of the + OpenShift Ingress is used. If you choose to generate + a certificate, this is the domain used for the certificate + request. + type: string + type: object + managementState: + default: Removed + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + name: + default: knative-serving + description: Name specifies the name of the KNativeServing + resource that is going to be created to instruct the + KNative Operator to deploy KNative serving components. + This resource is created in the "knative-serving" namespace. + type: string + type: object type: object modelmeshserving: description: ModelMeshServing component configuration. Require diff --git a/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml b/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml index 2110872b9e1..f092fb3dee5 100644 --- a/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml +++ b/bundle/manifests/dscinitialization.opendatahub.io_dscinitializations.yaml @@ -80,6 +80,48 @@ spec: description: Namespace for monitoring if it is enabled type: string type: object + serviceMesh: + description: Configures Service Mesh as networking layer for Data + Science Clusters components. The Service Mesh is a mandatory prerequisite + for single model serving (KServe) and you should review this configuration + if you are planning to use KServe. For other components, it enhances + user experience; e.g. it provides unified authentication giving + a Single Sign On experience. + properties: + controlPlane: + description: ControlPlane holds configuration of Service Mesh + used by Opendatahub. + properties: + metricsCollection: + default: Istio + description: MetricsCollection specifies if metrics from components + on the Mesh namespace should be collected. 
Setting the value + to "Istio" will collect metrics from the control plane and + any proxies on the Mesh namespace (like gateway pods). Setting + to "None" will disable metrics collection. + enum: + - Istio + - None + type: string + name: + default: data-science-smcp + description: Name is a name Service Mesh Control Plane. Defaults + to "data-science-smcp". + type: string + namespace: + default: istio-system + description: Namespace is a namespace where Service Mesh is + deployed. Defaults to "istio-system". + type: string + type: object + managementState: + default: Removed + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object required: - applicationsNamespace type: object diff --git a/bundle/manifests/dscinitialization.opendatahub.io_featuretrackers.yaml b/bundle/manifests/features.opendatahub.io_featuretrackers.yaml similarity index 70% rename from bundle/manifests/dscinitialization.opendatahub.io_featuretrackers.yaml rename to bundle/manifests/features.opendatahub.io_featuretrackers.yaml index 2414d0d0fa0..ff13ed889ef 100644 --- a/bundle/manifests/dscinitialization.opendatahub.io_featuretrackers.yaml +++ b/bundle/manifests/features.opendatahub.io_featuretrackers.yaml @@ -4,9 +4,9 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null - name: featuretrackers.dscinitialization.opendatahub.io + name: featuretrackers.features.opendatahub.io spec: - group: dscinitialization.opendatahub.io + group: features.opendatahub.io names: kind: FeatureTracker listKind: FeatureTrackerList @@ -17,10 +17,14 @@ spec: - name: v1 schema: openAPIV3Schema: - description: FeatureTracker is a cluster-scoped resource for tracking objects - created through Features API for Data Science Platform. It's primarily used - as owner reference for resources created across namespaces so that they - can be garbage collected by Kubernetes when they're not needed anymore. 
+ description: FeatureTracker represents a cluster-scoped resource in the Data + Science Cluster, specifically designed for monitoring and managing objects + created via the internal Features API. This resource serves a crucial role + in cross-namespace resource management, acting as an owner reference for + various resources. The primary purpose of the FeatureTracker is to enable + efficient garbage collection by Kubernetes. This is essential for ensuring + that resources are automatically cleaned up and reclaimed when they are + no longer required. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation diff --git a/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml b/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml index 6c7d007369b..77c8bb1d253 100644 --- a/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml +++ b/bundle/manifests/opendatahub-operator.clusterserviceversion.yaml @@ -66,6 +66,20 @@ metadata: "namespace": "opendatahub" } } + }, + { + "apiVersion": "features.opendatahub.io/v1", + "kind": "FeatureTracker", + "metadata": { + "labels": { + "app.kubernetes.io/created-by": "opendatahub-operator", + "app.kubernetes.io/instance": "default", + "app.kubernetes.io/managed-by": "kustomize", + "app.kubernetes.io/name": "default-feature", + "app.kubernetes.io/part-of": "opendatahub-operator" + }, + "name": "default-feature" + } } ] capabilities: Full Lifecycle @@ -148,6 +162,13 @@ spec: - description: Enable monitoring on specified namespace displayName: Monitoring path: monitoring + - description: Configures Service Mesh as networking layer for Data Science + Clusters components. The Service Mesh is a mandatory prerequisite for single + model serving (KServe) and you should review this configuration if you are + planning to use KServe. For other components, it enhances user experience; + e.g. it provides unified authentication giving a Single Sign On experience. 
+ displayName: Service Mesh + path: serviceMesh - description: Internal development useful field to test customizations. This is not recommended to be used in production environment. displayName: Dev Flags @@ -159,7 +180,7 @@ spec: path: conditions version: v1 - kind: FeatureTracker - name: featuretrackers.dscinitialization.opendatahub.io + name: featuretrackers.features.opendatahub.io version: v1 description: "The Open Data Hub is a machine-learning-as-a-service platform built on Red Hat's Kubernetes-based OpenShift® Container Platform. Open Data Hub integrates @@ -485,6 +506,12 @@ spec: verbs: - list - watch + - apiGroups: + - config.openshift.io + resources: + - ingresses + verbs: + - get - apiGroups: - console.openshift.io resources: @@ -811,18 +838,6 @@ spec: - get - patch - update - - apiGroups: - - dscinitialization.opendatahub.io - resources: - - featuretrackers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - events.k8s.io resources: @@ -854,6 +869,18 @@ spec: - replicasets verbs: - '*' + - apiGroups: + - features.opendatahub.io + resources: + - featuretrackers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - image.openshift.io resources: @@ -1129,6 +1156,12 @@ spec: - deletecollection - get - patch + - apiGroups: + - networking.istio.io + resources: + - gateways + verbs: + - '*' - apiGroups: - networking.istio.io resources: @@ -1200,6 +1233,12 @@ spec: - patch - update - watch + - apiGroups: + - operator.knative.dev + resources: + - knativeservings + verbs: + - '*' - apiGroups: - operator.openshift.io resources: diff --git a/components/kserve/kserve.go b/components/kserve/kserve.go index 036eca76561..4fe365e2bdb 100644 --- a/components/kserve/kserve.go +++ b/components/kserve/kserve.go @@ -12,8 +12,10 @@ import ( dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" "github.com/opendatahub-io/opendatahub-operator/v2/components" + 
infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/deploy" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" ) var ( @@ -27,6 +29,9 @@ type Kserve struct { components.Component `json:""` + // Serving configures the KNative-Serving stack used for model serving. A Service + // Mesh (Istio) is prerequisite, since it is used as networking layer. + Serving infrav1.ServingSpec `json:"serving,omitempty"` } func (k *Kserve) OverrideManifests(_ string) error { @@ -87,6 +92,12 @@ func (k *Kserve) ReconcileComponent(cli client.Client, owner metav1.Object, dsci return err } + if !enabled { + if err := k.removeServerlessFeatures(dscispec); err != nil { + return err + } + } + if enabled { // Download manifests and update paths if err = k.OverrideManifests(string(platform)); err != nil { @@ -109,6 +120,10 @@ func (k *Kserve) ReconcileComponent(cli client.Client, owner metav1.Object, dsci ServerlessOperator, ComponentName) } + if err := k.configureServerless(dscispec); err != nil { + return err + } + // Update image parameters only when we do not have customized manifests set if dscispec.DevFlags.ManifestsUri == "" && len(k.DevFlags.Manifests) == 0 { if err := deploy.ApplyParams(Path, k.SetImageParamsMap(imageParamMap), false); err != nil { @@ -149,3 +164,40 @@ func (k *Kserve) DeepCopyInto(target *Kserve) { *target = *k target.Component = k.Component } + +func (k *Kserve) Cleanup(_ client.Client, instance *dsciv1.DSCInitializationSpec) error { + return k.removeServerlessFeatures(instance) +} + +func (k *Kserve) configureServerless(instance *dsciv1.DSCInitializationSpec) error { + if k.Serving.ManagementState == operatorv1.Managed { + if instance.ServiceMesh.ManagementState != operatorv1.Managed { + return fmt.Errorf("service mesh is not configured in DSCInitialization cluster but required by KServe serving") 
+ } + + serverlessInitializer := feature.NewFeaturesInitializer(instance, k.configureServerlessFeatures) + + if err := serverlessInitializer.Prepare(); err != nil { + return err + } + + if err := serverlessInitializer.Apply(); err != nil { + return err + } + } + + return nil +} + +func (k *Kserve) removeServerlessFeatures(instance *dsciv1.DSCInitializationSpec) error { + serverlessInitializer := feature.NewFeaturesInitializer(instance, k.configureServerlessFeatures) + + if err := serverlessInitializer.Prepare(); err != nil { + return err + } + + if err := serverlessInitializer.Delete(); err != nil { + return err + } + return nil +} diff --git a/components/kserve/serverless_setup.go b/components/kserve/serverless_setup.go new file mode 100644 index 00000000000..9add8c8dc7c --- /dev/null +++ b/components/kserve/serverless_setup.go @@ -0,0 +1,74 @@ +package kserve + +import ( + "path" + "path/filepath" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" +) + +const ( + knativeServingNamespace = "knative-serving" + templatesDir = "templates/serverless" +) + +func (k *Kserve) configureServerlessFeatures(s *feature.FeaturesInitializer) error { + var rootDir = filepath.Join(feature.BaseOutputDir, s.DSCInitializationSpec.ApplicationsNamespace) + if err := feature.CopyEmbeddedFiles(templatesDir, rootDir); err != nil { + return err + } + + servingDeployment, err := feature.CreateFeature("serverless-serving-deployment"). + For(s.DSCInitializationSpec). + Manifests( + path.Join(rootDir, templatesDir, "serving-install"), + ). + WithData(PopulateComponentSettings(k)). 
+ PreConditions( + serverless.EnsureServerlessOperatorInstalled, + serverless.EnsureServerlessAbsent, + servicemesh.EnsureServiceMeshInstalled, + feature.CreateNamespace(knativeServingNamespace), + ). + PostConditions( + feature.WaitForPodsToBeReady(knativeServingNamespace), + ). + Load() + if err != nil { + return err + } + s.Features = append(s.Features, servingDeployment) + + servingIstioGateways, err := feature.CreateFeature("serverless-serving-gateways"). + For(s.DSCInitializationSpec). + PreConditions( + // Check serverless is installed + feature.WaitForResourceToBeCreated(knativeServingNamespace, gvr.KnativeServing), + ). + WithData( + serverless.ServingDefaultValues, + serverless.ServingIngressDomain, + PopulateComponentSettings(k), + ). + WithResources(serverless.ServingCertificateResource). + Manifests( + path.Join(rootDir, templatesDir, "serving-istio-gateways"), + ). + Load() + if err != nil { + return err + } + s.Features = append(s.Features, servingIstioGateways) + + return nil +} + +func PopulateComponentSettings(k *Kserve) feature.Action { + return func(f *feature.Feature) error { + f.Spec.Serving = &k.Serving + return nil + } +} diff --git a/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml b/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml index 146d6c2493b..f44a034053c 100644 --- a/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml +++ b/config/crd/bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml @@ -217,6 +217,65 @@ spec: - Removed pattern: ^(Managed|Unmanaged|Force|Removed)$ type: string + serving: + description: Serving configures the KNative-Serving stack + used for model serving. A Service Mesh (Istio) is prerequisite, + since it is used as networking layer. + properties: + ingressGateway: + description: IngressGateway allows to customize some parameters + for the Istio Ingress Gateway that is bound to KNative-Serving. 
+ properties: + certificate: + description: Certificate specifies configuration of + the TLS certificate securing communications of the + for Ingress Gateway. + properties: + secretName: + description: SecretName specifies the name of + the Kubernetes Secret resource that contains + a TLS certificate secure HTTP communications + for the KNative network. + type: string + type: + default: SelfSigned + description: 'Type specifies if the TLS certificate + should be generated automatically, or if the + certificate is provided by the user. Allowed + values are: * SelfSigned: A certificate is going + to be generated using an own private key. * + Provided: Pre-existence of the TLS Secret (see + SecretName) with a valid certificate is assumed.' + enum: + - SelfSigned + - Provided + type: string + type: object + domain: + description: Domain specifies the DNS name for intercepting + ingress requests coming from outside the cluster. + Most likely, you will want to use a wildcard name, + like *.example.com. If not set, the domain of the + OpenShift Ingress is used. If you choose to generate + a certificate, this is the domain used for the certificate + request. + type: string + type: object + managementState: + default: Removed + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + name: + default: knative-serving + description: Name specifies the name of the KNativeServing + resource that is going to be created to instruct the + KNative Operator to deploy KNative serving components. + This resource is created in the "knative-serving" namespace. + type: string + type: object type: object modelmeshserving: description: ModelMeshServing component configuration. 
Require diff --git a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml index 84abc1fa1e0..e805f082618 100644 --- a/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml +++ b/config/crd/bases/dscinitialization.opendatahub.io_dscinitializations.yaml @@ -81,6 +81,48 @@ spec: description: Namespace for monitoring if it is enabled type: string type: object + serviceMesh: + description: Configures Service Mesh as networking layer for Data + Science Clusters components. The Service Mesh is a mandatory prerequisite + for single model serving (KServe) and you should review this configuration + if you are planning to use KServe. For other components, it enhances + user experience; e.g. it provides unified authentication giving + a Single Sign On experience. + properties: + controlPlane: + description: ControlPlane holds configuration of Service Mesh + used by Opendatahub. + properties: + metricsCollection: + default: Istio + description: MetricsCollection specifies if metrics from components + on the Mesh namespace should be collected. Setting the value + to "Istio" will collect metrics from the control plane and + any proxies on the Mesh namespace (like gateway pods). Setting + to "None" will disable metrics collection. + enum: + - Istio + - None + type: string + name: + default: data-science-smcp + description: Name is a name Service Mesh Control Plane. Defaults + to "data-science-smcp". + type: string + namespace: + default: istio-system + description: Namespace is a namespace where Service Mesh is + deployed. Defaults to "istio-system". 
+ type: string + type: object + managementState: + default: Removed + enum: + - Managed + - Removed + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + type: object required: - applicationsNamespace type: object diff --git a/config/crd/bases/dscinitialization.opendatahub.io_featuretrackers.yaml b/config/crd/bases/features.opendatahub.io_featuretrackers.yaml similarity index 68% rename from config/crd/bases/dscinitialization.opendatahub.io_featuretrackers.yaml rename to config/crd/bases/features.opendatahub.io_featuretrackers.yaml index 3ad5b72560f..dc544d8f79f 100644 --- a/config/crd/bases/dscinitialization.opendatahub.io_featuretrackers.yaml +++ b/config/crd/bases/features.opendatahub.io_featuretrackers.yaml @@ -5,9 +5,9 @@ metadata: annotations: controller-gen.kubebuilder.io/version: v0.9.2 creationTimestamp: null - name: featuretrackers.dscinitialization.opendatahub.io + name: featuretrackers.features.opendatahub.io spec: - group: dscinitialization.opendatahub.io + group: features.opendatahub.io names: kind: FeatureTracker listKind: FeatureTrackerList @@ -18,10 +18,14 @@ spec: - name: v1 schema: openAPIV3Schema: - description: FeatureTracker is a cluster-scoped resource for tracking objects - created through Features API for Data Science Platform. It's primarily used - as owner reference for resources created across namespaces so that they - can be garbage collected by Kubernetes when they're not needed anymore. + description: FeatureTracker represents a cluster-scoped resource in the Data + Science Cluster, specifically designed for monitoring and managing objects + created via the internal Features API. This resource serves a crucial role + in cross-namespace resource management, acting as an owner reference for + various resources. The primary purpose of the FeatureTracker is to enable + efficient garbage collection by Kubernetes. 
This is essential for ensuring + that resources are automatically cleaned up and reclaimed when they are + no longer required. properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 6adcedb9a88..e2d0c9e42ea 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -3,8 +3,8 @@ # It should be run by config/default resources: - bases/dscinitialization.opendatahub.io_dscinitializations.yaml -- bases/dscinitialization.opendatahub.io_featuretrackers.yaml - bases/datasciencecluster.opendatahub.io_datascienceclusters.yaml +- bases/features.opendatahub.io_featuretrackers.yaml #+kubebuilder:scaffold:crdkustomizeresource patches: diff --git a/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml b/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml index 80746cf5b78..bd90811a8d9 100644 --- a/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/opendatahub-operator.clusterserviceversion.yaml @@ -81,6 +81,13 @@ spec: - description: Enable monitoring on specified namespace displayName: Monitoring path: monitoring + - description: Configures Service Mesh as networking layer for Data Science + Clusters components. The Service Mesh is a mandatory prerequisite for single + model serving (KServe) and you should review this configuration if you are + planning to use KServe. For other components, it enhances user experience; + e.g. it provides unified authentication giving a Single Sign On experience. + displayName: Service Mesh + path: serviceMesh - description: Internal development useful field to test customizations. This is not recommended to be used in production environment. 
displayName: Dev Flags diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 10f06c2faa2..5f200494a55 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -300,6 +300,12 @@ rules: verbs: - list - watch +- apiGroups: + - config.openshift.io + resources: + - ingresses + verbs: + - get - apiGroups: - console.openshift.io resources: @@ -626,18 +632,6 @@ rules: - get - patch - update -- apiGroups: - - dscinitialization.opendatahub.io - resources: - - featuretrackers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - events.k8s.io resources: @@ -669,6 +663,18 @@ rules: - replicasets verbs: - '*' +- apiGroups: + - features.opendatahub.io + resources: + - featuretrackers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - image.openshift.io resources: @@ -944,6 +950,12 @@ rules: - deletecollection - get - patch +- apiGroups: + - networking.istio.io + resources: + - gateways + verbs: + - '*' - apiGroups: - networking.istio.io resources: @@ -1015,6 +1027,12 @@ rules: - patch - update - watch +- apiGroups: + - operator.knative.dev + resources: + - knativeservings + verbs: + - '*' - apiGroups: - operator.openshift.io resources: diff --git a/config/samples/featuretracker_v1_features.yaml b/config/samples/featuretracker_v1_features.yaml new file mode 100644 index 00000000000..f3f03bbf74a --- /dev/null +++ b/config/samples/featuretracker_v1_features.yaml @@ -0,0 +1,10 @@ +apiVersion: features.opendatahub.io/v1 +kind: FeatureTracker +metadata: + name: default-feature + labels: + app.kubernetes.io/name: featuretracker + app.kubernetes.io/instance: default-feature + app.kubernetes.io/part-of: opendatahub-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: opendatahub-operator diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 51284dc78c5..0a707a1fc5f 100644 --- a/config/samples/kustomization.yaml +++ 
b/config/samples/kustomization.yaml @@ -5,4 +5,5 @@ kind: Kustomization resources: - datasciencecluster_v1_datasciencecluster.yaml - dscinitialization_v1_dscinitialization.yaml +- featuretracker_v1_features.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/controllers/datasciencecluster/kubebuilder_rbac.go b/controllers/datasciencecluster/kubebuilder_rbac.go index 520b970f371..791e38669ed 100644 --- a/controllers/datasciencecluster/kubebuilder_rbac.go +++ b/controllers/datasciencecluster/kubebuilder_rbac.go @@ -4,6 +4,14 @@ package datasciencecluster //+kubebuilder:rbac:groups="datasciencecluster.opendatahub.io",resources=datascienceclusters/finalizers,verbs=update;patch //+kubebuilder:rbac:groups="datasciencecluster.opendatahub.io",resources=datascienceclusters,verbs=get;list;watch;create;update;patch;delete +/* Service Mesh prerequisite */ +// +kubebuilder:rbac:groups="maistra.io",resources=servicemeshcontrolplanes,verbs=create;get;list;patch;update;use;watch + +/* Serverless prerequisite */ +// +kubebuilder:rbac:groups="networking.istio.io",resources=gateways,verbs=* +// +kubebuilder:rbac:groups="operator.knative.dev",resources=knativeservings,verbs=* +// +kubebuilder:rbac:groups="config.openshift.io",resources=ingresses,verbs=get + /* This is for DSP */ //+kubebuilder:rbac:groups="datasciencepipelinesapplications.opendatahub.io",resources=datasciencepipelinesapplications/status,verbs=update;patch;get //+kubebuilder:rbac:groups="datasciencepipelinesapplications.opendatahub.io",resources=datasciencepipelinesapplications/finalizers,verbs=update;patch diff --git a/controllers/dscinitialization/dscinitialization_controller.go b/controllers/dscinitialization/dscinitialization_controller.go index ca07cbf2a47..dfdd38c4c80 100644 --- a/controllers/dscinitialization/dscinitialization_controller.go +++ b/controllers/dscinitialization/dscinitialization_controller.go @@ -68,7 +68,7 @@ type DSCInitializationReconciler struct { // 
+kubebuilder:rbac:groups="dscinitialization.opendatahub.io",resources=dscinitializations/status,verbs=get;update;patch;delete // +kubebuilder:rbac:groups="dscinitialization.opendatahub.io",resources=dscinitializations/finalizers,verbs=get;update;patch;delete // +kubebuilder:rbac:groups="dscinitialization.opendatahub.io",resources=dscinitializations,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups="dscinitialization.opendatahub.io",resources=featuretrackers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups="features.opendatahub.io",resources=featuretrackers,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups="kfdef.apps.kubeflow.org",resources=kfdefs,verbs=get;list;watch;create;update;patch;delete // Reconcile contains controller logic specific to DSCInitialization instance updates. @@ -108,7 +108,9 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re } } else { r.Log.Info("Finalization DSCInitialization start deleting instance", "name", instance.Name, "finalizer", finalizerName) - // Add cleanup logic here + if err := r.removeServiceMesh(instance); err != nil { + return reconcile.Result{}, err + } if controllerutil.ContainsFinalizer(instance, finalizerName) { controllerutil.RemoveFinalizer(instance, finalizerName) if err := r.Update(ctx, instance); err != nil { @@ -261,6 +263,11 @@ func (r *DSCInitializationReconciler) Reconcile(ctx context.Context, req ctrl.Re } } + // Apply Service Mesh configurations + if errServiceMesh := r.configureServiceMesh(instance); errServiceMesh != nil { + return reconcile.Result{}, errServiceMesh + } + // Finish reconciling _, err = r.updateStatus(ctx, instance, func(saved *dsciv1.DSCInitialization) { status.SetCompleteCondition(&saved.Status.Conditions, status.ReconcileCompleted, status.ReconcileCompletedMessage) diff --git a/controllers/dscinitialization/dscinitialization_test.go 
b/controllers/dscinitialization/dscinitialization_test.go index 2e33d814e93..182cd5411e8 100644 --- a/controllers/dscinitialization/dscinitialization_test.go +++ b/controllers/dscinitialization/dscinitialization_test.go @@ -233,10 +233,16 @@ func cleanupResources() { defaultNamespace := client.InNamespace(workingNamespace) appNamespace := client.InNamespace(applicationNamespace) Expect(k8sClient.DeleteAllOf(context.TODO(), &dsci.DSCInitialization{}, defaultNamespace)).To(Succeed()) + Expect(k8sClient.DeleteAllOf(context.TODO(), &netv1.NetworkPolicy{}, appNamespace)).To(Succeed()) Expect(k8sClient.DeleteAllOf(context.TODO(), &corev1.ConfigMap{}, appNamespace)).To(Succeed()) Expect(k8sClient.DeleteAllOf(context.TODO(), &authv1.RoleBinding{}, appNamespace)).To(Succeed()) + Expect(k8sClient.DeleteAllOf(context.TODO(), &authv1.ClusterRoleBinding{}, appNamespace)).To(Succeed()) + Eventually(noInstanceExistsIn(workingNamespace, &dsci.DSCInitializationList{}), timeout, interval).Should(BeTrue()) + Eventually(noInstanceExistsIn(applicationNamespace, &authv1.ClusterRoleBindingList{}), timeout, interval).Should(BeTrue()) + Eventually(noInstanceExistsIn(applicationNamespace, &authv1.RoleBindingList{}), timeout, interval).Should(BeTrue()) + Eventually(noInstanceExistsIn(applicationNamespace, &corev1.ConfigMapList{}), timeout, interval).Should(BeTrue()) } func noInstanceExistsIn(namespace string, list client.ObjectList) func() bool { diff --git a/controllers/dscinitialization/servicemesh_setup.go b/controllers/dscinitialization/servicemesh_setup.go new file mode 100644 index 00000000000..ef45ba6118b --- /dev/null +++ b/controllers/dscinitialization/servicemesh_setup.go @@ -0,0 +1,104 @@ +package dscinitialization + +import ( + "path" + "path/filepath" + + operatorv1 "github.com/openshift/api/operator/v1" + corev1 "k8s.io/api/core/v1" + + dsciv1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + 
"github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" +) + +const templatesDir = "templates/servicemesh" + +func (r *DSCInitializationReconciler) configureServiceMesh(instance *dsciv1.DSCInitialization) error { + if instance.Spec.ServiceMesh.ManagementState == operatorv1.Managed { + serviceMeshInitializer := feature.NewFeaturesInitializer(&instance.Spec, configureServiceMeshFeatures) + + if err := serviceMeshInitializer.Prepare(); err != nil { + r.Log.Error(err, "failed configuring service mesh resources") + r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed configuring service mesh resources") + + return err + } + + if err := serviceMeshInitializer.Apply(); err != nil { + r.Log.Error(err, "failed applying service mesh resources") + r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed applying service mesh resources") + + return err + } + } + + return nil +} + +func (r *DSCInitializationReconciler) removeServiceMesh(instance *dsciv1.DSCInitialization) error { + if instance.Spec.ServiceMesh.ManagementState == operatorv1.Managed { + serviceMeshInitializer := feature.NewFeaturesInitializer(&instance.Spec, configureServiceMeshFeatures) + + if err := serviceMeshInitializer.Prepare(); err != nil { + r.Log.Error(err, "failed configuring service mesh resources") + r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed configuring service mesh resources") + + return err + } + + if err := serviceMeshInitializer.Delete(); err != nil { + r.Log.Error(err, "failed deleting service mesh resources") + r.Recorder.Eventf(instance, corev1.EventTypeWarning, "DSCInitializationReconcileError", "failed deleting service mesh resources") + + return err + } + } + + return nil +} + +func configureServiceMeshFeatures(s *feature.FeaturesInitializer) error { + var rootDir = 
filepath.Join(feature.BaseOutputDir, s.DSCInitializationSpec.ApplicationsNamespace) + if err := feature.CopyEmbeddedFiles(templatesDir, rootDir); err != nil { + return err + } + + serviceMeshSpec := s.ServiceMesh + + smcpCreation, errSmcp := feature.CreateFeature("mesh-control-plane-creation"). + For(s.DSCInitializationSpec). + Manifests( + path.Join(rootDir, templatesDir, "/base"), + ). + PreConditions( + servicemesh.EnsureServiceMeshOperatorInstalled, + feature.CreateNamespace(serviceMeshSpec.ControlPlane.Namespace), + ). + PostConditions( + feature.WaitForPodsToBeReady(serviceMeshSpec.ControlPlane.Namespace), + ). + Load() + if errSmcp != nil { + return errSmcp + } + s.Features = append(s.Features, smcpCreation) + + if serviceMeshSpec.ControlPlane.MetricsCollection == "Istio" { + metricsCollection, errMetrics := feature.CreateFeature("mesh-metrics-collection"). + For(s.DSCInitializationSpec). + Manifests( + path.Join(rootDir, templatesDir, "metrics-collection"), + ). + PreConditions( + servicemesh.EnsureServiceMeshInstalled, + ). 
+ Load() + if errMetrics != nil { + return errMetrics + } + s.Features = append(s.Features, metricsCollection) + } + + return nil +} diff --git a/go.sum b/go.sum index e5697ed476a..e0b38ea4558 100644 --- a/go.sum +++ b/go.sum @@ -754,6 +754,8 @@ github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47 github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.5.0/go.mod h1:Luc4sArBICYCS8THh8v3i3i5CuSZO+RaQRaJoeNwomw= +github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= github.com/onsi/ginkgo/v2 v2.12.1 h1:uHNEO1RP2SpuZApSkel9nEh1/Mu+hmQe7Q+Pepg5OYA= github.com/onsi/ginkgo/v2 v2.12.1/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= diff --git a/infrastructure/v1/cert_types.go b/infrastructure/v1/cert_types.go new file mode 100644 index 00000000000..6843d7a3c04 --- /dev/null +++ b/infrastructure/v1/cert_types.go @@ -0,0 +1,23 @@ +package v1 + +type CertType string + +const ( + SelfSigned CertType = "SelfSigned" + Provided CertType = "Provided" +) + +// CertificateSpec represents the specification of the certificate securing communications of +// an Istio Gateway. +type CertificateSpec struct { + // SecretName specifies the name of the Kubernetes Secret resource that contains a + // TLS certificate to secure HTTP communications for the KNative network. + SecretName string `json:"secretName,omitempty"` + // Type specifies if the TLS certificate should be generated automatically, or if the certificate + // is provided by the user. Allowed values are: + // * SelfSigned: A certificate is going to be generated using an own private key.
+ // * Provided: Pre-existence of the TLS Secret (see SecretName) with a valid certificate is assumed. + // +kubebuilder:validation:Enum=SelfSigned;Provided + // +kubebuilder:default=SelfSigned + Type CertType `json:"type,omitempty"` +} diff --git a/infrastructure/v1/serverless_types.go b/infrastructure/v1/serverless_types.go new file mode 100644 index 00000000000..37155c1488e --- /dev/null +++ b/infrastructure/v1/serverless_types.go @@ -0,0 +1,21 @@ +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" +) + +// ServingSpec specifies the configuration for the KNative Serving components and their +// bindings with the Service Mesh. +type ServingSpec struct { + // +kubebuilder:validation:Enum=Managed;Removed + // +kubebuilder:default=Removed + ManagementState operatorv1.ManagementState `json:"managementState,omitempty"` + // Name specifies the name of the KNativeServing resource that is going to be + // created to instruct the KNative Operator to deploy KNative serving components. + // This resource is created in the "knative-serving" namespace. + // +kubebuilder:default=knative-serving + Name string `json:"name,omitempty"` + // IngressGateway allows to customize some parameters for the Istio Ingress Gateway + // that is bound to KNative-Serving. + IngressGateway IngressGatewaySpec `json:"ingressGateway,omitempty"` +} diff --git a/infrastructure/v1/servicemesh_types.go b/infrastructure/v1/servicemesh_types.go new file mode 100644 index 00000000000..db4af44d712 --- /dev/null +++ b/infrastructure/v1/servicemesh_types.go @@ -0,0 +1,40 @@ +package v1 + +import operatorv1 "github.com/openshift/api/operator/v1" + +// ServiceMeshSpec configures Service Mesh. +type ServiceMeshSpec struct { + // +kubebuilder:validation:Enum=Managed;Removed + // +kubebuilder:default=Removed + ManagementState operatorv1.ManagementState `json:"managementState,omitempty"` + // ControlPlane holds configuration of Service Mesh used by Opendatahub. 
+ ControlPlane ControlPlaneSpec `json:"controlPlane,omitempty"` +} + +type ControlPlaneSpec struct { + // Name is a name of the Service Mesh Control Plane. Defaults to "data-science-smcp". + // +kubebuilder:default=data-science-smcp + Name string `json:"name,omitempty"` + // Namespace is a namespace where Service Mesh is deployed. Defaults to "istio-system". + // +kubebuilder:default=istio-system + Namespace string `json:"namespace,omitempty"` + // MetricsCollection specifies if metrics from components on the Mesh namespace + // should be collected. Setting the value to "Istio" will collect metrics from the + // control plane and any proxies on the Mesh namespace (like gateway pods). Setting + // to "None" will disable metrics collection. + // +kubebuilder:validation:Enum=Istio;None + // +kubebuilder:default=Istio + MetricsCollection string `json:"metricsCollection,omitempty"` +} + +// IngressGatewaySpec represents the configuration of the Ingress Gateways. +type IngressGatewaySpec struct { + // Domain specifies the DNS name for intercepting ingress requests coming from + // outside the cluster. Most likely, you will want to use a wildcard name, + // like *.example.com. If not set, the domain of the OpenShift Ingress is used. + // If you choose to generate a certificate, this is the domain used for the certificate request. + Domain string `json:"domain,omitempty"` + // Certificate specifies configuration of the TLS certificate securing communications of + // the Ingress Gateway.
+ Certificate CertificateSpec `json:"certificate,omitempty"` +} diff --git a/pkg/feature/builder.go b/pkg/feature/builder.go index 0d9b7043f85..468f523723c 100644 --- a/pkg/feature/builder.go +++ b/pkg/feature/builder.go @@ -10,6 +10,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" v1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" ) type partialBuilder func(f *Feature) error @@ -26,7 +27,9 @@ func CreateFeature(name string) *featureBuilder { func (fb *featureBuilder) For(spec *v1.DSCInitializationSpec) *featureBuilder { createSpec := func(f *Feature) error { f.Spec = &Spec{ - AppNamespace: spec.ApplicationsNamespace, + ServiceMeshSpec: &spec.ServiceMesh, + Serving: &infrav1.ServingSpec{}, + AppNamespace: spec.ApplicationsNamespace, } return nil diff --git a/pkg/feature/cert.go b/pkg/feature/cert.go index 1bb6eb70874..020b2dea1b6 100644 --- a/pkg/feature/cert.go +++ b/pkg/feature/cert.go @@ -2,6 +2,7 @@ package feature import ( "bytes" + "context" cryptorand "crypto/rand" "crypto/rsa" "crypto/x509" @@ -15,11 +16,46 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" ) var seededRand = rand.New(rand.NewSource(time.Now().UnixNano())) +func (f *Feature) CreateSelfSignedCertificate(secretName string, certificateType infrav1.CertType, domain, namespace string) error { + if certificateType != infrav1.SelfSigned { + return nil + } + + meta := metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + OwnerReferences: []metav1.OwnerReference{ + f.OwnerReference(), + }, + } + + cert, err := GenerateSelfSignedCertificateAsSecret(domain, meta) + if err != nil { + return errors.WithStack(err) + } + + if err != nil { + return errors.WithStack(err) + } + + _, err = 
f.Clientset.CoreV1(). + Secrets(namespace). + Create(context.TODO(), cert, metav1.CreateOptions{}) + if err != nil && !k8serrors.IsAlreadyExists(err) { + return errors.WithStack(err) + } + + return nil +} + func GenerateSelfSignedCertificateAsSecret(addr string, objectMeta metav1.ObjectMeta) (*corev1.Secret, error) { cert, key, err := generateCertificate(addr) if err != nil { diff --git a/pkg/feature/conditions.go b/pkg/feature/conditions.go index dbc584933fa..4f1ca7a7e60 100644 --- a/pkg/feature/conditions.go +++ b/pkg/feature/conditions.go @@ -37,12 +37,16 @@ func WaitForPodsToBeReady(namespace string) Action { for _, pod := range podList.Items { podReady := true - for _, condition := range pod.Status.Conditions { - if condition.Type == corev1.PodReady { - if condition.Status != corev1.ConditionTrue { - podReady = false - - break + // Consider a "PodSucceeded" as ready, since these will never will + // be in Ready condition (i.e. Jobs that already completed). + if pod.Status.Phase != corev1.PodSucceeded { + for _, condition := range pod.Status.Conditions { + if condition.Type == corev1.PodReady { + if condition.Status != corev1.ConditionTrue { + podReady = false + + break + } } } } diff --git a/pkg/feature/feature.go b/pkg/feature/feature.go index 5e3470404e8..a2a68d5c704 100644 --- a/pkg/feature/feature.go +++ b/pkg/feature/feature.go @@ -15,7 +15,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ctrlLog "sigs.k8s.io/controller-runtime/pkg/log" - v1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/common" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" ) @@ -196,9 +196,9 @@ func (f *Feature) OwnerReference() metav1.OwnerReference { // It's a cluster-scoped resource. 
Once created, there's a cleanup hook added which will be invoked on deletion, resulting // in removal of all owned resources which belong to this Feature. func (f *Feature) createResourceTracker() error { - tracker := &v1.FeatureTracker{ + tracker := &featurev1.FeatureTracker{ TypeMeta: metav1.TypeMeta{ - APIVersion: "dscinitialization.opendatahub.io/v1", + APIVersion: "features.opendatahub.io/v1", Kind: "FeatureTracker", }, ObjectMeta: metav1.ObjectMeta{ @@ -223,7 +223,7 @@ func (f *Feature) createResourceTracker() error { return err } - f.Spec.Tracker = &v1.FeatureTracker{} + f.Spec.Tracker = &featurev1.FeatureTracker{} if err := runtime.DefaultUnstructuredConverter.FromUnstructured(foundTracker.Object, f.Spec.Tracker); err != nil { return err } diff --git a/pkg/feature/manifest.go b/pkg/feature/manifest.go index 9bf79d2049e..e741c65b307 100644 --- a/pkg/feature/manifest.go +++ b/pkg/feature/manifest.go @@ -10,6 +10,8 @@ import ( "github.com/pkg/errors" ) +const BaseOutputDir = "/tmp/odh-operator" + type manifest struct { name, path string diff --git a/pkg/feature/resources.go b/pkg/feature/resources.go index 43f955f4b0b..821673aace2 100644 --- a/pkg/feature/resources.go +++ b/pkg/feature/resources.go @@ -3,15 +3,34 @@ package feature import ( "context" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/opendatahub-io/opendatahub-operator/v2/pkg/cluster" ) // CreateNamespace will create namespace with the given name if it does not exist yet and sets feature as an owner of it. -// This way we ensure that when the feature is cleaned up, the namespace will be deleted as well. +// This way we ensure that when the feature is cleaned up, the namespace will be deleted as well. If the namespace +// already exists, no action will be performed. 
func CreateNamespace(namespace string) Action { + return func(f *Feature) error { + // Although the cluster.CreateNamespace function already checks if the target + // namespace exists, it seems relevant to do the check here. Otherwise, we may + // set or change the owner reference of an existing namespace, and that would lead + // to namespace deletion for cases where it is better to not terminate it. + foundNamespace := &corev1.Namespace{} + err := f.Client.Get(context.TODO(), client.ObjectKey{Name: namespace}, foundNamespace) + if err != nil { + if !apierrs.IsNotFound(err) { + return err + } + } else { + // Namespace exists. We do no-op. + return nil + } + createdNs, err := cluster.CreateNamespace(f.Client, namespace) if err != nil { return err diff --git a/pkg/feature/serverless/conditions.go b/pkg/feature/serverless/conditions.go new file mode 100644 index 00000000000..12714fc0bf5 --- /dev/null +++ b/pkg/feature/serverless/conditions.go @@ -0,0 +1,52 @@ +package serverless + +import ( + "context" + "fmt" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrlLog "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" +) + +var log = ctrlLog.Log.WithName("features") + +func EnsureServerlessAbsent(f *feature.Feature) error { + list, err := f.DynamicClient.Resource(gvr.KnativeServing).Namespace("").List(context.TODO(), v1.ListOptions{}) + if err != nil { + return err + } + + if len(list.Items) == 0 { + return nil + } + + if len(list.Items) > 1 { + return fmt.Errorf("multiple KNativeServing resources found, which is an unsupported state") + } + + servingOwners := list.Items[0].GetOwnerReferences() + featureOwner := f.OwnerReference() + for _, owner := range servingOwners { + if owner.APIVersion == featureOwner.APIVersion && + owner.Kind == featureOwner.Kind && + owner.Name == featureOwner.Name && + owner.UID == featureOwner.UID { + return nil + } + } + 
return fmt.Errorf("existing KNativeServing resource was found; integrating to an existing installation is not supported") +} + +func EnsureServerlessOperatorInstalled(f *feature.Feature) error { + if err := feature.EnsureCRDIsInstalled("knativeservings.operator.knative.dev")(f); err != nil { + log.Info("Failed to find the pre-requisite KNative Serving Operator CRD, please ensure Serverless Operator is installed.", "feature", f.Name) + + return err + } + + return nil +} diff --git a/pkg/feature/serverless/loaders.go b/pkg/feature/serverless/loaders.go new file mode 100644 index 00000000000..115034dbdcc --- /dev/null +++ b/pkg/feature/serverless/loaders.go @@ -0,0 +1,36 @@ +package serverless + +import ( + "fmt" + "strings" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" +) + +const DefaultCertificateSecretName = "knative-serving-cert" + +func ServingDefaultValues(f *feature.Feature) error { + certificateSecretName := strings.TrimSpace(f.Spec.Serving.IngressGateway.Certificate.SecretName) + if len(certificateSecretName) == 0 { + certificateSecretName = DefaultCertificateSecretName + } + + f.Spec.KnativeCertificateSecret = certificateSecretName + return nil +} + +func ServingIngressDomain(f *feature.Feature) error { + domain := strings.TrimSpace(f.Spec.Serving.IngressGateway.Domain) + if len(domain) == 0 { + var errDomain error + domain, errDomain = GetDomain(f.DynamicClient) + if errDomain != nil { + return fmt.Errorf("failed to fetch OpenShift domain to generate certificate for Serverless: %w", errDomain) + } + + domain = "*." 
+ domain + } + + f.Spec.KnativeIngressDomain = domain + return nil +} diff --git a/pkg/feature/serverless/resources.go b/pkg/feature/serverless/resources.go new file mode 100644 index 00000000000..d06e4f1ae21 --- /dev/null +++ b/pkg/feature/serverless/resources.go @@ -0,0 +1,30 @@ +package serverless + +import ( + "context" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/dynamic" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" +) + +func ServingCertificateResource(f *feature.Feature) error { + return f.CreateSelfSignedCertificate(f.Spec.KnativeCertificateSecret, f.Spec.Serving.IngressGateway.Certificate.Type, f.Spec.KnativeIngressDomain, f.Spec.ControlPlane.Namespace) +} + +func GetDomain(dynamicClient dynamic.Interface) (string, error) { + cluster, err := dynamicClient.Resource(gvr.OpenshiftIngress).Get(context.TODO(), "cluster", metav1.GetOptions{}) + if err != nil { + return "", err + } + + domain, found, err := unstructured.NestedString(cluster.Object, "spec", "domain") + if !found { + return "", errors.New("spec.domain not found") + } + return domain, err +} diff --git a/pkg/feature/servicemesh/conditions.go b/pkg/feature/servicemesh/conditions.go new file mode 100644 index 00000000000..da8e17e51e9 --- /dev/null +++ b/pkg/feature/servicemesh/conditions.go @@ -0,0 +1,88 @@ +package servicemesh + +import ( + "context" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/dynamic" + ctrlLog "sigs.k8s.io/controller-runtime/pkg/log" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" +) + +var log = 
ctrlLog.Log.WithName("features") + +const ( + interval = 2 * time.Second + duration = 5 * time.Minute +) + +func EnsureServiceMeshOperatorInstalled(f *feature.Feature) error { + if err := feature.EnsureCRDIsInstalled("servicemeshcontrolplanes.maistra.io")(f); err != nil { + log.Info("Failed to find the pre-requisite Service Mesh Control Plane CRD, please ensure Service Mesh Operator is installed.", "feature", f.Name) + + return err + } + + return nil +} + +func EnsureServiceMeshInstalled(f *feature.Feature) error { + if err := EnsureServiceMeshOperatorInstalled(f); err != nil { + return err + } + + smcp := f.Spec.ControlPlane.Name + smcpNs := f.Spec.ControlPlane.Namespace + + if err := WaitForControlPlaneToBeReady(f); err != nil { + log.Error(err, "failed waiting for control plane being ready", "feature", f.Name, "control-plane", smcp, "namespace", smcpNs) + + return multierror.Append(err, errors.New("service mesh control plane is not ready")).ErrorOrNil() + } + + return nil +} + +func WaitForControlPlaneToBeReady(feature *feature.Feature) error { + smcp := feature.Spec.ControlPlane.Name + smcpNs := feature.Spec.ControlPlane.Namespace + + log.Info("waiting for control plane components to be ready", "feature", feature.Name, "control-plane", smcp, "namespace", smcpNs, "duration (s)", duration.Seconds()) + + return wait.PollUntilContextTimeout(context.TODO(), interval, duration, false, func(ctx context.Context) (bool, error) { + ready, err := CheckControlPlaneComponentReadiness(feature.DynamicClient, smcp, smcpNs) + + if ready { + log.Info("done waiting for control plane components to be ready", "feature", feature.Name, "control-plane", smcp, "namespace", smcpNs) + } + + return ready, err + }) +} + +func CheckControlPlaneComponentReadiness(dynamicClient dynamic.Interface, smcp, smcpNs string) (bool, error) { + unstructObj, err := dynamicClient.Resource(gvr.SMCP).Namespace(smcpNs).Get(context.TODO(), smcp, metav1.GetOptions{}) + if err != nil { + log.Info("failed to 
find Service Mesh Control Plane", "control-plane", smcp, "namespace", smcpNs) + return false, err + } + + components, found, err := unstructured.NestedMap(unstructObj.Object, "status", "readiness", "components") + if err != nil || !found { + log.Info("status conditions not found or error in parsing of Service Mesh Control Plane") + return false, err + } + + readyComponents := len(components["ready"].([]interface{})) + pendingComponents := len(components["pending"].([]interface{})) + unreadyComponents := len(components["unready"].([]interface{})) + + return pendingComponents == 0 && unreadyComponents == 0 && readyComponents > 0, nil +} diff --git a/pkg/feature/templates/namespace.patch.tmpl b/pkg/feature/templates/namespace.patch.tmpl deleted file mode 100644 index 98b12916ebd..00000000000 --- a/pkg/feature/templates/namespace.patch.tmpl +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .AppNamespace }} - annotations: - opendatahub.io/service-mesh: "true" - \ No newline at end of file diff --git a/pkg/feature/templates/serverless/serving-install/knative-serving.tmpl b/pkg/feature/templates/serverless/serving-install/knative-serving.tmpl new file mode 100644 index 00000000000..8dfc5b7fe68 --- /dev/null +++ b/pkg/feature/templates/serverless/serving-install/knative-serving.tmpl @@ -0,0 +1,23 @@ +apiVersion: operator.knative.dev/v1beta1 +kind: KnativeServing +metadata: + name: {{ .Serving.Name }} + namespace: knative-serving + annotations: + serverless.openshift.io/default-enable-http2: "true" +spec: + deployments: + - annotations: + sidecar.istio.io/inject: "true" + sidecar.istio.io/rewriteAppHTTPProbers: "true" + name: activator + - annotations: + sidecar.istio.io/inject: "true" + sidecar.istio.io/rewriteAppHTTPProbers: "true" + name: autoscaler + ingress: + istio: + enabled: true + config: + istio: + local-gateway.knative-serving.knative-local-gateway: "knative-local-gateway.{{ .ControlPlane.Namespace }}.svc.cluster.local" diff 
--git a/pkg/feature/templates/serverless/serving-install/service-mesh-subscription.tmpl b/pkg/feature/templates/serverless/serving-install/service-mesh-subscription.tmpl new file mode 100644 index 00000000000..2647e64a620 --- /dev/null +++ b/pkg/feature/templates/serverless/serving-install/service-mesh-subscription.tmpl @@ -0,0 +1,9 @@ +apiVersion: maistra.io/v1 +kind: ServiceMeshMember +metadata: + name: default + namespace: knative-serving +spec: + controlPlaneRef: + namespace: {{ .ControlPlane.Namespace }} + name: {{ .ControlPlane.Name }} diff --git a/pkg/feature/templates/serverless/serving-istio-gateways/istio-ingress-gateway.tmpl b/pkg/feature/templates/serverless/serving-istio-gateways/istio-ingress-gateway.tmpl new file mode 100644 index 00000000000..67a18c608a7 --- /dev/null +++ b/pkg/feature/templates/serverless/serving-istio-gateways/istio-ingress-gateway.tmpl @@ -0,0 +1,18 @@ +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: knative-ingress-gateway + namespace: knative-serving +spec: + selector: + knative: ingressgateway + servers: + - hosts: + - '{{ .KnativeIngressDomain }}' + port: + name: https + number: 443 + protocol: HTTPS + tls: + credentialName: {{ .KnativeCertificateSecret }} + mode: SIMPLE diff --git a/pkg/feature/templates/serverless/serving-istio-gateways/istio-local-gateway.tmpl b/pkg/feature/templates/serverless/serving-istio-gateways/istio-local-gateway.tmpl new file mode 100644 index 00000000000..dc8bfefa650 --- /dev/null +++ b/pkg/feature/templates/serverless/serving-istio-gateways/istio-local-gateway.tmpl @@ -0,0 +1,17 @@ +apiVersion: networking.istio.io/v1beta1 +kind: Gateway +metadata: + name: knative-local-gateway + namespace: knative-serving +spec: + selector: + knative: ingressgateway + servers: + - hosts: + - '*.svc.cluster.local' + port: + name: https + number: 8081 + protocol: HTTPS + tls: + mode: ISTIO_MUTUAL diff --git a/pkg/feature/templates/serverless/serving-istio-gateways/local-gateway-svc.tmpl 
b/pkg/feature/templates/serverless/serving-istio-gateways/local-gateway-svc.tmpl new file mode 100644 index 00000000000..16f9fc5d8c9 --- /dev/null +++ b/pkg/feature/templates/serverless/serving-istio-gateways/local-gateway-svc.tmpl @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + experimental.istio.io/disable-gateway-port-translation: "true" + name: knative-local-gateway + namespace: {{ .ControlPlane.Namespace }} +spec: + ports: + - name: http2 + port: 80 + protocol: TCP + targetPort: 8081 + selector: + knative: ingressgateway + type: ClusterIP diff --git a/pkg/feature/templates/servicemesh/base/smcp.tmpl b/pkg/feature/templates/servicemesh/base/smcp.tmpl new file mode 100644 index 00000000000..f9347b61055 --- /dev/null +++ b/pkg/feature/templates/servicemesh/base/smcp.tmpl @@ -0,0 +1,38 @@ +apiVersion: maistra.io/v2 +kind: ServiceMeshControlPlane +metadata: + name: {{ .ControlPlane.Name }} + namespace: {{ .ControlPlane.Namespace }} +spec: + tracing: + type: None + addons: + grafana: + enabled: false + kiali: + name: kiali + enabled: false + prometheus: + enabled: false + jaeger: + name: jaeger + security: + dataPlane: + mtls: true # otherwise inference-graph will not work. 
We use PeerAuthentication resources to force mTLS + techPreview: + meshConfig: + defaultConfig: + terminationDrainDuration: 35s + gateways: + ingress: + service: + metadata: + labels: + knative: ingressgateway + proxy: + networking: + trafficControl: + inbound: + excludedPorts: + - 8444 # metrics + - 8022 # serving: wait-for-drain k8s pre-stop hook diff --git a/pkg/feature/templates/servicemesh/metrics-collection/envoy-metrics-collection.tmpl b/pkg/feature/templates/servicemesh/metrics-collection/envoy-metrics-collection.tmpl new file mode 100644 index 00000000000..7683640aafe --- /dev/null +++ b/pkg/feature/templates/servicemesh/metrics-collection/envoy-metrics-collection.tmpl @@ -0,0 +1,13 @@ +apiVersion: monitoring.coreos.com/v1 +kind: PodMonitor +metadata: + name: {{ .ControlPlane.Name }}-envoy-monitor + namespace: {{ .ControlPlane.Namespace }} +spec: + selector: + matchExpressions: + - key: istio-prometheus-ignore + operator: DoesNotExist + podMetricsEndpoints: + - path: /stats/prometheus + interval: 30s diff --git a/pkg/feature/templates/servicemesh/metrics-collection/pilot-metrics-collection.tmpl b/pkg/feature/templates/servicemesh/metrics-collection/pilot-metrics-collection.tmpl new file mode 100644 index 00000000000..736c55ae85d --- /dev/null +++ b/pkg/feature/templates/servicemesh/metrics-collection/pilot-metrics-collection.tmpl @@ -0,0 +1,14 @@ +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ .ControlPlane.Name }}-pilot-monitor + namespace: {{ .ControlPlane.Namespace }} +spec: + targetLabels: + - app + selector: + matchLabels: + istio: pilot + endpoints: + - port: http-monitoring + interval: 30s diff --git a/pkg/feature/types.go b/pkg/feature/types.go index 8f9b6011119..7b5da4cc7b8 100644 --- a/pkg/feature/types.go +++ b/pkg/feature/types.go @@ -3,14 +3,19 @@ package feature import ( "strings" - v1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + featurev1 
"github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" + infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" ) type Spec struct { - OAuth OAuth - AppNamespace string - Domain string - Tracker *v1.FeatureTracker + *infrav1.ServiceMeshSpec + Serving *infrav1.ServingSpec + OAuth OAuth + AppNamespace string + Domain string + KnativeCertificateSecret string + KnativeIngressDomain string + Tracker *featurev1.FeatureTracker } type OAuth struct { diff --git a/pkg/gvr/gvr.go b/pkg/gvr/gvr.go index a10b0fdae50..c970b82d3bd 100644 --- a/pkg/gvr/gvr.go +++ b/pkg/gvr/gvr.go @@ -3,9 +3,27 @@ package gvr import "k8s.io/apimachinery/pkg/runtime/schema" var ( + KnativeServing = schema.GroupVersionResource{ + Group: "operator.knative.dev", + Version: "v1beta1", + Resource: "knativeservings", + } + + OpenshiftIngress = schema.GroupVersionResource{ + Group: "config.openshift.io", + Version: "v1", + Resource: "ingresses", + } + ResourceTracker = schema.GroupVersionResource{ - Group: "dscinitialization.opendatahub.io", + Group: "features.opendatahub.io", Version: "v1", Resource: "featuretrackers", } + + SMCP = schema.GroupVersionResource{ + Group: "maistra.io", + Version: "v2", + Resource: "servicemeshcontrolplanes", + } ) diff --git a/tests/e2e/controller_setup_test.go b/tests/e2e/controller_setup_test.go index c2131cb6874..4bad1d83f9d 100644 --- a/tests/e2e/controller_setup_test.go +++ b/tests/e2e/controller_setup_test.go @@ -24,6 +24,7 @@ import ( dsc "github.com/opendatahub-io/opendatahub-operator/v2/apis/datasciencecluster/v1" dsci "github.com/opendatahub-io/opendatahub-operator/v2/apis/dscinitialization/v1" + featurev1 "github.com/opendatahub-io/opendatahub-operator/v2/apis/features/v1" ) var ( @@ -105,6 +106,7 @@ func TestOdhOperator(t *testing.T) { utilruntime.Must(autoscalingv1.AddToScheme(scheme)) utilruntime.Must(dsci.AddToScheme(scheme)) utilruntime.Must(dsc.AddToScheme(scheme)) + utilruntime.Must(featurev1.AddToScheme(scheme)) 
utilruntime.Must(monitoringv1.AddToScheme(scheme)) // individual test suites after the operator is running diff --git a/tests/integration/features/crd/openshift-ingresses.yaml b/tests/integration/features/crd/openshift-ingresses.yaml new file mode 100644 index 00000000000..b1b3f412803 --- /dev/null +++ b/tests/integration/features/crd/openshift-ingresses.yaml @@ -0,0 +1,548 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ingresses.config.openshift.io +spec: + conversion: + strategy: None + group: config.openshift.io + names: + kind: Ingress + listKind: IngressList + plural: ingresses + singular: ingress + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Ingress holds cluster-wide information about ingress, including + the default ingress domain used for routes. The canonical name is `cluster`. + \n Compatibility level 1: Stable within a major release for a minimum of + 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + appsDomain: + description: appsDomain is an optional domain to use instead of the + one specified in the domain field when a Route is created without + specifying an explicit host. If appsDomain is nonempty, this value + is used to generate default host values for Route. Unlike domain, + appsDomain may be modified after installation. This assumes a new + ingresscontroller has been setup with a wildcard certificate. + type: string + componentRoutes: + description: "componentRoutes is an optional list of routes that are + managed by OpenShift components that a cluster-admin is able to + configure the hostname and serving certificate for. The namespace + and name of each route in this list should match an existing entry + in the status.componentRoutes list. \n To determine the set of configurable + Routes, look at namespace and name of entries in the .status.componentRoutes + list, where participating operators write the status of configurable + routes." + items: + description: ComponentRouteSpec allows for configuration of a route's + hostname and serving certificate. + properties: + hostname: + description: hostname is the hostname that should be used by + the route. + pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ + type: string + name: + description: "name is the logical name of the route to customize. + \n The namespace and name of this componentRoute must match + a corresponding entry in the list of status.componentRoutes + if the route is to be customized." 
+ maxLength: 256 + minLength: 1 + type: string + namespace: + description: "namespace is the namespace of the route to customize. + \n The namespace and name of this componentRoute must match + a corresponding entry in the list of status.componentRoutes + if the route is to be customized." + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + servingCertKeyPairSecret: + description: servingCertKeyPairSecret is a reference to a secret + of type `kubernetes.io/tls` in the openshift-config namespace. + The serving cert/key pair must match and will be used by the + operator to fulfill the intent of serving with this name. + If the custom hostname uses the default routing suffix of + the cluster, the Secret specification for a serving certificate + will not be needed. + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + required: + - name + type: object + required: + - hostname + - name + - namespace + type: object + type: array + x-kubernetes-list-map-keys: + - namespace + - name + x-kubernetes-list-type: map + domain: + description: "domain is used to generate a default host name for a + route when the route's host name is empty. The generated host name + will follow this pattern: \"..\". + \n It is also used as the default wildcard domain suffix for ingress. + The default ingresscontroller domain will follow this pattern: \"*.\". + \n Once set, changing domain is not currently supported." + type: string + loadBalancer: + description: loadBalancer contains the load balancer details in general + which are not only specific to the underlying infrastructure provider + of the current cluster and are required for Ingress Controller to + work on OpenShift. + properties: + platform: + description: platform holds configuration specific to the underlying + infrastructure provider for the ingress load balancers. 
When + omitted, this means the user has no opinion and the platform + is left to choose reasonable defaults. These defaults are subject + to change over time. + properties: + aws: + description: aws contains settings specific to the Amazon + Web Services infrastructure provider. + properties: + type: + description: "type allows user to set a load balancer + type. When this field is set the default ingresscontroller + will get created using the specified LBType. If this + field is not set then the default ingress controller + of LBType Classic will be created. Valid values are: + \n * \"Classic\": A Classic Load Balancer that makes + routing decisions at either the transport layer (TCP/SSL) + or the application layer (HTTP/HTTPS). See the following + for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#clb + \n * \"NLB\": A Network Load Balancer that makes routing + decisions at the transport layer (TCP/SSL). See the + following for additional details: \n https://docs.aws.amazon.com/AmazonECS/latest/developerguide/load-balancer-types.html#nlb" + enum: + - NLB + - Classic + type: string + required: + - type + type: object + type: + description: type is the underlying infrastructure provider + for the cluster. Allowed values are "AWS", "Azure", "BareMetal", + "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", + "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and + "None". Individual components may not support all platforms, + and must handle unrecognized platforms as None if they do + not support that platform. 
+ enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + type: object + type: object + requiredHSTSPolicies: + description: "requiredHSTSPolicies specifies HSTS policies that are + required to be set on newly created or updated routes matching + the domainPattern/s and namespaceSelector/s that are specified in + the policy. Each requiredHSTSPolicy must have at least a domainPattern + and a maxAge to validate a route HSTS Policy route annotation, and + affect route admission. \n A candidate route is checked for HSTS + Policies if it has the HSTS Policy route annotation: \"haproxy.router.openshift.io/hsts_header\" + E.g. haproxy.router.openshift.io/hsts_header: max-age=31536000;preload;includeSubDomains + \n - For each candidate route, if it matches a requiredHSTSPolicy + domainPattern and optional namespaceSelector, then the maxAge, preloadPolicy, + and includeSubdomainsPolicy must be valid to be admitted. Otherwise, + the route is rejected. - The first match, by domainPattern and optional + namespaceSelector, in the ordering of the RequiredHSTSPolicies determines + the route's admission status. - If the candidate route doesn't match + any requiredHSTSPolicy domainPattern and optional namespaceSelector, + then it may use any HSTS Policy annotation. \n The HSTS policy configuration + may be changed after routes have already been created. An update + to a previously admitted route may then fail if the updated route + does not conform to the updated HSTS policy configuration. However, + changing the HSTS policy configuration will not cause a route that + is already admitted to stop working. \n Note that if there are no + RequiredHSTSPolicies, any HSTS Policy annotation on the route is + valid." 
+ items: + properties: + domainPatterns: + description: "domainPatterns is a list of domains for which + the desired HSTS annotations are required. If domainPatterns + is specified and a route is created with a spec.host matching + one of the domains, the route must specify the HSTS Policy + components described in the matching RequiredHSTSPolicy. \n + The use of wildcards is allowed like this: *.foo.com matches + everything under foo.com. foo.com only matches foo.com, so + to cover foo.com and everything under it, you must specify + *both*." + items: + type: string + minItems: 1 + type: array + includeSubDomainsPolicy: + description: 'includeSubDomainsPolicy means the HSTS Policy + should apply to any subdomains of the host''s domain name. Thus, + for the host bar.foo.com, if includeSubDomainsPolicy was set + to RequireIncludeSubDomains: - the host app.bar.foo.com would + inherit the HSTS Policy of bar.foo.com - the host bar.foo.com + would inherit the HSTS Policy of bar.foo.com - the host foo.com + would NOT inherit the HSTS Policy of bar.foo.com - the host + def.foo.com would NOT inherit the HSTS Policy of bar.foo.com' + enum: + - RequireIncludeSubDomains + - RequireNoIncludeSubDomains + - NoOpinion + type: string + maxAge: + description: maxAge is the delta time range in seconds during + which hosts are regarded as HSTS hosts. If set to 0, it negates + the effect, and hosts are removed as HSTS hosts. If set to + 0 and includeSubdomains is specified, all subdomains of the + host are also removed as HSTS hosts. maxAge is a time-to-live + value, and if this policy is not refreshed on a client, the + HSTS policy will eventually expire on that client. + properties: + largestMaxAge: + description: The largest allowed value (in seconds) of the + RequiredHSTSPolicy max-age This value can be left unspecified, + in which case no upper limit is enforced. 
+ format: int32 + maximum: 2147483647 + minimum: 0 + type: integer + smallestMaxAge: + description: The smallest allowed value (in seconds) of + the RequiredHSTSPolicy max-age Setting max-age=0 allows + the deletion of an existing HSTS header from a host. This + is a necessary tool for administrators to quickly correct + mistakes. This value can be left unspecified, in which + case no lower limit is enforced. + format: int32 + maximum: 2147483647 + minimum: 0 + type: integer + type: object + namespaceSelector: + description: namespaceSelector specifies a label selector such + that the policy applies only to those routes that are in namespaces + with labels that match the selector, and are in one of the + DomainPatterns. Defaults to the empty LabelSelector, which + matches everything. + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. + If the operator is In or NotIn, the values array + must be non-empty. If the operator is Exists or + DoesNotExist, the values array must be empty. This + array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
+ A single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is + "key", the operator is "In", and the values array contains + only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + preloadPolicy: + description: preloadPolicy directs the client to include hosts + in its host preload list so that it never needs to do an initial + load to get the HSTS header (note that this is not defined + in RFC 6797 and is therefore client implementation-dependent). + enum: + - RequirePreload + - RequireNoPreload + - NoOpinion + type: string + required: + - domainPatterns + type: object + type: array + type: object + status: + description: status holds observed values from the cluster. They may not + be overridden. + properties: + componentRoutes: + description: componentRoutes is where participating operators place + the current route status for routes whose hostnames and serving + certificates can be customized by the cluster-admin. + items: + description: ComponentRouteStatus contains information allowing + configuration of a route's hostname and serving certificate. + properties: + conditions: + description: "conditions are used to communicate the state of + the componentRoutes entry. \n Supported conditions include + Available, Degraded and Progressing. \n If available is true, + the content served by the route can be accessed by users. + This includes cases where a default may continue to serve + content while the customized route specified by the cluster-admin + is being configured. \n If Degraded is true, that means something + has gone wrong trying to handle the componentRoutes entry. + The currentHostnames field may or may not be in effect. \n + If Progressing is true, that means the component is taking + some action related to the componentRoutes entry." + items: + description: "Condition contains details for one aspect of + the current state of this API Resource. 
--- This struct + is intended for direct use as an array at the field path + .status.conditions. For example, \n type FooStatus struct{ + // Represents the observations of a foo's current state. + // Known .status.conditions.type are: \"Available\", \"Progressing\", + and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields + }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should + be when the underlying condition changed. If that is + not known, then using the time when the API field changed + is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, + if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the + current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier + indicating the reason for the condition's last transition. + Producers of specific condition types may define expected + values and meanings for this field, and whether the + values are considered a guaranteed API. The value should + be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across + resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability + to deconflict is important. The regex it matches is + (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + consumingUsers: + description: consumingUsers is a slice of ServiceAccounts that + need to have read permission on the servingCertKeyPairSecret + secret. + items: + description: ConsumingUser is an alias for string which we + add validation to. Currently only service accounts are supported. + maxLength: 512 + minLength: 1 + pattern: ^system:serviceaccount:[a-z0-9]([-a-z0-9]*[a-z0-9])?:[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + maxItems: 5 + type: array + currentHostnames: + description: currentHostnames is the list of current names used + by the route. Typically, this list should consist of a single + hostname, but if multiple hostnames are supported by the route + the operator may write multiple entries to this list. + items: + description: "Hostname is an alias for hostname string validation. + \n The left operand of the | is the original kubebuilder + hostname validation format, which is incorrect because it + allows upper case letters, disallows hyphen or number in + the TLD, and allows labels to start/end in non-alphanumeric + characters. See https://bugzilla.redhat.com/show_bug.cgi?id=2039256. 
+ ^([a-zA-Z0-9\\p{S}\\p{L}]((-?[a-zA-Z0-9\\p{S}\\p{L}]{0,62})?)|([a-zA-Z0-9\\p{S}\\p{L}](([a-zA-Z0-9-\\p{S}\\p{L}]{0,61}[a-zA-Z0-9\\p{S}\\p{L}])?)(\\.)){1,}([a-zA-Z\\p{L}]){2,63})$ + \n The right operand of the | is a new pattern that mimics + the current API route admission validation on hostname, + except that it allows hostnames longer than the maximum + length: ^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ + \n Both operand patterns are made available so that modifications + on ingress spec can still happen after an invalid hostname + was saved via validation by the incorrect left operand of + the | operator." + pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ + type: string + minItems: 1 + type: array + defaultHostname: + description: defaultHostname is the hostname of this route prior + to customization. + pattern: ^([a-zA-Z0-9\p{S}\p{L}]((-?[a-zA-Z0-9\p{S}\p{L}]{0,62})?)|([a-zA-Z0-9\p{S}\p{L}](([a-zA-Z0-9-\p{S}\p{L}]{0,61}[a-zA-Z0-9\p{S}\p{L}])?)(\.)){1,}([a-zA-Z\p{L}]){2,63})$|^(([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})[\.]){0,}([a-z0-9][-a-z0-9]{0,61}[a-z0-9]|[a-z0-9]{1,63})$ + type: string + name: + description: "name is the logical name of the route to customize. + It does not have to be the actual name of a route resource + but it cannot be renamed. \n The namespace and name of this + componentRoute must match a corresponding entry in the list + of spec.componentRoutes if the route is to be customized." + maxLength: 256 + minLength: 1 + type: string + namespace: + description: "namespace is the namespace of the route to customize. + It must be a real namespace. 
Using an actual namespace ensures + that no two components will conflict and the same component + can be installed multiple times. \n The namespace and name + of this componentRoute must match a corresponding entry in + the list of spec.componentRoutes if the route is to be customized." + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + relatedObjects: + description: relatedObjects is a list of resources which are + useful when debugging or inspecting how spec.componentRoutes + is applied. + items: + description: ObjectReference contains enough information to + let you inspect or modify the referred object. + properties: + group: + description: group of the referent. + type: string + name: + description: name of the referent. + type: string + namespace: + description: namespace of the referent. + type: string + resource: + description: resource of the referent. + type: string + required: + - group + - name + - resource + type: object + minItems: 1 + type: array + required: + - defaultHostname + - name + - namespace + - relatedObjects + type: object + type: array + x-kubernetes-list-map-keys: + - namespace + - name + x-kubernetes-list-type: map + defaultPlacement: + description: "defaultPlacement is set at installation time to control + which nodes will host the ingress router pods by default. The options + are control-plane nodes or worker nodes. \n This field works by + dictating how the Cluster Ingress Operator will consider unset replicas + and nodePlacement fields in IngressController resources when creating + the corresponding Deployments. \n See the documentation for the + IngressController replicas and nodePlacement fields for more information. 
+ \n When omitted, the default value is Workers" + enum: + - ControlPlane + - Workers + - "" + type: string + type: object + required: + - spec + type: object + served: true + storage: true diff --git a/tests/integration/features/serverless_feature_test.go b/tests/integration/features/serverless_feature_test.go new file mode 100644 index 00000000000..26d7b4192f1 --- /dev/null +++ b/tests/integration/features/serverless_feature_test.go @@ -0,0 +1,222 @@ +package features_test + +import ( + "context" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/yaml" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + infrav1 "github.com/opendatahub-io/opendatahub-operator/v2/infrastructure/v1" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/serverless" + "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +const ( + testNamespacePrefix = "test-ns" + testDomainFooCom = "*.foo.com" +) + +const knativeServingCrd = `apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: knativeservings.operator.knative.dev +spec: + group: operator.knative.dev + names: + kind: KnativeServing + listKind: KnativeServingList + plural: knativeservings + singular: knativeserving + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object +` + +const knativeServingInstance = `apiVersion: operator.knative.dev/v1beta1 +kind: KnativeServing +metadata: + name: knative-serving-instance +spec: {} +` + +const openshiftClusterIngress = `apiVersion: config.openshift.io/v1 +kind: Ingress +metadata: + name: cluster +spec: + domain: "foo.io" + loadBalancer: + platform: + type: ""` + +var _ = Describe("Serverless feature", func() { + + var testFeature *feature.Feature + var objectCleaner *envtestutil.Cleaner + + BeforeEach(func() { + c, err := client.New(envTest.Config, client.Options{}) + Expect(err).ToNot(HaveOccurred()) + + objectCleaner = envtestutil.CreateCleaner(c, envTest.Config, timeout, interval) + + testFeatureName := "serverless-feature" + namespace := envtestutil.AppendRandomNameTo(testFeatureName) + + dsciSpec := newDSCInitializationSpec(namespace) + testFeature, err = feature.CreateFeature(testFeatureName). + For(dsciSpec). + UsingConfig(envTest.Config). 
+ Load() + + Expect(err).ToNot(HaveOccurred()) + }) + + Context("verifying preconditions", func() { + + When("operator is not installed", func() { + It("operator presence check should return an error", func() { + Expect(serverless.EnsureServerlessOperatorInstalled(testFeature)).To(HaveOccurred()) + }) + }) + + When("operator is installed", func() { + var knativeServingCrdObj *apiextensionsv1.CustomResourceDefinition + + BeforeEach(func() { + // Create the KNativeServing CRD + knativeServingCrdObj = &apiextensionsv1.CustomResourceDefinition{} + Expect(yaml.Unmarshal([]byte(knativeServingCrd), knativeServingCrdObj)).ToNot(HaveOccurred()) + c, err := client.New(envTest.Config, client.Options{}) + Expect(err).ToNot(HaveOccurred()) + Expect(c.Create(context.TODO(), knativeServingCrdObj)).ToNot(HaveOccurred()) + + crdOptions := envtest.CRDInstallOptions{PollInterval: interval, MaxTime: timeout} + err = envtest.WaitForCRDs(envTest.Config, []*apiextensionsv1.CustomResourceDefinition{knativeServingCrdObj}, crdOptions) + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + // Delete KNativeServing CRD + objectCleaner.DeleteAll(knativeServingCrdObj) + }) + + It("operator presence check should succeed", func() { + Expect(serverless.EnsureServerlessOperatorInstalled(testFeature)).ToNot(HaveOccurred()) + }) + + It("KNative serving absence check should succeed if serving is not installed", func() { + Expect(serverless.EnsureServerlessAbsent(testFeature)).ToNot(HaveOccurred()) + }) + + It("KNative serving absence check should fail when serving is present", func() { + ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) + nsResource := createNamespace(ns) + Expect(envTestClient.Create(context.TODO(), nsResource)).ToNot(HaveOccurred()) + defer objectCleaner.DeleteAll(nsResource) + + knativeServing := &unstructured.Unstructured{} + Expect(yaml.Unmarshal([]byte(knativeServingInstance), knativeServing)).ToNot(HaveOccurred()) + 
knativeServing.SetNamespace(nsResource.Name) + Expect(envTestClient.Create(context.TODO(), knativeServing)).ToNot(HaveOccurred()) + + Expect(serverless.EnsureServerlessAbsent(testFeature)).To(HaveOccurred()) + }) + }) + }) + + Context("default values", func() { + + Context("ingress gateway TLS secret name", func() { + + It("should set default value when value is empty in the DSCI", func() { + // Default value is blank -> testFeature.Spec.Serving.IngressGateway.Certificate.SecretName = "" + Expect(serverless.ServingDefaultValues(testFeature)).To(Succeed()) + Expect(testFeature.Spec.KnativeCertificateSecret).To(Equal(serverless.DefaultCertificateSecretName)) + }) + + It("should use user value when set in the DSCI", func() { + testFeature.Spec.Serving.IngressGateway.Certificate.SecretName = "fooBar" + Expect(serverless.ServingDefaultValues(testFeature)).To(Succeed()) + Expect(testFeature.Spec.KnativeCertificateSecret).To(Equal("fooBar")) + }) + }) + + Context("ingress domain name suffix", func() { + + It("should use OpenShift ingress domain when value is empty in the DSCI", func() { + // Create the OpenShift cluster Ingress resource + osIngressResource := &unstructured.Unstructured{} + Expect(yaml.Unmarshal([]byte(openshiftClusterIngress), osIngressResource)).ToNot(HaveOccurred()) + c, err := client.New(envTest.Config, client.Options{}) + Expect(err).ToNot(HaveOccurred()) + Expect(c.Create(context.TODO(), osIngressResource)).ToNot(HaveOccurred()) + + // Default value is blank -> testFeature.Spec.Serving.IngressGateway.Domain = "" + Expect(serverless.ServingIngressDomain(testFeature)).ToNot(HaveOccurred()) + Expect(testFeature.Spec.KnativeIngressDomain).To(Equal("*.foo.io")) + }) + + It("should use user value when set in the DSCI", func() { + testFeature.Spec.Serving.IngressGateway.Domain = testDomainFooCom + Expect(serverless.ServingIngressDomain(testFeature)).ToNot(HaveOccurred()) + Expect(testFeature.Spec.KnativeIngressDomain).To(Equal(testDomainFooCom)) + }) + }) + }) + + 
Context("resources creation", func() { + + It("should create a TLS secret if certificate is SelfSigned", func() { + ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) + nsResource := createNamespace(ns) + Expect(envTestClient.Create(context.TODO(), nsResource)).ToNot(HaveOccurred()) + defer objectCleaner.DeleteAll(nsResource) + + testFeature.Spec.ControlPlane.Namespace = nsResource.Name + testFeature.Spec.Serving.IngressGateway.Certificate.Type = infrav1.SelfSigned + testFeature.Spec.Serving.IngressGateway.Domain = testDomainFooCom + Expect(serverless.ServingDefaultValues(testFeature)).ToNot(HaveOccurred()) + Expect(serverless.ServingIngressDomain(testFeature)).ToNot(HaveOccurred()) + + Expect(serverless.ServingCertificateResource(testFeature)).ToNot(HaveOccurred()) + + secret, err := testFeature.Clientset.CoreV1().Secrets(nsResource.Name).Get(context.TODO(), serverless.DefaultCertificateSecretName, v1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(secret).ToNot(BeNil()) + }) + + It("should not create any TLS secret if certificate is user provided", func() { + ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) + nsResource := createNamespace(ns) + Expect(envTestClient.Create(context.TODO(), nsResource)).ToNot(HaveOccurred()) + defer objectCleaner.DeleteAll(nsResource) + + testFeature.Spec.ControlPlane.Namespace = nsResource.Name + testFeature.Spec.Serving.IngressGateway.Certificate.Type = infrav1.Provided + testFeature.Spec.Serving.IngressGateway.Domain = "*.foo.com" + Expect(serverless.ServingDefaultValues(testFeature)).ToNot(HaveOccurred()) + Expect(serverless.ServingIngressDomain(testFeature)).ToNot(HaveOccurred()) + + Expect(serverless.ServingCertificateResource(testFeature)).ToNot(HaveOccurred()) + + list, err := testFeature.Clientset.CoreV1().Secrets(nsResource.Name).List(context.TODO(), v1.ListOptions{}) + Expect(err).ToNot(HaveOccurred()) + Expect(list.Items).To(BeEmpty()) + }) + + }) +}) diff --git 
a/tests/integration/features/servicemesh_feature_test.go b/tests/integration/features/servicemesh_feature_test.go new file mode 100644 index 00000000000..b05bf8cd72a --- /dev/null +++ b/tests/integration/features/servicemesh_feature_test.go @@ -0,0 +1,199 @@ +package features_test + +import ( + "context" + "fmt" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/feature/servicemesh" + "github.com/opendatahub-io/opendatahub-operator/v2/pkg/gvr" + "github.com/opendatahub-io/opendatahub-operator/v2/tests/envtestutil" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +const smcpCrd = `apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + maistra-version: 2.4.2 + annotations: + service.beta.openshift.io/inject-cabundle: "true" + controller-gen.kubebuilder.io/version: v0.4.1 + name: servicemeshcontrolplanes.maistra.io +spec: + group: maistra.io + names: + categories: + - maistra-io + kind: ServiceMeshControlPlane + listKind: ServiceMeshControlPlaneList + plural: servicemeshcontrolplanes + shortNames: + - smcp + singular: servicemeshcontrolplane + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: false + subresources: + status: {} + - name: v2 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} +` + +var _ = Describe("Service Mesh feature", func() { + var testFeature *feature.Feature 
+ var objectCleaner *envtestutil.Cleaner + + BeforeEach(func() { + c, err := client.New(envTest.Config, client.Options{}) + Expect(err).ToNot(HaveOccurred()) + + objectCleaner = envtestutil.CreateCleaner(c, envTest.Config, timeout, interval) + + testFeatureName := "servicemesh-feature" + namespace := envtestutil.AppendRandomNameTo(testFeatureName) + + dsciSpec := newDSCInitializationSpec(namespace) + testFeature, err = feature.CreateFeature(testFeatureName). + For(dsciSpec). + UsingConfig(envTest.Config). + Load() + + Expect(err).ToNot(HaveOccurred()) + }) + + Describe("preconditions", func() { + When("operator is not installed", func() { + It("operator presence check should return an error", func() { + Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).To(HaveOccurred()) + }) + }) + When("operator is installed", func() { + var smcpCrdObj *apiextensionsv1.CustomResourceDefinition + + BeforeEach(func() { + // Create the SMCP CRD + smcpCrdObj = &apiextensionsv1.CustomResourceDefinition{} + Expect(yaml.Unmarshal([]byte(smcpCrd), smcpCrdObj)).ToNot(HaveOccurred()) + c, err := client.New(envTest.Config, client.Options{}) + Expect(err).ToNot(HaveOccurred()) + Expect(c.Create(context.TODO(), smcpCrdObj)).ToNot(HaveOccurred()) + + crdOptions := envtest.CRDInstallOptions{PollInterval: interval, MaxTime: timeout} + err = envtest.WaitForCRDs(envTest.Config, []*apiextensionsv1.CustomResourceDefinition{smcpCrdObj}, crdOptions) + Expect(err).ToNot(HaveOccurred()) + }) + AfterEach(func() { + // Delete SMCP CRD + objectCleaner.DeleteAll(smcpCrdObj) + }) + It("operator presence check should succeed", func() { + Expect(servicemesh.EnsureServiceMeshOperatorInstalled(testFeature)).To(Succeed()) + }) + It("should find installed Service Mesh Control Plane", func() { + c, err := client.New(envTest.Config, client.Options{}) + Expect(err).ToNot(HaveOccurred()) + + ns := envtestutil.AppendRandomNameTo(testNamespacePrefix) + nsResource := createNamespace(ns) + 
Expect(c.Create(context.Background(), nsResource)).To(Succeed()) + defer objectCleaner.DeleteAll(nsResource) + + createServiceMeshControlPlane("test-name", ns) + + testFeature.Spec.ControlPlane.Namespace = ns + testFeature.Spec.ControlPlane.Name = "test-name" + Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).To(Succeed()) + }) + It("should fail to find Service Mesh Control Plane if not present", func() { + Expect(servicemesh.EnsureServiceMeshInstalled(testFeature)).ToNot(Succeed()) + }) + }) + }) +}) + +func createServiceMeshControlPlane(name, namespace string) { + serviceMeshControlPlane := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "maistra.io/v2", + "kind": "ServiceMeshControlPlane", + "metadata": map[string]interface{}{ + "name": name, + "namespace": namespace, + }, + "spec": map[string]interface{}{}, + }, + } + Expect(createSMCPInCluster(envTest.Config, serviceMeshControlPlane, namespace)).To(Succeed()) +} + +// createSMCPInCluster uses dynamic client to create a dummy SMCP resource for testing +func createSMCPInCluster(cfg *rest.Config, smcpObj *unstructured.Unstructured, namespace string) error { + dynamicClient, err := dynamic.NewForConfig(cfg) + if err != nil { + return err + } + + result, err := dynamicClient.Resource(gvr.SMCP).Namespace(namespace).Create(context.TODO(), smcpObj, metav1.CreateOptions{}) + if err != nil { + return err + } + + statusConditions := []interface{}{ + map[string]interface{}{ + "type": "Ready", + "status": "True", + }, + } + + // Since we don't have actual service mesh operator deployed, we simulate the status + status := map[string]interface{}{ + "conditions": statusConditions, + "readiness": map[string]interface{}{ + "components": map[string]interface{}{ + "pending": []interface{}{}, + "ready": []interface{}{ + "istiod", + "ingress-gateway", + }, + "unready": []interface{}{}, + }, + }, + } + + if err := unstructured.SetNestedField(result.Object, status, "status"); err != nil { + 
return err + } + + r, err := dynamicClient.Resource(gvr.SMCP).Namespace(namespace).UpdateStatus(context.TODO(), result, metav1.UpdateOptions{}) + if err != nil { + return err + } + fmt.Printf("result: %v", r) + + return nil +}