Adjust connection controller and how it handles the license and network policy
tmjd committed Dec 20, 2024
1 parent 8ee8d1e commit 4a34d28
Showing 2 changed files with 26 additions and 97 deletions.
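
In short: the controller previously degraded and halted when the allow-tigera tier existed but the license lacked the egress-access-control feature (and it only consulted the license when the tunnel destination was a domain name). It now treats the tier and the license as two independent preconditions and simply omits the Guardian network policy when either is missing. A condensed sketch of the new gating inside Reconcile, with error handling, status reporting, and requeue logic elided (all identifiers are as they appear in the diff below; this is not standalone code):

    tierAvailable := false
    if err := r.Client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err == nil {
        tierAvailable = true
    }

    licenseActive := false
    if license, err := utils.FetchLicenseKey(ctx, r.Client); err == nil {
        licenseActive = utils.IsFeatureActive(license, common.EgressAccessControlFeature)
    }

    // Render the allow-tigera policy for Guardian only when both preconditions hold;
    // otherwise the policy is omitted instead of degrading the ManagementClusterConnection status.
    if tierAvailable && licenseActive {
        // render.GuardianPolicy(guardianCfg) is rendered and deployed here.
    }
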
109 changes: 21 additions & 88 deletions pkg/controller/clusterconnection/clusterconnection_controller.go
@@ -17,16 +17,12 @@ package clusterconnection
import (
"context"
"fmt"
"net"

"github.com/tigera/operator/pkg/url"
"golang.org/x/net/http/httpproxy"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"

"github.com/go-logr/logr"

corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
@@ -410,40 +406,31 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R
return reconcile.Result{RequeueAfter: utils.StandardRetry}, nil
}

tierAvailable := false
// Ensure the allow-tigera tier exists, before rendering any network policies within it.
includeV3NetworkPolicy := false
if err := r.Client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err != nil {
// The creation of the Tier depends on this controller to reconcile its non-NetworkPolicy resources so that the
// License becomes available. Therefore, if we fail to query the Tier, we exclude NetworkPolicy from reconciliation
// and tolerate errors arising from the Tier not being created.
if !k8serrors.IsNotFound(err) {
r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger)
return reconcile.Result{}, err
}
} else {
includeV3NetworkPolicy = true

// The Tier has been created, which means that this controller's reconciliation should no longer be a dependency
// of the License being deployed. If NetworkPolicy requires license features, it should now be safe to validate
// License presence and sufficiency.
if egressAccessControlRequired(managementClusterConnection.Spec.ManagementClusterAddr, r.resolvedPodProxies, log) {
license, err := utils.FetchLicenseKey(ctx, r.Client)
if err != nil {
if k8serrors.IsNotFound(err) {
r.status.SetDegraded(operatorv1.ResourceNotFound, "License not found", err, reqLogger)
return reconcile.Result{}, nil
}
r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying license", err, reqLogger)
return reconcile.Result{}, err
}
if err := r.Client.Get(ctx, client.ObjectKey{Name: networkpolicy.TigeraComponentTierName}, &v3.Tier{}); err == nil {
tierAvailable = true
} else if !k8serrors.IsNotFound(err) {
r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying allow-tigera tier", err, reqLogger)
return reconcile.Result{}, err
}

if !utils.IsFeatureActive(license, common.EgressAccessControlFeature) {
r.status.SetDegraded(operatorv1.ResourceReadError, "Feature is not active - License does not support feature: egress-access-control", nil, reqLogger)
return reconcile.Result{}, nil
}
licenseActive := false
// Ensure the license supports enterprise network policy before rendering any network policies that depend on it.
if license, err := utils.FetchLicenseKey(ctx, r.Client); err == nil {
if utils.IsFeatureActive(license, common.EgressAccessControlFeature) {
licenseActive = true
}
} else if !k8serrors.IsNotFound(err) {
r.status.SetDegraded(operatorv1.ResourceReadError, "Error querying license", err, reqLogger)
return reconcile.Result{}, err
}

// The creation of the Tier depends on this controller to reconcile its non-NetworkPolicy resources so that the
// License becomes available. Therefore, if we fail to query the Tier, we exclude NetworkPolicy from reconciliation
// and tolerate errors arising from the Tier not being created.
includeEgressNetworkPolicy := tierAvailable && licenseActive

ch := utils.NewComponentHandler(log, r.Client, r.Scheme, managementClusterConnection)
guardianCfg := &render.GuardianConfiguration{
URL: managementClusterConnection.Spec.ManagementClusterAddr,
@@ -463,7 +450,7 @@ func (r *ReconcileConnection) Reconcile(ctx context.Context, request reconcile.R
// In managed clusters, the clusterconnection controller is a dependency for the License to be created. In case the
// License is unavailable and reconciliation of non-NetworkPolicy resources in the clusterconnection controller
// would resolve it, we render network policies last to prevent a chicken-and-egg scenario.
if includeV3NetworkPolicy {
if includeEgressNetworkPolicy {
policyComponent, err := render.GuardianPolicy(guardianCfg)
if err != nil {
log.Error(err, "Failed to create NetworkPolicy component for Guardian, policy will be omitted")
Expand Down Expand Up @@ -498,57 +485,3 @@ func fillDefaults(mcc *operatorv1.ManagementClusterConnection) {
mcc.Spec.TLS.CA = operatorv1.CATypeTigera
}
}

func egressAccessControlRequired(target string, podProxies []*httpproxy.Config, log logr.Logger) bool {
processedPodProxies := render.ProcessPodProxies(podProxies)
for _, httpProxyConfig := range processedPodProxies {
if egressAccessControlRequiredByProxy(target, httpProxyConfig, log) {
return true
}
}
return false
}

func egressAccessControlRequiredByProxy(target string, httpProxyConfig *httpproxy.Config, log logr.Logger) bool {
var destinationHostPort string
if httpProxyConfig != nil && httpProxyConfig.HTTPSProxy != "" {
// HTTPS proxy is specified as a URL.
proxyHostPort, err := url.ParseHostPortFromHTTPProxyString(httpProxyConfig.HTTPSProxy)
if err != nil {
log.Error(err, fmt.Sprintf(
"Failed to parse HTTP Proxy URL (%s). Assuming %s does not require license feature %s",
httpProxyConfig.HTTPSProxy,
render.GuardianPolicyName,
common.EgressAccessControlFeature,
))
return false
}

destinationHostPort = proxyHostPort
} else {
// Target is already specified as host:port.
destinationHostPort = target
}

// Determine if the host in the host:port is a domain name.
hostPortHasDomain, err := hostPortUsesDomainName(destinationHostPort)
if err != nil {
log.Error(err, fmt.Sprintf(
"Failed to parse resolved host:port (%s) for remote tunnel endpoint. Assuming %s does not require license feature %s",
destinationHostPort,
render.GuardianPolicyName,
common.EgressAccessControlFeature,
))
return false
}
return hostPortHasDomain
}

func hostPortUsesDomainName(hostPort string) (bool, error) {
host, _, err := net.SplitHostPort(hostPort)
if err != nil {
return false, err
}

return net.ParseIP(host) == nil, nil
}
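
For context, the removed helpers reduced to one question: is the tunnel destination (the HTTPS proxy when one is configured, otherwise the management cluster address) a domain name rather than an IP literal? Only in that case was the egress-access-control license feature required. The standalone sketch below reproduces that decision using only the standard library; parseProxyHostPort is an illustrative stand-in for the operator's url.ParseHostPortFromHTTPProxyString helper, not its actual implementation, and the example addresses are made up.

    package main

    import (
        "fmt"
        "net"
        "net/url"
    )

    // hostPortUsesDomainName reports whether the host part of host:port is a
    // domain name, i.e. it does not parse as an IP literal.
    func hostPortUsesDomainName(hostPort string) (bool, error) {
        host, _, err := net.SplitHostPort(hostPort)
        if err != nil {
            return false, err
        }
        return net.ParseIP(host) == nil, nil
    }

    // parseProxyHostPort extracts host:port from a proxy URL, defaulting the
    // port from the scheme (an illustrative stand-in for the operator's helper).
    func parseProxyHostPort(proxyURL string) (string, error) {
        u, err := url.Parse(proxyURL)
        if err != nil {
            return "", err
        }
        port := u.Port()
        if port == "" {
            if u.Scheme == "https" {
                port = "443"
            } else {
                port = "80"
            }
        }
        return net.JoinHostPort(u.Hostname(), port), nil
    }

    // egressAccessControlRequired mirrors the removed check: the effective
    // destination is the HTTPS proxy when one is set, otherwise the target.
    func egressAccessControlRequired(target, httpsProxy string) (bool, error) {
        dest := target
        if httpsProxy != "" {
            var err error
            if dest, err = parseProxyHostPort(httpsProxy); err != nil {
                return false, err
            }
        }
        return hostPortUsesDomainName(dest)
    }

    func main() {
        fmt.Println(egressAccessControlRequired("mgmt.example.com:9449", ""))                         // true <nil>
        fmt.Println(egressAccessControlRequired("192.0.2.10:9449", ""))                               // false <nil>
        fmt.Println(egressAccessControlRequired("192.0.2.10:9449", "http://proxy.corp.example:3128")) // true <nil>
    }
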
@@ -299,20 +299,16 @@ var _ = Describe("ManagementClusterConnection controller tests", func() {
Expect(policies.Items[1].Name).To(Equal("allow-tigera.guardian-access"))
})

It("should degrade and wait when tier is ready, but license is not sufficient", func() {
It("should omit allow-tigera policy when tier is ready, but license is not sufficient", func() {
licenseKey.Status.Features = []string{common.TiersFeature}
Expect(c.Update(ctx, licenseKey)).NotTo(HaveOccurred())

mockStatus = &status.MockStatus{}
mockStatus.On("Run").Return()
mockStatus.On("OnCRFound").Return()
mockStatus.On("SetDegraded", operatorv1.ResourceReadError, "Feature is not active - License does not support feature: egress-access-control", mock.Anything, mock.Anything).Return()
mockStatus.On("SetMetaData", mock.Anything).Return()

r = clusterconnection.NewReconcilerWithShims(c, clientScheme, mockStatus, operatorv1.ProviderNone, ready)
_, err := r.Reconcile(ctx, reconcile.Request{})
Expect(err).ShouldNot(HaveOccurred())
mockStatus.AssertExpectations(GinkgoT())

policies := v3.NetworkPolicyList{}
Expect(c.List(ctx, &policies)).ToNot(HaveOccurred())
Expect(policies.Items).To(HaveLen(0))
})

It("should degrade and wait when tier and license are ready, but tier watch is not ready", func() {
