From 033338b21b9fa336f44aebd2271188d330c2c75a Mon Sep 17 00:00:00 2001
From: Clint
Date: Fri, 20 Dec 2024 08:49:07 -0600
Subject: [PATCH] fix: add generated target for all node IPs (#1119)

## Description

Adds a new generator / target called `KubeNodes` that contains the internal IP addresses of all nodes in the cluster.

**NOTE:** ~I have no idea (yet) where the `docs/reference/` file changes came from.~ They appear to be missing on `main`.

## Related Issue

Relates to #970. The `Steps to Validate` below include steps to verify that #970 is fixed.

## Type of change

- [x] Bug fix (non-breaking change which fixes an issue)
- [x] New feature (non-breaking change which adds functionality)
- [ ] Other (security config, docs update, etc)

## Steps to Validate
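For context before running the steps: the new target is consumed like any other `remoteGenerated` value in a `Package` allow rule. The sketch below mirrors the egress rule this PR adds to `src/prometheus-stack/chart/templates/uds-package.yaml`; the surrounding `Package` scaffolding (apiVersion, kind, metadata values) is illustrative and not copied verbatim from this PR:

```yaml
apiVersion: uds.dev/v1alpha1
kind: Package
metadata:
  name: prometheus-stack   # illustrative
  namespace: monitoring
spec:
  network:
    allow:
      # Egress to every node's InternalIP; the operator keeps the generated
      # NetworkPolicy in sync as nodes are added or removed
      - direction: Egress
        remoteGenerated: KubeNodes
        selector:
          app.kubernetes.io/name: prometheus
        description: "Metrics Scraping of Kube Nodes"
```

The operator resolves `KubeNodes` to one `/32` CIDR per node InternalIP (or to the static `KUBENODE_CIDRS` override when configured) and updates the generated policy as the cluster changes.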
### Setup and verify behavior of the target

Create a k3d cluster named `uds` (the name is used later when adding nodes):

```bash
k3d cluster create uds
```

Deploy slim-dev:

```bash
uds run slim-dev
```

Create and deploy the monitoring layer:

```bash
uds run -f ./tasks/create.yaml single-layer-callable --set LAYER=monitoring
uds run -f ./tasks/deploy.yaml single-layer-callable --set LAYER=monitoring
```

Create and deploy the metrics-server layer:

```bash
uds run -f ./tasks/create.yaml single-layer-callable --set LAYER=metrics-server
uds run -f ./tasks/deploy.yaml single-layer-callable --set LAYER=metrics-server
```

Inspect the network policy for scraping of kube nodes:

```bash
kubectl describe networkpolicy allow-prometheus-stack-egress-metrics-scraping-of-kube-nodes -n monitoring
```

The `spec:` section is the relevant part and should contain the IPs of the nodes:

```bash
Spec:
  PodSelector:     app.kubernetes.io/name=prometheus
  Not affecting ingress traffic
  Allowing egress traffic:
    To Port: (traffic allowed to all ports)
    To:
      IPBlock:
        CIDR: 172.28.0.2/32
        Except:
  Policy Types: Egress
```

Add a node:

```bash
k3d node create extra1 --cluster uds --wait --memory 500M
```

Verify the internal IP of the new node:

```bash
kubectl get nodes -o custom-columns="NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=='InternalIP')].address"
```

Re-read the netpol to verify the new IP is in the `spec:` block:

```bash
kubectl describe networkpolicy allow-prometheus-stack-egress-metrics-scraping-of-kube-nodes -n monitoring
```

The output should now look something like this:

```bash
Spec:
  PodSelector:     app.kubernetes.io/name=prometheus
  Not affecting ingress traffic
  Allowing egress traffic:
    To Port: (traffic allowed to all ports)
    To:
      IPBlock:
        CIDR: 172.28.0.2/32
        Except:
    To:
      IPBlock:
        CIDR: 172.28.0.4/32
        Except:
  Policy Types: Egress
```

### Verify Prometheus can read things

Connect directly to Prometheus:

```bash
kubectl port-forward -n monitoring svc/kube-prometheus-stack-prometheus 9090:9090
```

Visit http://localhost:9090/

Execute this expression to see all node/CPU data:

```bash
node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate
```

To see info from just the `extra1` node:

```bash
node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{node=~"^k3d-extra.*"}
```

Add a new node:

```bash
k3d node create extra2 --cluster uds --wait --memory 500M
```

Verify the netpol updates:

```bash
kubectl describe networkpolicy allow-prometheus-stack-egress-metrics-scraping-of-kube-nodes -n monitoring
```

Re-execute the Prometheus query from above. It may take a few minutes for `extra2` to show up, though; not sure why.

Delete a node and verify the spec updates again:

```bash
kubectl delete node k3d-extra1-0 && k3d node delete k3d-extra1-0
```

Re-reading the netpol should show that the deleted node's IP has been removed.
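As an optional shortcut to the `describe` commands above, the generated CIDRs can be pulled directly with a JSONPath query (same policy name and namespace as above); the output is a space-separated list of `/32` CIDRs that should track node additions and deletions:

```bash
# Print only the egress CIDRs from the generated KubeNodes policy
kubectl get networkpolicy allow-prometheus-stack-egress-metrics-scraping-of-kube-nodes \
  -n monitoring \
  -o jsonpath='{.spec.egress[0].to[*].ipBlock.cidr}'
```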
## Checklist before merging

- [x] Test, docs, adr added or updated as needed
- [x] [Contributor Guide](https://github.com/defenseunicorns/uds-template-capability/blob/main/CONTRIBUTING.md) followed

---------

Signed-off-by: catsby
Co-authored-by: Micah Nagel
---
 .../custom resources/packages-v1alpha1-cr.md  |   2 +-
 .../uds-networking-configuration.md           |  19 ++
 src/pepr/config.ts                            |   3 +
 .../operator/controllers/network/generate.ts  |   7 +-
 .../network/generators/kubeNodes.spec.ts      | 218 ++++++++++++++++++
 .../network/generators/kubeNodes.ts           | 179 ++++++++++++++
 .../operator/controllers/network/policies.ts  |   7 +
 .../crd/generated/package-v1alpha1.ts         |   1 +
 .../operator/crd/sources/package/v1alpha1.ts  |   2 +-
 src/pepr/operator/index.ts                    |  23 ++
 src/pepr/uds-operator-config/values.yaml      |   1 +
 .../chart/templates/uds-package.yaml          |   6 +
 12 files changed, 465 insertions(+), 3 deletions(-)
 create mode 100644 src/pepr/operator/controllers/network/generators/kubeNodes.spec.ts
 create mode 100644 src/pepr/operator/controllers/network/generators/kubeNodes.ts

diff --git a/docs/reference/configuration/custom resources/packages-v1alpha1-cr.md b/docs/reference/configuration/custom resources/packages-v1alpha1-cr.md
index f01c2655e..5d9da603e 100644
--- a/docs/reference/configuration/custom resources/packages-v1alpha1-cr.md
+++ b/docs/reference/configuration/custom resources/packages-v1alpha1-cr.md
@@ -124,7 +124,7 @@ tableOfContents:
- descriptionstringA description of the policy, this will become part of the policy namedirectionstring (enum):
  • Ingress
  • Egress
The direction of the trafficlabelsThe labels to apply to the policypodLabelsDeprecated: use selectorportnumberThe port to allow (protocol is always TCP)portsnumber[]A list of ports to allow (protocol is always TCP)remoteCidrstringCustom generated policy CIDRremoteGeneratedstring (enum):
  • KubeAPI
  • IntraNamespace
  • CloudMetadata
  • Anywhere
Custom generated remote selector for the policyremoteNamespacestringThe remote namespace to allow traffic to/from. Use * or empty string to allow all namespacesremotePodLabelsDeprecated: use remoteSelectorremoteSelectorThe remote pod selector labels to allow traffic to/fromselectorLabels to match pods in the namespace to apply the policy to. Leave empty to apply to all pods in the namespace + descriptionstringA description of the policy, this will become part of the policy namedirectionstring (enum):
  • Ingress
  • Egress
The direction of the trafficlabelsThe labels to apply to the policypodLabelsDeprecated: use selectorportnumberThe port to allow (protocol is always TCP)portsnumber[]A list of ports to allow (protocol is always TCP)remoteCidrstringCustom generated policy CIDRremoteGeneratedstring (enum):
  • KubeAPI
  • KubeNodes
  • IntraNamespace
  • CloudMetadata
  • Anywhere
Custom generated remote selector for the policyremoteNamespacestringThe remote namespace to allow traffic to/from. Use * or empty string to allow all namespacesremotePodLabelsDeprecated: use remoteSelectorremoteSelectorThe remote pod selector labels to allow traffic to/fromselectorLabels to match pods in the namespace to apply the policy to. Leave empty to apply to all pods in the namespace diff --git a/docs/reference/configuration/uds-networking-configuration.md b/docs/reference/configuration/uds-networking-configuration.md index f71c77e55..3e3577641 100644 --- a/docs/reference/configuration/uds-networking-configuration.md +++ b/docs/reference/configuration/uds-networking-configuration.md @@ -25,6 +25,25 @@ This configuration directs the operator to use the specified CIDR range (`172.0. When configuring a static CIDR range, it is important to make the range as restrictive as possible to limit the potential for unexpected networking access. An overly broad range could inadvertently allow egress traffic to destinations beyond the intended scope. Additionally, careful alignment with the actual IP addresses used by the Kubernetes API server is essential. A mismatch between the specified CIDR range and the cluster's configuration can result in network policy enforcement issues or disrupted connectivity. +## KubeNodes CIDRs + +The UDS operator is responsible for dynamically updating network policies that use the `remoteGenerated: KubeNodes` custom selector, in response to changes to nodes in the Kubernetes cluster. As nodes are added, updated, or removed from a cluster, the operator will ensure that policies remain accurate and include all the nodes in the cluster. + +UDS operator provides an option to configure a set of static CIDR ranges in place of offering a dynamically updated list by setting an override to `operator.KUBENODE_CIDRS` in your bundle as a value or variable. The value should be a single string of comma (`,`) separated values for the individual IP addresses, using `/32` notation. For example: + +```yaml +packages: + - name: uds-core + repository: ghcr.io/defenseunicorns/packages/uds/core + ref: x.x.x + overrides: + uds-operator-config: + uds-operator-config: + values: + - path: operator.KUBENODE_CIDRS + value: "172.28.0.2/32,172.28.0.3/32,172.28.0.4/32" +``` + ## Additional Network Allowances Applications deployed in UDS Core utilize [Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) with a "Deny by Default" configuration to ensure network traffic is restricted to only what is necessary. Some applications in UDS Core allow for overrides to accommodate environment-specific requirements. diff --git a/src/pepr/config.ts b/src/pepr/config.ts index a51288f18..9ae7d4bbe 100644 --- a/src/pepr/config.ts +++ b/src/pepr/config.ts @@ -39,6 +39,9 @@ export const UDSConfig = { // Static CIDR range to use for KubeAPI instead of k8s watch kubeApiCidr: process.env.KUBEAPI_CIDR, + // Static CIDRs to use for KubeNodes instead of k8s watch. Comma separated list of CIDRs. 
+ kubeNodeCidrs: process.env.KUBENODE_CIDRS, + // Track if UDS Core identity-authorization layer is deployed isIdentityDeployed: false, }; diff --git a/src/pepr/operator/controllers/network/generate.ts b/src/pepr/operator/controllers/network/generate.ts index 6ead5ba1b..a3d087a8f 100644 --- a/src/pepr/operator/controllers/network/generate.ts +++ b/src/pepr/operator/controllers/network/generate.ts @@ -11,6 +11,7 @@ import { anywhere, anywhereInCluster } from "./generators/anywhere"; import { cloudMetadata } from "./generators/cloudMetadata"; import { intraNamespace } from "./generators/intraNamespace"; import { kubeAPI } from "./generators/kubeAPI"; +import { kubeNodes } from "./generators/kubeNodes"; import { remoteCidr } from "./generators/remoteCidr"; function isWildcardNamespace(namespace: string) { @@ -26,6 +27,10 @@ function getPeers(policy: Allow): V1NetworkPolicyPeer[] { peers = kubeAPI(); break; + case RemoteGenerated.KubeNodes: + peers = kubeNodes(); + break; + case RemoteGenerated.CloudMetadata: peers = cloudMetadata; break; @@ -93,7 +98,7 @@ export function generate(namespace: string, policy: Allow): kind.NetworkPolicy { }; } - // Add the generated policy label (used to track KubeAPI policies) + // Add the generated policy label (used to track KubeAPI and KubeNodes policies) if (policy.remoteGenerated) { generated.metadata!.labels!["uds/generated"] = policy.remoteGenerated; } diff --git a/src/pepr/operator/controllers/network/generators/kubeNodes.spec.ts b/src/pepr/operator/controllers/network/generators/kubeNodes.spec.ts new file mode 100644 index 000000000..9ddd137df --- /dev/null +++ b/src/pepr/operator/controllers/network/generators/kubeNodes.spec.ts @@ -0,0 +1,218 @@ +/** + * Copyright 2024 Defense Unicorns + * SPDX-License-Identifier: AGPL-3.0-or-later OR LicenseRef-Defense-Unicorns-Commercial + */ + +import { beforeEach, beforeAll, describe, expect, it, jest } from "@jest/globals"; + +import { + initAllNodesTarget, + kubeNodes, + updateKubeNodesFromCreateUpdate, + updateKubeNodesFromDelete, +} from "./kubeNodes"; +import { K8s, kind } from "pepr"; +import { V1NetworkPolicyList } from "@kubernetes/client-node"; +import { anywhere } from "./anywhere"; + +type KubernetesList = { + items: T[]; +}; + +jest.mock("pepr", () => { + const originalModule = jest.requireActual("pepr") as object; + return { + ...originalModule, + K8s: jest.fn(), + kind: { + Node: "Node", + NetworkPolicy: "NetworkPolicy", + }, + }; +}); + +describe("kubeNodes module", () => { + const mockNodeList = { + items: [ + { + metadata: { name: "node1" }, + status: { + addresses: [{ type: "InternalIP", address: "10.0.0.1" }], + conditions: [{ type: "Ready", status: "True" }], + }, + }, + { + metadata: { name: "node2" }, + status: { + addresses: [{ type: "InternalIP", address: "10.0.0.2" }], + conditions: [{ type: "Ready", status: "True" }], + }, + }, + ], + }; + + const mockNetworkPolicyList: V1NetworkPolicyList = { + apiVersion: "networking.k8s.io/v1", + kind: "NetworkPolicyList", + items: [ + { + apiVersion: "networking.k8s.io/v1", + kind: "NetworkPolicy", + metadata: { + name: "example-policy", + namespace: "default", + }, + spec: { + podSelector: {}, // required field + policyTypes: ["Egress"], // or ["Ingress"], or both + egress: [ + { + to: [{ ipBlock: { cidr: "0.0.0.0/0" } }], // an IP we don't want + }, + ], + }, + }, + ], + }; + + const mockK8sGetNodes = jest.fn<() => Promise>>(); + const mockGetNetworkPolicies = jest.fn<() => Promise>>(); + const mockApply = jest.fn(); + + beforeAll(() => { + (K8s as 
jest.Mock).mockImplementation(() => ({ + Get: mockK8sGetNodes, + WithLabel: jest.fn(() => ({ + Get: mockGetNetworkPolicies, + })), + Apply: mockApply, + })); + }); + + beforeEach(() => { + jest.clearAllMocks(); + }); + + describe("initAllNodesTarget", () => { + it("should initialize nodeSet with internal IPs from nodes", async () => { + mockK8sGetNodes.mockResolvedValue(mockNodeList); + await initAllNodesTarget(); + const cidrs = kubeNodes(); + // Should have two IPs from mockNodeList + expect(cidrs).toHaveLength(2); + expect(cidrs).toEqual( + expect.arrayContaining([ + { ipBlock: { cidr: "10.0.0.1/32" } }, + { ipBlock: { cidr: "10.0.0.2/32" } }, + ]), + ); + }); + }); + + describe("nodeCIDRs", () => { + it("should return anywhere if no nodes known", async () => { + mockK8sGetNodes.mockResolvedValue({ items: [] }); + await initAllNodesTarget(); + const cidrs = kubeNodes(); + // expect it to match "anywhere" + expect(cidrs).toEqual([anywhere]); + }); + }); + + describe("updateKubeNodesFromCreateUpdate", () => { + it("should add a node IP if node is ready", async () => { + mockK8sGetNodes.mockResolvedValueOnce({ items: [] }); + mockGetNetworkPolicies.mockResolvedValue(mockNetworkPolicyList); + await initAllNodesTarget(); // start empty + await updateKubeNodesFromCreateUpdate(mockNodeList.items[0]); + let cidrs = kubeNodes(); + expect(cidrs).toHaveLength(1); + expect(cidrs[0].ipBlock?.cidr).toBe("10.0.0.1/32"); + expect(mockApply).toHaveBeenCalled(); + + await updateKubeNodesFromCreateUpdate(mockNodeList.items[1]); + cidrs = kubeNodes(); + expect(cidrs).toHaveLength(2); + expect(cidrs[1].ipBlock?.cidr).toBe("10.0.0.2/32"); + expect(mockApply).toHaveBeenCalled(); + }); + + it("should not remove a node that's no longer ready", async () => { + mockK8sGetNodes.mockResolvedValue(mockNodeList); + await initAllNodesTarget(); + let cidrs = kubeNodes(); + // Should have two IPs from mockNodeList + expect(cidrs).toHaveLength(2); + expect(cidrs).toEqual( + expect.arrayContaining([ + { ipBlock: { cidr: "10.0.0.1/32" } }, + { ipBlock: { cidr: "10.0.0.2/32" } }, + ]), + ); + + const notReadyNode = { + metadata: { name: "node2" }, + status: { + addresses: [{ type: "InternalIP", address: "10.0.0.1" }], + conditions: [{ type: "Ready", status: "False" }], + }, + }; + await updateKubeNodesFromCreateUpdate(notReadyNode); + cidrs = kubeNodes(); + expect(cidrs).toHaveLength(2); + expect(cidrs).toEqual( + expect.arrayContaining([ + { ipBlock: { cidr: "10.0.0.1/32" } }, + { ipBlock: { cidr: "10.0.0.2/32" } }, + ]), + ); + }); + + it("should not apply netpol policy changes if a node is already included", async () => { + // setup 1 node in the set and expect 1 application to a policy + mockK8sGetNodes.mockResolvedValueOnce({ items: [] }); + mockGetNetworkPolicies.mockResolvedValue(mockNetworkPolicyList); + await initAllNodesTarget(); // start empty + // add a node even if it's not ready + const initialNode = { + metadata: { name: "node1" }, + status: { + addresses: [{ type: "InternalIP", address: "10.0.0.9" }], + conditions: [{ type: "Ready", status: "False" }], + }, + }; + await updateKubeNodesFromCreateUpdate(initialNode); + let cidrs = kubeNodes(); + expect(cidrs).toHaveLength(1); + expect(cidrs[0].ipBlock?.cidr).toBe("10.0.0.9/32"); + expect(mockApply).toHaveBeenCalled(); + + // clear out the apply from the setup + mockApply.mockClear(); + // change initialNode to set the status to ready + initialNode.status.conditions[0].status = "True"; + await updateKubeNodesFromCreateUpdate(initialNode); + cidrs = kubeNodes(); 
+ expect(cidrs).toHaveLength(1); + expect(cidrs[0].ipBlock?.cidr).toBe("10.0.0.9/32"); + + // the apply should not have been called + expect(mockApply).not.toHaveBeenCalled(); + }); + }); + + describe("updateKubeNodesFromDelete", () => { + it("should remove the node IP from nodeSet", async () => { + mockK8sGetNodes.mockResolvedValueOnce(mockNodeList); + await initAllNodesTarget(); + const cidrsBeforeDelete = kubeNodes(); + expect(cidrsBeforeDelete).toHaveLength(2); + + await updateKubeNodesFromDelete(mockNodeList.items[0]); + const cidrsAfterDelete = kubeNodes(); + expect(cidrsAfterDelete).toHaveLength(1); + expect(cidrsAfterDelete[0].ipBlock?.cidr).toBe("10.0.0.2/32"); + expect(mockApply).toHaveBeenCalled(); + }); + }); +}); diff --git a/src/pepr/operator/controllers/network/generators/kubeNodes.ts b/src/pepr/operator/controllers/network/generators/kubeNodes.ts new file mode 100644 index 000000000..ba5c16529 --- /dev/null +++ b/src/pepr/operator/controllers/network/generators/kubeNodes.ts @@ -0,0 +1,179 @@ +/** + * Copyright 2024 Defense Unicorns + * SPDX-License-Identifier: AGPL-3.0-or-later OR LicenseRef-Defense-Unicorns-Commercial + */ + +import { KubernetesListObject } from "@kubernetes/client-node"; +import { V1NetworkPolicyPeer, V1NodeAddress } from "@kubernetes/client-node"; +import { K8s, kind, R } from "pepr"; + +import { Component, setupLogger } from "../../../../logger"; +import { RemoteGenerated } from "../../../crd"; +import { anywhere } from "./anywhere"; +import { UDSConfig } from "../../../../config"; +import { retryWithDelay } from "../../utils"; + +const log = setupLogger(Component.OPERATOR_GENERATORS); + +// Maintain a set of all node internal IPs +const nodeSet = new Set(); + +/** + * Initialize the node targets by fetching the current nodes in the cluster + * and populating the nodeSet with their Internal IPs. + */ +export async function initAllNodesTarget() { + // if a list of CIDRs is defined, use those + if (UDSConfig.kubeNodeCidrs) { + const nodeCidrs = UDSConfig.kubeNodeCidrs.split(","); + for (const nodeCidr of nodeCidrs) { + nodeSet.add(nodeCidr); + } + await updateKubeNodesNetworkPolicies(); + return; + } + + try { + const nodes = await retryWithDelay(fetchKubernetesNodes, log); + nodeSet.clear(); + + for (const node of nodes.items) { + const ip = getNodeInternalIP(node); + if (ip) nodeSet.add(ip); + } + await updateKubeNodesNetworkPolicies(); + } catch (err) { + log.error("error fetching node IPs:", err); + } +} + +/** + * Returns the egress CIDRs of all known nodes as network policy peers. + * If none are known, defaults to 0.0.0.0/0 and logs a warning. + */ +export function kubeNodes(): V1NetworkPolicyPeer[] { + const policies = buildNodePolicies([...nodeSet]); + if (policies.length > 0) return policies; + + log.warn("Unable to get Node CIDRs, defaulting to 0.0.0.0/0"); + return [anywhere]; +} + +/** + * When a node is created or updated, if it's Ready, add its IP to the set, + * rebuild the policies, and update the NetworkPolicies. + */ +export async function updateKubeNodesFromCreateUpdate(node: kind.Node) { + const ip = getNodeInternalIP(node); + if (ip) nodeSet.add(ip); + + await updateKubeNodesNetworkPolicies(); +} + +/** + * When a node is deleted, remove its IP from the set, rebuild the policies, + * and update the NetworkPolicies. 
+ */ +export async function updateKubeNodesFromDelete(node: kind.Node) { + const ip = getNodeInternalIP(node); + if (ip) nodeSet.delete(ip); + + await updateKubeNodesNetworkPolicies(); +} + +/** + * Fetch all Kubernetes nodes. + */ +async function fetchKubernetesNodes(): Promise> { + return K8s(kind.Node).Get(); +} + +/** + * Update all NetworkPolicies labeled with uds/generated=KubeNodes to + * reflect the given node CIDRs. + */ +export async function updateKubeNodesNetworkPolicies() { + const newNodes = buildNodePolicies([...nodeSet]); + const netPols = await K8s(kind.NetworkPolicy) + .WithLabel("uds/generated", RemoteGenerated.KubeNodes) + .Get(); + + for (const netPol of netPols.items) { + if (!netPol.spec) { + log.warn( + `KubeNodes NetworkPolicy ${netPol.metadata?.namespace}/${netPol.metadata?.name} is missing spec.`, + ); + continue; + } + + let updateRequired = false; + if (netPol.spec.egress) { + netPol.spec.egress[0] = netPol.spec.egress[0] || { to: [] }; + const oldNodes = netPol.spec.egress[0].to; + if (!R.equals(oldNodes, newNodes)) { + updateRequired = true; + netPol.spec.egress[0].to = newNodes; + } + } else if (netPol.spec.ingress) { + netPol.spec.ingress[0] = netPol.spec.ingress[0] || { from: [] }; + const oldNodes = netPol.spec.ingress[0].from; + if (!R.equals(oldNodes, newNodes)) { + updateRequired = true; + netPol.spec.ingress[0].from = newNodes; + } + } + + // If the policy required a change, apply the new policy + if (updateRequired) { + if (netPol.metadata) { + // Remove managed fields to prevent server-side apply errors + netPol.metadata.managedFields = undefined; + } + + log.debug( + `Updating KubeNodes NetworkPolicy ${netPol.metadata?.namespace}/${netPol.metadata?.name} with new CIDRs.`, + ); + + try { + await K8s(kind.NetworkPolicy).Apply(netPol, { force: true }); + } catch (err) { + let message = err.data?.message || "Unknown error while applying KubeNode network policies"; + if (UDSConfig.kubeNodeCidrs) { + message += + ", ensure that the KUBENODE_CIDRS override configured for the operator is correct."; + } + throw new Error(message); + } + } + } +} + +/** + * Build V1NetworkPolicyPeer array from a list of node IPs. + */ +function buildNodePolicies(nodeIPs: string[]): V1NetworkPolicyPeer[] { + return nodeIPs.map(ip => ({ + ipBlock: { + cidr: format32cidr(ip), + }, + })); +} + +/** + * Utility function conditionally format an IP as a 32-bit CIDR. + */ +function format32cidr(ip: string): string { + // Check if the input already appears to have CIDR notation + if (ip.includes("/")) { + return ip; + } + // If not, append "/32" + return `${ip}/32`; +} + +/** + * Utility function to get the InternalIP of a node. 
+ */ +function getNodeInternalIP(node: kind.Node): string | undefined { + return node.status?.addresses?.find((addr: V1NodeAddress) => addr.type === "InternalIP")?.address; +} diff --git a/src/pepr/operator/controllers/network/policies.ts b/src/pepr/operator/controllers/network/policies.ts index e24ffc434..6aaa91fe8 100644 --- a/src/pepr/operator/controllers/network/policies.ts +++ b/src/pepr/operator/controllers/network/policies.ts @@ -160,6 +160,13 @@ export async function networkPolicies(pkg: UDSPackage, namespace: string) { message += ", ensure that the KUBEAPI_CIDR override configured for the operator is correct."; } + if ( + UDSConfig.kubeNodeCidrs && + policy.metadata.labels["uds/generated"] === RemoteGenerated.KubeNodes + ) { + message += + ", ensure that the KUBENODE_CIDRS override configured for the operator is correct."; + } throw new Error(message); } } diff --git a/src/pepr/operator/crd/generated/package-v1alpha1.ts b/src/pepr/operator/crd/generated/package-v1alpha1.ts index 1a03f52a6..837037cdf 100644 --- a/src/pepr/operator/crd/generated/package-v1alpha1.ts +++ b/src/pepr/operator/crd/generated/package-v1alpha1.ts @@ -208,6 +208,7 @@ export enum RemoteGenerated { CloudMetadata = "CloudMetadata", IntraNamespace = "IntraNamespace", KubeAPI = "KubeAPI", + KubeNodes = "KubeNodes", } export interface Expose { diff --git a/src/pepr/operator/crd/sources/package/v1alpha1.ts b/src/pepr/operator/crd/sources/package/v1alpha1.ts index f52587bd8..1b06e86e6 100644 --- a/src/pepr/operator/crd/sources/package/v1alpha1.ts +++ b/src/pepr/operator/crd/sources/package/v1alpha1.ts @@ -87,7 +87,7 @@ const allow = { remoteGenerated: { description: "Custom generated remote selector for the policy", type: "string", - enum: ["KubeAPI", "IntraNamespace", "CloudMetadata", "Anywhere"], + enum: ["KubeAPI", "KubeNodes", "IntraNamespace", "CloudMetadata", "Anywhere"], }, remoteCidr: { description: "Custom generated policy CIDR", diff --git a/src/pepr/operator/index.ts b/src/pepr/operator/index.ts index 1e8882cb1..df2b051b5 100644 --- a/src/pepr/operator/index.ts +++ b/src/pepr/operator/index.ts @@ -14,6 +14,13 @@ import { updateAPIServerCIDRFromService, } from "./controllers/network/generators/kubeAPI"; +// Controller imports +import { + initAllNodesTarget, + updateKubeNodesFromCreateUpdate, + updateKubeNodesFromDelete, +} from "./controllers/network/generators/kubeNodes"; + // CRD imports import { UDSExemption, UDSPackage } from "./crd"; import { validator } from "./crd/validators/package-validator"; @@ -35,6 +42,12 @@ if (process.env.PEPR_WATCH_MODE === "true" || process.env.PEPR_MODE === "dev") { void initAPIServerCIDR(); } +// Pre-populate the Node CIDR list since we are not persisting it +// Note ignore any errors since the watch will still be running hereafter +if (process.env.PEPR_WATCH_MODE === "true" || process.env.PEPR_MODE === "dev") { + void initAllNodesTarget(); +} + // Watch for changes to the API server EndpointSlice and update the API server CIDR // Skip if a CIDR is defined in the UDS Config if (!UDSConfig.kubeApiCidr) { @@ -83,3 +96,13 @@ When(UDSPackage) log.info("Identity and Authorization layer removed, operator will NOT handle SSO."); UDSConfig.isIdentityDeployed = false; }); + +// Watch for changes to the Nodes and update the Node CIDR list +if (!UDSConfig.kubeNodeCidrs) { + When(a.Node).IsCreatedOrUpdated().Reconcile(updateKubeNodesFromCreateUpdate); +} + +// Watch for Node deletions and update the Node CIDR list +if (!UDSConfig.kubeNodeCidrs) { + 
When(a.Node).IsDeleted().Reconcile(updateKubeNodesFromDelete); +} diff --git a/src/pepr/uds-operator-config/values.yaml b/src/pepr/uds-operator-config/values.yaml index 680a2f07a..24df6a49e 100644 --- a/src/pepr/uds-operator-config/values.yaml +++ b/src/pepr/uds-operator-config/values.yaml @@ -10,6 +10,7 @@ operator: UDS_LOG_LEVEL: "###ZARF_VAR_UDS_LOG_LEVEL###" AUTHSERVICE_REDIS_URI: "###ZARF_VAR_AUTHSERVICE_REDIS_URI###" KUBEAPI_CIDR: "" + KUBENODE_CIDRS: "" # Allow Pepr watch to be configurable to react to dropped connections faster PEPR_LAST_SEEN_LIMIT_SECONDS: "300" # Allow Pepr to re-list resources more frequently to avoid missing resources diff --git a/src/prometheus-stack/chart/templates/uds-package.yaml b/src/prometheus-stack/chart/templates/uds-package.yaml index 3ba1bc1b6..7abbfda21 100644 --- a/src/prometheus-stack/chart/templates/uds-package.yaml +++ b/src/prometheus-stack/chart/templates/uds-package.yaml @@ -56,6 +56,12 @@ spec: app.kubernetes.io/name: prometheus description: "Metrics Scraping" + - direction: Egress + remoteGenerated: KubeNodes + selector: + app.kubernetes.io/name: prometheus + description: "Metrics Scraping of Kube Nodes" + - direction: Ingress remoteNamespace: grafana remoteSelector: