diff --git a/cmd/ch-k8s-lbaas-controller/ch-k8s-lbaas-controller.go b/cmd/ch-k8s-lbaas-controller/ch-k8s-lbaas-controller.go
index d49894c..361e173 100644
--- a/cmd/ch-k8s-lbaas-controller/ch-k8s-lbaas-controller.go
+++ b/cmd/ch-k8s-lbaas-controller/ch-k8s-lbaas-controller.go
@@ -28,6 +28,7 @@ import (
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/tools/clientcmd"
     "k8s.io/klog"
+
     // Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
     // _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"

@@ -90,6 +91,8 @@ func main() {
     servicesInformer := kubeInformerFactory.Core().V1().Services()
     nodesInformer := kubeInformerFactory.Core().V1().Nodes()
     endpointsInformer := kubeInformerFactory.Core().V1().Endpoints()
+    networkPoliciesInformer := kubeInformerFactory.Networking().V1().NetworkPolicies()
+    podsInformer := kubeInformerFactory.Core().V1().Pods() // TODO: I don't want to be informed about pods. Just need to list them

     modelGenerator, err := controller.NewLoadBalancerModelGenerator(
         fileCfg.BackendLayer,
@@ -97,6 +100,8 @@ func main() {
         servicesInformer.Lister(),
         nodesInformer.Lister(),
         endpointsInformer.Lister(),
+        networkPoliciesInformer.Lister(),
+        podsInformer.Lister(),
     )

     if fileCfg.BackendLayer != config.BackendLayerNodePort {
@@ -116,6 +121,7 @@ func main() {
         servicesInformer,
         nodesInformer,
         endpointsInformer,
+        networkPoliciesInformer,
         l3portmanager,
         agentController,
         modelGenerator,
diff --git a/internal/agent/agent.go b/internal/agent/agent.go
index ab4d584..27dfa23 100644
--- a/internal/agent/agent.go
+++ b/internal/agent/agent.go
@@ -274,7 +274,7 @@ func (h *ApplyHandlerv1) preflightCheck(w http.ResponseWriter, r *http.Request)
 }

 func (h *ApplyHandlerv1) ProcessRequest(lbcfg *model.LoadBalancer) (int, string) {
-    klog.V(1).Infof("received config: %#v", lbcfg)
+    klog.Infof("received config: %#v", lbcfg)

     changed, err := h.KeepalivedConfig.WriteWithRollback(lbcfg)
     if err != nil {
diff --git a/internal/agent/nftables_generator.go b/internal/agent/nftables_generator.go
index 4689f73..b6da690 100644
--- a/internal/agent/nftables_generator.go
+++ b/internal/agent/nftables_generator.go
@@ -31,19 +31,51 @@ var (
 {{ $cfg := . }}
 table {{ .FilterTableType }} {{ .FilterTableName }} {
     chain {{ .FilterForwardChainName }} {
+        {{- range $dest := $cfg.PolicyAssignments }}
+        {{- range $pol := $dest.NetworkPolicies }}
+        {{- if eq ((index $cfg.NetworkPolicies $pol).Ports | len) 0 }}
+        ct mark {{ $cfg.FWMarkBits | printf "0x%x" }} and {{ $cfg.FWMarkMask | printf "0x%x" }} ip daddr {{ $dest.Address }} jump {{ $pol }};
+        {{- else }}
+        {{- range $port := (index $cfg.NetworkPolicies $pol).Ports }}
+        ct mark {{ $cfg.FWMarkBits | printf "0x%x" }} and {{ $cfg.FWMarkMask | printf "0x%x" }} ip daddr {{ $dest.Address }} {{ $port.Protocol }} {{- if $port.Port }} dport {{ $port.Port -}} {{- if $port.EndPort -}} - {{- $port.EndPort -}} {{- end -}} {{- end }} jump {{ $pol }};
+        {{- end }}
+        {{- end }}
+        {{- end }}
+        {{- end }}
+        ct mark {{ $cfg.NPMarkBits | printf "0x%x" }} or {{ $cfg.FWMarkBits | printf "0x%x" }} drop;
         ct mark {{ $cfg.FWMarkBits | printf "0x%x" }} and {{ $cfg.FWMarkMask | printf "0x%x" }} accept;
     }
+
+    {{- range $policy := $cfg.NetworkPolicies }}
+    chain {{ $policy.Name }} {
+        mark set {{ $cfg.NPMarkBits | printf "0x%x" }} or {{ $cfg.FWMarkBits | printf "0x%x" }} ct mark set meta mark
+        {{- range $index, $ipblock := $policy.AllowedIPBlocks }}
+        ip saddr {{ $ipblock.Cidr }} {{ if eq ($ipblock.Except | len) 0 -}} accept {{- else -}} jump {{ $policy.Name }}-cidr{{ $index -}} {{- end }};
+        {{- end }}
+        return;
+    }
+    {{- range $index, $ipblock := $policy.AllowedIPBlocks }}
+    {{- if ne ($ipblock.Except | len) 0 }}
+    chain {{ $policy.Name }}-cidr{{ $index }} {
+        {{- range $except := $ipblock.Except }}
+        ip saddr {{ $except }} return;
+        {{- end }}
+        accept;
+    }
+    {{- end }}
+    {{- end }}
+    {{- end }}
 }

 table ip {{ .NATTableName }} {
     chain {{ .NATPreroutingChainName }} {
-{{ range $fwd := .Forwards }}
-{{ if ne ($fwd.DestinationAddresses | len) 0 }}
-        ip daddr {{ $fwd.InboundIP }} {{ $fwd.Protocol }} dport {{ $fwd.InboundPort }} mark set {{ $cfg.FWMarkBits | printf "0x%x" }} and {{ $cfg.FWMarkMask | printf "0x%x" }} ct mark set meta mark dnat to numgen inc mod {{ $fwd.DestinationAddresses | len }} map {
+{{- range $fwd := .Forwards }}
+{{- if ne ($fwd.DestinationAddresses | len) 0 }}
+        ip daddr {{ $fwd.InboundIP }} {{ $fwd.Protocol }} dport {{ $fwd.InboundPort }} mark set {{ $cfg.FWMarkBits | printf "0x%x" }} and {{ $cfg.FWMarkMask | printf "0x%x" }} ct mark set meta mark dnat to numgen inc mod {{ $fwd.DestinationAddresses | len }} map { {{- range $index, $daddr := $fwd.DestinationAddresses }}{{ $index }} : {{ $daddr }}, {{ end -}} } : {{ $fwd.DestinationPort }};
-{{ end }}
-{{ end }}
+{{- end }}
+{{- end }}
     }

     chain {{ .NATPostroutingChainName }} {
@@ -55,6 +87,28 @@ table ip {{ .NATTableName }} {
     ErrProtocolNotSupported = fmt.Errorf("Protocol is not supported")
 )

+type allowedIPBlock struct {
+    Cidr   string
+    Except []string
+}
+
+type policyPort struct {
+    Protocol string
+    Port     *int32
+    EndPort  *int32
+}
+
+type networkPolicy struct {
+    Name            string
+    AllowedIPBlocks []allowedIPBlock
+    Ports           []policyPort
+}
+
+type policyAssignment struct {
+    Address         string
+    NetworkPolicies []string
+}
+
 type nftablesForward struct {
     Protocol  string
     InboundIP string
@@ -72,7 +126,10 @@ type nftablesConfig struct {
     NATPreroutingChainName  string
     FWMarkBits              uint32
     FWMarkMask              uint32
+    NPMarkBits              uint32
     Forwards                []nftablesForward
+    NetworkPolicies         map[string]networkPolicy
+    PolicyAssignments       []policyAssignment
 }

 type NftablesGenerator struct {
@@ -85,7 +142,54 @@ func copyAddresses(in []string) []string {
     return result
 }

-func (g *NftablesGenerator) mapProtocol(k8sproto corev1.Protocol) (string, error) {
+func copyIPBlocks(in []model.AllowedIPBlock) []allowedIPBlock {
+    result := make([]allowedIPBlock, len(in))
+    for i, block := range in {
+        result[i].Cidr = block.Cidr
+        result[i].Except = copyAddresses(block.Except)
+    }
+    return result
+}
+
+func copyPolicyPorts(in []model.PolicyPort) ([]policyPort, error) {
+    result := make([]policyPort, len(in))
+    var err error
+    for i, port := range in {
+        result[i].Protocol, err = mapProtocol(port.Protocol)
+        if err != nil {
+            return nil, err
+        }
+        result[i].Port = port.Port
+        result[i].EndPort = port.EndPort
+    }
+    return result, nil
+}
+
+func copyNetworkPolicies(in []model.NetworkPolicy) ([]networkPolicy, error) {
+    result := make([]networkPolicy, len(in))
+    var err error
+    for i, policy := range in {
+        result[i].Name = policy.Name
+        result[i].AllowedIPBlocks = copyIPBlocks(policy.AllowedIPBlocks)
+        result[i].Ports, err = copyPolicyPorts(policy.Ports)
+        if err != nil {
+            return nil, err
+        }
+    }
+    return result, nil
+}
+
+func copyPolicyAssignment(in []model.PolicyAssignment) []policyAssignment {
+    result := make([]policyAssignment, len(in))
+    for i, assignment := range in {
+        result[i].Address = assignment.Address
+        result[i].NetworkPolicies = copyAddresses(assignment.NetworkPolicies)
+    }
+    return result
+}
+
+// Maps from k8s.io/api/core/v1.Protocol objects to strings understood by nftables
+func mapProtocol(k8sproto corev1.Protocol) (string, error) {
     switch k8sproto {
     case corev1.ProtocolTCP:
         return "tcp", nil
@@ -96,6 +200,7 @@ func (g *NftablesGenerator) mapProtocol(k8sproto corev1.Protocol) (string, error
     }
 }

+// Generates a config suitable for nftablesTemplate from a LoadBalancer model
 func (g *NftablesGenerator) GenerateStructuredConfig(m *model.LoadBalancer) (*nftablesConfig, error) {
     result := &nftablesConfig{
         FilterTableName: g.Cfg.FilterTableName,
@@ -106,12 +211,15 @@ func (g *NftablesGenerator) GenerateStructuredConfig(m *model.LoadBalancer) (*nf
         NATPreroutingChainName: g.Cfg.NATPreroutingChainName,
         FWMarkBits:             g.Cfg.FWMarkBits,
         FWMarkMask:             g.Cfg.FWMarkMask,
+        NPMarkBits:             g.Cfg.NPMarkBits,
         Forwards:               []nftablesForward{},
+        NetworkPolicies:        map[string]networkPolicy{},
+        PolicyAssignments:      []policyAssignment{},
     }

     for _, ingress := range m.Ingress {
         for _, port := range ingress.Ports {
-            mappedProtocol, err := g.mapProtocol(port.Protocol)
+            mappedProtocol, err := mapProtocol(port.Protocol)
             if err != nil {
                 return nil, err
             }
@@ -143,6 +251,15 @@ func (g *NftablesGenerator) GenerateStructuredConfig(m *model.LoadBalancer) (*nf
         return fwdA.InboundPort < fwdB.InboundPort
     })

+    result.PolicyAssignments = copyPolicyAssignment(m.PolicyAssignments)
+    policies, err := copyNetworkPolicies(m.NetworkPolicies)
+    if err != nil {
+        return nil, err
+    }
+    for _, policy := range policies {
+        result.NetworkPolicies[policy.Name] = policy
+    }
+
     return result, nil
 }

diff --git a/internal/config/config.go b/internal/config/config.go
index 407d4f6..fa69d04 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -64,6 +64,7 @@ type Nftables struct {
     NATPostroutingChainName string        `toml:"nat-postrouting-chain"`
     FWMarkBits              uint32        `toml:"fwmark-bits"`
     FWMarkMask              uint32        `toml:"fwmark-mask"`
+    NPMarkBits              uint32        `toml:"npmark-bits"`

     Service ServiceConfig `toml:"service"`
 }
@@ -154,6 +155,10 @@ func FillNftablesConfig(cfg *Nftables) {
         cfg.FWMarkMask = 1
     }

+    if cfg.NPMarkBits == 0 {
+        cfg.NPMarkBits = 2
+    }
+
     defaultStringList(&cfg.Service.ReloadCommand, []string{"sudo", "systemctl", "reload", "nftables"})
     defaultStringList(&cfg.Service.StartCommand, []string{"sudo", "systemctl", "restart", "nftables"})
 }
diff --git a/internal/controller/controller.go b/internal/controller/controller.go
index 8f2cf4e..a547798 100644
--- a/internal/controller/controller.go
+++ b/internal/controller/controller.go
@@ -27,6 +27,7 @@ import (
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
     coreinformers "k8s.io/client-go/informers/core/v1"
+    networkinginformers "k8s.io/client-go/informers/networking/v1"
     "k8s.io/client-go/kubernetes"
     "k8s.io/client-go/kubernetes/scheme"
     typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
@@ -82,6 +83,7 @@ func NewController(
     serviceInformer coreinformers.ServiceInformer,
     nodeInformer coreinformers.NodeInformer,
     endpointsInformer coreinformers.EndpointsInformer,
+    networkPoliciesInformer networkinginformers.NetworkPolicyInformer,
     l3portmanager openstack.L3PortManager,
     agentController AgentController,
     generator LoadBalancerModelGenerator,
@@ -167,6 +169,17 @@ func NewController(
         })
     }

+    if networkPoliciesInformer != nil {
+        networkPoliciesInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+            AddFunc: controller.handleAuxUpdated,
+            UpdateFunc: func(old, new interface{}) {
+                klog.Info("UpdateFunc called")
+                controller.handleAuxUpdated(new)
+            },
+            DeleteFunc: controller.handleAuxUpdated,
+        })
+    }
+
     return controller, nil
 }

@@ -245,7 +258,7 @@ func (c *Controller) periodicCleanup() {
 func (c *Controller) handleObject(obj interface{}) {
     var object metav1.Object
     var ok bool
-    klog.Info("handleObject called")
+    klog.Infof("handleObject called with %T", obj)
     if object, ok = obj.(metav1.Object); !ok {
         klog.V(5).Infof("ignoring non-castable object in handleObject; expecting deletion event")
         return
diff --git a/internal/controller/controller_test.go b/internal/controller/controller_test.go
index 6842609..93eeaf3 100644
--- a/internal/controller/controller_test.go
+++ b/internal/controller/controller_test.go
@@ -75,6 +75,7 @@ func (f *fixture) newController() (*Controller, kubeinformers.SharedInformerFact
         k8sI.Core().V1().Services(),
         k8sI.Core().V1().Nodes(),
         k8sI.Core().V1().Endpoints(),
+        k8sI.Networking().V1().NetworkPolicies(),
         ostesting.NewMockL3PortManager(),
         controllertesting.NewMockAgentController(),
         controllertesting.NewMockLoadBalancerModelGenerator(),
diff --git a/internal/controller/model.go b/internal/controller/model.go
index b3bc604..73df9f3 100644
--- a/internal/controller/model.go
+++ b/internal/controller/model.go
@@ -18,6 +18,7 @@ import (
     "fmt"

     corelisters "k8s.io/client-go/listers/core/v1"
+    networkinglisters "k8s.io/client-go/listers/networking/v1"

     "github.com/cloudandheat/ch-k8s-lbaas/internal/config"
     "github.com/cloudandheat/ch-k8s-lbaas/internal/model"
@@ -33,7 +34,9 @@ func NewLoadBalancerModelGenerator(
     l3portmanager openstack.L3PortManager,
     services corelisters.ServiceLister,
     nodes corelisters.NodeLister,
-    endpoints corelisters.EndpointsLister) (LoadBalancerModelGenerator, error) {
+    endpoints corelisters.EndpointsLister,
+    networkpolicies networkinglisters.NetworkPolicyLister,
+    pods corelisters.PodLister) (LoadBalancerModelGenerator, error) {
     switch backendLayer {
     case config.BackendLayerNodePort:
         return NewNodePortLoadBalancerModelGenerator(
@@ -45,7 +48,7 @@ func NewLoadBalancerModelGenerator(
         ), nil
     case config.BackendLayerPod:
         return NewPodLoadBalancerModelGenerator(
-            l3portmanager, services, endpoints,
+            l3portmanager, services, endpoints, networkpolicies, pods,
         ), nil
     default:
         return nil, fmt.Errorf("invalid backend type: %q", backendLayer)
diff --git a/internal/controller/model_pod.go b/internal/controller/model_pod.go
index 7277528..3705ae2 100644
--- a/internal/controller/model_pod.go
+++ b/internal/controller/model_pod.go
@@ -18,7 +18,11 @@ import (
     goerrors "errors"

     corev1 "k8s.io/api/core/v1"
+    networkingv1 "k8s.io/api/networking/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
     corelisters "k8s.io/client-go/listers/core/v1"
+    networkinglisters "k8s.io/client-go/listers/networking/v1"

     "k8s.io/klog"

@@ -31,19 +35,25 @@ var (
 )

 type PodLoadBalancerModelGenerator struct {
-    l3portmanager openstack.L3PortManager
-    services      corelisters.ServiceLister
-    endpoints     corelisters.EndpointsLister
+    l3portmanager   openstack.L3PortManager
+    services        corelisters.ServiceLister
+    networkpolicies networkinglisters.NetworkPolicyLister
+    endpoints       corelisters.EndpointsLister
+    pods            corelisters.PodLister
 }

 func NewPodLoadBalancerModelGenerator(
     l3portmanager openstack.L3PortManager,
     services corelisters.ServiceLister,
-    endpoints corelisters.EndpointsLister) *PodLoadBalancerModelGenerator {
+    endpoints corelisters.EndpointsLister,
+    networkpolicies networkinglisters.NetworkPolicyLister,
+    pods corelisters.PodLister) *PodLoadBalancerModelGenerator {
     return &PodLoadBalancerModelGenerator{
-        l3portmanager: l3portmanager,
-        services:      services,
-        endpoints:     endpoints,
+        l3portmanager:   l3portmanager,
+        services:        services,
+        endpoints:       endpoints,
+        networkpolicies: networkpolicies,
+        pods:            pods,
     }
 }

@@ -71,9 +81,96 @@ func (g *PodLoadBalancerModelGenerator) findPort(subset *corev1.EndpointSubset,
     return -1, errPortNotFoundInSubset
 }

+func containsPort(port int32, proto *corev1.Protocol, portList []networkingv1.NetworkPolicyPort) bool {
+    klog.Infof("Looking for port %d proto %#v", port, *proto)
+    for _, p := range portList {
+        klog.Infof("%#v: port %d proto %#v", p, p.Port.IntVal, p.Protocol)
+        if *p.Protocol == *proto &&
+            ((p.EndPort == nil && p.Port.IntVal == port) ||
+                (p.EndPort != nil && p.Port.IntVal <= port && *p.EndPort >= port)) {
+            klog.Infof("Found port in %#v", p)
+            return true
+        }
+    }
+    return false
+}
+
+func buildNetworkPolicy(in *networkingv1.NetworkPolicy) model.NetworkPolicy {
+    newPolicy := model.NetworkPolicy{
+        Name:            in.Name,
+        AllowedIPBlocks: []model.AllowedIPBlock{},
+        Ports:           []model.PolicyPort{},
+    }
+    for _, ingress := range in.Spec.Ingress {
+        klog.Infof("Processing ingress %#v", ingress)
+        for _, port := range ingress.Ports {
+            newPort := model.PolicyPort{
+                Protocol: *port.Protocol,
+                EndPort:  port.EndPort,
+            }
+            if port.Port != nil {
+                newPort.Port = &port.Port.IntVal
+            }
+            klog.Infof("Adding proto %s port %d to %d",
+                newPort.Protocol, newPort.Port, newPort.EndPort)
+            newPolicy.Ports = append(newPolicy.Ports, newPort)
+        }
+        for _, from := range ingress.From {
+            if from.IPBlock == nil {
+                continue
+            }
+            newBlock := model.AllowedIPBlock{
+                Cidr: from.IPBlock.CIDR,
+            }
+            for _, except := range from.IPBlock.Except {
+                newBlock.Except = append(newBlock.Except, except)
+            }
+            klog.Infof("Adding block %s with %d excepts",
+                newBlock.Cidr, len(newBlock.Except))
+            newPolicy.AllowedIPBlocks = append(newPolicy.AllowedIPBlocks, newBlock)
+        }
+    }
+    return newPolicy
+}
+
 func (g *PodLoadBalancerModelGenerator) GenerateModel(portAssignment map[string]string) (*model.LoadBalancer, error) {
     result := &model.LoadBalancer{}

+    allPolicies, err := g.networkpolicies.List(labels.Everything())
+    if err != nil {
+        return nil, err
+    }
+
+    networkPolicies := make([]model.NetworkPolicy, 0, len(allPolicies))
+    policyMap := map[string][]string{} // dest addr => names of the policies that apply to it
+    for _, pol := range allPolicies {
+        klog.Infof("Processing policy %s", pol.Name)
+        if len(pol.Spec.Ingress) == 0 {
+            klog.Infof("Skipping because policy has no ingress rule")
+            continue
+        }
+
+        networkPolicies = append(networkPolicies, buildNetworkPolicy(pol))
+
+        // build policyMap
+        selector, err := metav1.LabelSelectorAsSelector(&pol.Spec.PodSelector)
+        if err != nil {
+            return nil, err
+        }
+
+        pods, err := g.pods.Pods(pol.Namespace).List(selector)
+        if err != nil {
+            return nil, err
+        }
+        for _, pod := range pods {
+            for _, addr := range pod.Status.PodIPs {
+                klog.Infof("Adding policy %s to address %s", pol.Name, addr.IP)
+                policyMap[addr.IP] = append(policyMap[addr.IP], pol.Name)
+            }
+        }
+    }
+    klog.Infof("Done getting %d policies applying to %d addresses", len(allPolicies), len(policyMap))
+
     ingressMap := map[string]model.IngressIP{}

     for serviceKey, portID := range portAssignment {
@@ -158,6 +255,14 @@ func (g *PodLoadBalancerModelGenerator) GenerateModel(portAssignment map[string]
         result.Ingress[i] = ingress
         i++
     }
+    result.NetworkPolicies = networkPolicies
+    result.PolicyAssignments = make([]model.PolicyAssignment, len(policyMap))
+    i = 0
+    for addr, policies := range policyMap {
+        result.PolicyAssignments[i].Address = addr
+        result.PolicyAssignments[i].NetworkPolicies = policies
+        i++
+    }

     return result, nil
 }
diff --git a/internal/controller/model_pod_test.go b/internal/controller/model_pod_test.go
index 2f6d08a..3f483a4 100644
--- a/internal/controller/model_pod_test.go
+++ b/internal/controller/model_pod_test.go
@@ -18,6 +18,7 @@ import (
     "testing"

     corev1 "k8s.io/api/core/v1"
+    networkingv1 "k8s.io/api/networking/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/intstr"
@@ -35,10 +36,11 @@ type podGeneratorFixture struct {

     l3portmanager *ostesting.MockL3PortManager

-    kubeclient      *k8sfake.Clientset
-    serviceLister   []*corev1.Service
-    endpointsLister []*corev1.Endpoints
-    kubeobjects     []runtime.Object
+    kubeclient          *k8sfake.Clientset
+    serviceLister       []*corev1.Service
+    endpointsLister     []*corev1.Endpoints
+    networkpolicyLister []*networkingv1.NetworkPolicy
+    kubeobjects         []runtime.Object
 }

 func newPodGeneratorFixture(t *testing.T) *podGeneratorFixture {
@@ -47,6 +49,7 @@ func newPodGeneratorFixture(t *testing.T) *podGeneratorFixture {
     f.l3portmanager = ostesting.NewMockL3PortManager()
     f.serviceLister = []*corev1.Service{}
     f.endpointsLister = []*corev1.Endpoints{}
+    f.networkpolicyLister = []*networkingv1.NetworkPolicy{}
     f.kubeobjects = []runtime.Object{}

     return f
@@ -57,6 +60,8 @@ func (f *podGeneratorFixture) newGenerator() (*PodLoadBalancerModelGenerator, ku
     k8sI := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc())
     services := k8sI.Core().V1().Services()
     endpoints := k8sI.Core().V1().Endpoints()
+    networkpolicies := k8sI.Networking().V1().NetworkPolicies()
+    pods := k8sI.Core().V1().Pods()

     for _, s := range f.serviceLister {
         services.Informer().GetIndexer().Add(s)
@@ -66,10 +71,16 @@ func (f *podGeneratorFixture) newGenerator() (*PodLoadBalancerModelGenerator, ku
         endpoints.Informer().GetIndexer().Add(e)
     }

+    for _, s := range f.networkpolicyLister {
+        networkpolicies.Informer().GetIndexer().Add(s)
+    }
+
     g := NewPodLoadBalancerModelGenerator(
         f.l3portmanager,
         services.Lister(),
         endpoints.Lister(),
+        networkpolicies.Lister(),
+        pods.Lister(),
     )
     return g, k8sI
 }
@@ -84,6 +95,11 @@ func (f *podGeneratorFixture) addEndpoints(svc *corev1.Endpoints) {
     f.kubeobjects = append(f.kubeobjects, svc)
 }

+func (f *podGeneratorFixture) addNetworkpolicy(pol *networkingv1.NetworkPolicy) {
+    f.networkpolicyLister = append(f.networkpolicyLister, pol)
+    f.kubeobjects = append(f.kubeobjects, pol)
+}
+
 func (f *podGeneratorFixture) runWith(body func(g *PodLoadBalancerModelGenerator)) {
     g, k8sI := f.newGenerator()
     stopCh := make(chan struct{})
diff --git a/internal/model/loadbalancer.go b/internal/model/loadbalancer.go
index 4bf5078..a407946 100644
--- a/internal/model/loadbalancer.go
+++ b/internal/model/loadbalancer.go
@@ -20,12 +20,34 @@ import (
     "github.com/golang-jwt/jwt"
 )

+type AllowedIPBlock struct {
+    Cidr   string   `json:"cidr"`
+    Except []string `json:"except"`
+}
+
+type PolicyPort struct {
+    Protocol corev1.Protocol `json:"protocol"`
+    Port     *int32          `json:"port,omitempty"`
+    EndPort  *int32          `json:"end-port,omitempty"`
+}
+
+type NetworkPolicy struct {
+    Name            string           `json:"name"`
+    AllowedIPBlocks []AllowedIPBlock `json:"allowed-ip-blocks"`
+    Ports           []PolicyPort     `json:"ports"`
+}
+
+type PolicyAssignment struct {
+    Address         string   `json:"address"`
+    NetworkPolicies []string `json:"network-policies"`
+}
+
 type PortForward struct {
     Protocol             corev1.Protocol `json:"protocol"`
     InboundPort          int32           `json:"inbound-port"`
     DestinationAddresses []string        `json:"destination-addresses"`
     DestinationPort      int32           `json:"destination-port"`
-    Policy               string          `json:"policy"`
+    Policy               string          `json:"policy"` // TODO: <== What does this do
 }

 type IngressIP struct {
@@ -34,7 +56,9 @@ type IngressIP struct {
 }

 type LoadBalancer struct {
-    Ingress []IngressIP `json:"ingress"`
+    Ingress           []IngressIP        `json:"ingress"`
+    NetworkPolicies   []NetworkPolicy    `json:"network-policies"`
+    PolicyAssignments []PolicyAssignment `json:"policy-assignments"`
 }

 type ConfigClaim struct {
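The new model types in internal/model/loadbalancer.go extend the JSON document that the controller pushes to the agents (the lbcfg received by ApplyHandlerv1.ProcessRequest). Below is a minimal, self-contained sketch of the added sections of that payload. The structs are re-declared locally so the snippet runs on its own, PolicyPort.Protocol is simplified to a plain string (it is a corev1.Protocol in the real model), and the policy name, CIDRs and pod address are invented for illustration.

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the wire types added in internal/model/loadbalancer.go,
// redeclared here only so this sketch compiles without the repository.
type AllowedIPBlock struct {
	Cidr   string   `json:"cidr"`
	Except []string `json:"except"`
}

type PolicyPort struct {
	Protocol string `json:"protocol"`
	Port     *int32 `json:"port,omitempty"`
	EndPort  *int32 `json:"end-port,omitempty"`
}

type NetworkPolicy struct {
	Name            string           `json:"name"`
	AllowedIPBlocks []AllowedIPBlock `json:"allowed-ip-blocks"`
	Ports           []PolicyPort     `json:"ports"`
}

type PolicyAssignment struct {
	Address         string   `json:"address"`
	NetworkPolicies []string `json:"network-policies"`
}

func main() {
	port := int32(8080)

	// One policy allowing 192.0.2.0/24 (minus an excepted subrange) to reach
	// TCP port 8080, assigned to a single pod address. All values are made up.
	policies := []NetworkPolicy{{
		Name: "allow-monitoring",
		AllowedIPBlocks: []AllowedIPBlock{{
			Cidr:   "192.0.2.0/24",
			Except: []string{"192.0.2.128/25"},
		}},
		Ports: []PolicyPort{{Protocol: "TCP", Port: &port}},
	}}
	assignments := []PolicyAssignment{{
		Address:         "10.244.1.17",
		NetworkPolicies: []string{"allow-monitoring"},
	}}

	payload := map[string]interface{}{
		"network-policies":   policies,
		"policy-assignments": assignments,
	}
	out, err := json.MarshalIndent(payload, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}

These two sections travel next to the existing "ingress" list in model.LoadBalancer; on the agent they become the NetworkPolicies and PolicyAssignments fields of nftablesConfig and drive the new per-policy chains rendered by the filter-table template.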
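The fixture plumbing added to model_pod_test.go (networkpolicyLister, addNetworkpolicy, the extra listers passed to NewPodLoadBalancerModelGenerator) is not yet exercised by a test in this diff. Below is a sketch of a unit test that could be added to internal/controller/model_pod_test.go; it calls the unexported buildNetworkPolicy helper directly, relies only on imports that file already has (corev1, networkingv1, metav1, intstr, testing), and the policy contents are invented.

func TestBuildNetworkPolicyConvertsIngressRule(t *testing.T) {
	tcp := corev1.ProtocolTCP
	port := intstr.FromInt(8080)
	endPort := int32(8090)

	// A policy with one ingress rule: TCP 8080-8090 from 192.0.2.0/24,
	// excluding 192.0.2.128/25. All values are invented for the test.
	in := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-monitoring", Namespace: "default"},
		Spec: networkingv1.NetworkPolicySpec{
			Ingress: []networkingv1.NetworkPolicyIngressRule{{
				Ports: []networkingv1.NetworkPolicyPort{{
					Protocol: &tcp,
					Port:     &port,
					EndPort:  &endPort,
				}},
				From: []networkingv1.NetworkPolicyPeer{{
					IPBlock: &networkingv1.IPBlock{
						CIDR:   "192.0.2.0/24",
						Except: []string{"192.0.2.128/25"},
					},
				}},
			}},
		},
	}

	out := buildNetworkPolicy(in)

	if out.Name != "allow-monitoring" {
		t.Errorf("unexpected policy name: %q", out.Name)
	}
	if len(out.Ports) != 1 || out.Ports[0].Protocol != corev1.ProtocolTCP ||
		out.Ports[0].Port == nil || *out.Ports[0].Port != 8080 ||
		out.Ports[0].EndPort == nil || *out.Ports[0].EndPort != 8090 {
		t.Errorf("unexpected ports: %#v", out.Ports)
	}
	if len(out.AllowedIPBlocks) != 1 ||
		out.AllowedIPBlocks[0].Cidr != "192.0.2.0/24" ||
		len(out.AllowedIPBlocks[0].Except) != 1 ||
		out.AllowedIPBlocks[0].Except[0] != "192.0.2.128/25" {
		t.Errorf("unexpected IP blocks: %#v", out.AllowedIPBlocks)
	}
}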