diff --git a/README.md b/README.md
index 1436fa83ac..e438491c33 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,4 @@
+
 # amazon-vpc-cni-k8s
 
 Networking plugin for pod networking in [Kubernetes](https://kubernetes.io/) using [Elastic Network Interfaces](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html) on AWS.
@@ -421,6 +422,31 @@ Any of the WARM targets do not impact the scale of the branch ENI pods so you wi
 
 **NOTE!** Toggling `ENABLE_POD_ENI` from `true` to `false` will not detach the Trunk ENI from the instance. To delete/detach the Trunk ENI from the instance, you need to recycle the instance.
 
+
+---
+
+#### `POD_SECURITY_GROUP_ENFORCING_MODE` (v1.11.0+)
+
+Type: String
+
+Default: `strict`
+
+Valid Values: `strict`, `standard`
+
+Once `ENABLE_POD_ENI` is set to `true`, this value controls how the traffic of pods with security groups behaves.
+
+ * `strict` mode: all inbound/outbound traffic of a pod with security groups is enforced by security group rules. This is the **default** mode if `POD_SECURITY_GROUP_ENFORCING_MODE` is not set.
+
+ * `standard` mode: the traffic of a pod with security groups behaves the same as for pods without security groups, except that each pod still occupies a dedicated branch ENI.
+    * inbound traffic to a pod with security groups from another host is enforced by security group rules.
+    * outbound traffic from a pod with security groups to another host in the same VPC is enforced by security group rules.
+    * traffic to/from another pod or host-local service (such as kubelet/nodeLocalDNS) on the same host is not enforced by security group rules.
+    * outbound traffic from a pod with security groups to an IP address outside the VPC:
+      * if external SNAT is enabled, traffic is not SNATed and is therefore enforced by security group rules.
+      * if external SNAT is disabled, traffic is SNATed via eth0 and is therefore only enforced by the security group associated with eth0.
+
+**NOTE!** For the new behavior to take effect after switching modes, existing pods with security groups must be recycled. Alternatively, you can restart the nodes.
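+
+For example, to switch a cluster to `standard` mode (illustrative; this assumes the CNI runs as the usual `aws-node` DaemonSet in the `kube-system` namespace):
+
+```
+kubectl set env daemonset aws-node -n kube-system POD_SECURITY_GROUP_ENFORCING_MODE=standard
+```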
+ --- #### `DISABLE_TCP_EARLY_DEMUX` (v1.7.3+) diff --git a/cmd/routed-eni-cni-plugin/cni.go b/cmd/routed-eni-cni-plugin/cni.go index b96b1fafd8..0ead5a7181 100644 --- a/cmd/routed-eni-cni-plugin/cni.go +++ b/cmd/routed-eni-cni-plugin/cni.go @@ -25,6 +25,10 @@ import ( "strconv" "strings" + "github.com/aws/amazon-vpc-cni-k8s/pkg/utils/cniutils" + + "github.com/aws/amazon-vpc-cni-k8s/pkg/sgpp" + "github.com/containernetworking/cni/pkg/skel" "github.com/containernetworking/cni/pkg/types" "github.com/containernetworking/cni/pkg/types/current" @@ -45,7 +49,6 @@ import ( const ipamdAddress = "127.0.0.1:50051" -const vlanInterfacePrefix = "vlan" const dummyVlanInterfacePrefix = "dummy" var version string @@ -62,6 +65,9 @@ type NetConf struct { // MTU for eth0 MTU string `json:"mtu"` + // PodSGEnforcingMode is the enforcing mode for Security groups for pods feature + PodSGEnforcingMode sgpp.EnforcingMode `json:"podSGEnforcingMode"` + PluginLogFile string `json:"pluginLogFile"` PluginLogLevel string `json:"pluginLogLevel"` @@ -91,8 +97,9 @@ func init() { func LoadNetConf(bytes []byte) (*NetConf, logger.Logger, error) { // Default config conf := NetConf{ - MTU: "9001", - VethPrefix: "eni", + MTU: "9001", + VethPrefix: "eni", + PodSGEnforcingMode: sgpp.DefaultEnforcingMode, } if err := json.Unmarshal(bytes, &conf); err != nil { @@ -208,9 +215,10 @@ func add(args *skel.CmdArgs, cniTypes typeswrapper.CNITYPES, grpcClient grpcwrap // Non-zero value means pods are using branch ENI if r.PodVlanId != 0 { - hostVethName = generateHostVethName(vlanInterfacePrefix, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME)) - err = driverClient.SetupPodENINetwork(hostVethName, args.IfName, args.Netns, v4Addr, v6Addr, int(r.PodVlanId), r.PodENIMAC, - r.PodENISubnetGW, int(r.ParentIfIndex), mtu, log) + hostVethNamePrefix := sgpp.BuildHostVethNamePrefix(conf.VethPrefix, conf.PodSGEnforcingMode) + hostVethName = generateHostVethName(hostVethNamePrefix, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME)) + err = driverClient.SetupBranchENIPodNetwork(hostVethName, args.IfName, args.Netns, v4Addr, v6Addr, int(r.PodVlanId), r.PodENIMAC, + r.PodENISubnetGW, int(r.ParentIfIndex), mtu, conf.PodSGEnforcingMode, log) // This is a dummyVlanInterfaceName generated to identify dummyVlanInterface // which will be created for PPSG scenario to pass along the vlanId information @@ -226,7 +234,7 @@ func add(args *skel.CmdArgs, cniTypes typeswrapper.CNITYPES, grpcClient grpcwrap // build hostVethName // Note: the maximum length for linux interface name is 15 hostVethName = generateHostVethName(conf.VethPrefix, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME)) - err = driverClient.SetupNS(hostVethName, args.IfName, args.Netns, v4Addr, v6Addr, int(r.DeviceNumber), r.VPCv4CIDRs, r.UseExternalSNAT, mtu, log) + err = driverClient.SetupPodNetwork(hostVethName, args.IfName, args.Netns, v4Addr, v6Addr, int(r.DeviceNumber), mtu, log) } if err != nil { @@ -314,42 +322,14 @@ func del(args *skel.CmdArgs, cniTypes typeswrapper.CNITYPES, grpcClient grpcwrap return errors.Wrap(err, "del cmd: failed to load k8s config from args") } - prevResult, ok := conf.PrevResult.(*current.Result) - - // Try to use prevResult if available - // prevResult might not be availabe, if we are still using older cni spec < 0.4.0. 
- // So we should fallback to the old clean up method - if ok { - dummyVlanInterfaceName := generateHostVethName(dummyVlanInterfacePrefix, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME)) - for _, iface := range prevResult.Interfaces { - if iface.Name == dummyVlanInterfaceName { - podVlanId, err := strconv.Atoi(iface.Mac) - if err != nil { - log.Errorf("Failed to parse vlanId from prevResult: %v", err) - return errors.Wrap(err, "del cmd: failed to parse vlanId from prevResult") - } - - // podVlanID can not be 0 as we add dummyVlanInterface only for ppsg - // if it is 0 then we should return an error - if podVlanId == 0 { - log.Errorf("Found SG pod:%s namespace:%s with 0 vlanID", k8sArgs.K8S_POD_NAME, k8sArgs.K8S_POD_NAMESPACE) - return errors.Wrap(err, "del cmd: found Incorrect 0 vlandId for ppsg") - } - - if isNetnsEmpty(args.Netns) { - log.Infof("Ignoring TeardownPodENI as Netns is empty for SG pod:%s namespace: %s containerID:%s", k8sArgs.K8S_POD_NAME, k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_INFRA_CONTAINER_ID) - return nil - } - - err = cleanUpPodENI(podVlanId, log, args.ContainerID, driverClient) - if err != nil { - return err - } - log.Infof("Received del network response for pod %s namespace %s sandbox %s with vlanId: %v", string(k8sArgs.K8S_POD_NAME), - string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID), podVlanId) - return nil - } - } + handled, err := tryDelWithPrevResult(driverClient, conf, k8sArgs, args.IfName, args.Netns, log) + if err != nil { + return errors.Wrap(err, "del cmd: failed to delete with prevResult") + } + if handled { + log.Infof("Handled CNI del request with prevResult: ContainerID(%s) Netns(%s) IfName(%s) PodNamespace(%s) PodName(%s)", + args.ContainerID, args.Netns, args.IfName, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME)) + return nil } // notify local IP address manager to free secondary IP @@ -420,9 +400,9 @@ func del(args *skel.CmdArgs, cniTypes typeswrapper.CNITYPES, grpcClient grpcwrap log.Infof("Ignoring TeardownPodENI as Netns is empty for SG pod:%s namespace: %s containerID:%s", k8sArgs.K8S_POD_NAME, k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_INFRA_CONTAINER_ID) return nil } - err = driverClient.TeardownPodENINetwork(int(r.PodVlanId), log) + err = driverClient.TeardownBranchENIPodNetwork(addr, int(r.PodVlanId), conf.PodSGEnforcingMode, log) } else { - err = driverClient.TeardownNS(addr, int(r.DeviceNumber), log) + err = driverClient.TeardownPodNetwork(addr, int(r.DeviceNumber), log) } if err != nil { @@ -436,14 +416,43 @@ func del(args *skel.CmdArgs, cniTypes typeswrapper.CNITYPES, grpcClient grpcwrap return nil } -func cleanUpPodENI(podVlanId int, log logger.Logger, containerId string, driverClient driver.NetworkAPIs) error { - err := driverClient.TeardownPodENINetwork(podVlanId, log) - if err != nil { - log.Errorf("Failed on TeardownPodNetwork for container ID %s: %v", - containerId, err) - return errors.Wrap(err, "del cmd: failed on tear down pod network") +// tryDelWithPrevResult will try to process CNI delete request without IPAMD. +// returns true if the del request is handled. +func tryDelWithPrevResult(driverClient driver.NetworkAPIs, conf *NetConf, k8sArgs K8sArgs, contVethName string, netNS string, log logger.Logger) (bool, error) { + // prevResult might not be available, if we are still using older cni spec < 0.4.0. 
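+	// (Under CNI spec >= 0.4.0 the runtime caches the result of the previous ADD
+	// call and passes it back as prevResult on DEL, which is what lets us recover
+	// the vlanID and the container IP below without a round trip to IPAMD.)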
+ prevResult, ok := conf.PrevResult.(*current.Result) + if !ok { + return false, nil } - return nil + + dummyIfaceName := generateHostVethName(dummyVlanInterfacePrefix, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME)) + _, dummyIface, found := cniutils.FindInterfaceByName(prevResult.Interfaces, dummyIfaceName) + if !found { + return false, nil + } + podVlanID, err := strconv.Atoi(dummyIface.Mac) + if err != nil || podVlanID == 0 { + return true, errors.Errorf("malformed vlanID in prevResult: %s", dummyIface.Mac) + } + if isNetnsEmpty(netNS) { + log.Infof("Ignoring TeardownPodENI as Netns is empty for SG pod:%s namespace: %s containerID:%s", k8sArgs.K8S_POD_NAME, k8sArgs.K8S_POD_NAMESPACE, k8sArgs.K8S_POD_INFRA_CONTAINER_ID) + return true, nil + } + + containerIfaceIndex, _, found := cniutils.FindInterfaceByName(prevResult.Interfaces, contVethName) + if !found { + return false, errors.Errorf("cannot find contVethName %s in prevResult", contVethName) + } + containerIPs := cniutils.FindIPConfigsByIfaceIndex(prevResult.IPs, containerIfaceIndex) + if len(containerIPs) != 1 { + return false, errors.Errorf("found %d containerIP for %v in prevResult", len(containerIPs), contVethName) + } + containerIP := containerIPs[0].Address + + if err := driverClient.TeardownBranchENIPodNetwork(&containerIP, podVlanID, conf.PodSGEnforcingMode, log); err != nil { + return true, err + } + return true, nil } // Scope usage of this function to only SG pods scenario (https://harbinger.amazon.com/notices/65203) diff --git a/cmd/routed-eni-cni-plugin/cni_test.go b/cmd/routed-eni-cni-plugin/cni_test.go index f0f469104c..af3ed261f0 100644 --- a/cmd/routed-eni-cni-plugin/cni_test.go +++ b/cmd/routed-eni-cni-plugin/cni_test.go @@ -19,6 +19,11 @@ import ( "net" "testing" + "github.com/aws/amazon-vpc-cni-k8s/pkg/sgpp" + "github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger" + "github.com/aws/aws-sdk-go/aws" + "github.com/containernetworking/cni/pkg/types/current" + "github.com/containernetworking/cni/pkg/skel" "github.com/containernetworking/cni/pkg/types" "github.com/golang/mock/gomock" @@ -52,8 +57,9 @@ var netConf = &NetConf{ Name: cniName, Type: cniType, }, - PluginLogLevel: pluginLogLevel, - PluginLogFile: pluginLogFile, + PodSGEnforcingMode: sgpp.DefaultEnforcingMode, + PluginLogLevel: pluginLogLevel, + PluginLogFile: pluginLogFile, } func setup(t *testing.T) (*gomock.Controller, @@ -95,10 +101,8 @@ func TestCmdAdd(t *testing.T) { IP: net.ParseIP(addNetworkReply.IPv4Addr), Mask: net.IPv4Mask(255, 255, 255, 255), } - v6Addr := &net.IPNet{} - - mocksNetwork.EXPECT().SetupNS(gomock.Any(), cmdArgs.IfName, cmdArgs.Netns, - v4Addr, v6Addr, int(addNetworkReply.DeviceNumber), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + mocksNetwork.EXPECT().SetupPodNetwork(gomock.Any(), cmdArgs.IfName, cmdArgs.Netns, + v4Addr, nil, int(addNetworkReply.DeviceNumber), gomock.Any(), gomock.Any()).Return(nil) mocksTypes.EXPECT().PrintResult(gomock.Any(), gomock.Any()).Return(nil) @@ -160,8 +164,8 @@ func TestCmdAddErrSetupPodNetwork(t *testing.T) { Mask: net.IPv4Mask(255, 255, 255, 255), } - mocksNetwork.EXPECT().SetupNS(gomock.Any(), cmdArgs.IfName, cmdArgs.Netns, - addr, nil, int(addNetworkReply.DeviceNumber), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("error on SetupPodNetwork")) + mocksNetwork.EXPECT().SetupPodNetwork(gomock.Any(), cmdArgs.IfName, cmdArgs.Netns, + addr, nil, int(addNetworkReply.DeviceNumber), gomock.Any(), gomock.Any()).Return(errors.New("error on 
SetupPodNetwork")) // when SetupPodNetwork fails, expect to return IP back to datastore delNetworkReply := &rpc.DelNetworkReply{Success: true, IPv4Addr: ipAddr, DeviceNumber: devNum} @@ -200,7 +204,7 @@ func TestCmdDel(t *testing.T) { Mask: net.IPv4Mask(255, 255, 255, 255), } - mocksNetwork.EXPECT().TeardownNS(addr, int(delNetworkReply.DeviceNumber), gomock.Any()).Return(nil) + mocksNetwork.EXPECT().TeardownPodNetwork(addr, int(delNetworkReply.DeviceNumber), gomock.Any()).Return(nil) err := del(cmdArgs, mocksTypes, mocksGRPC, mocksRPC, mocksNetwork) assert.Nil(t, err) @@ -261,7 +265,7 @@ func TestCmdDelErrTeardown(t *testing.T) { Mask: net.IPv4Mask(255, 255, 255, 255), } - mocksNetwork.EXPECT().TeardownNS(addr, int(delNetworkReply.DeviceNumber), gomock.Any()).Return(errors.New("error on teardown")) + mocksNetwork.EXPECT().TeardownPodNetwork(addr, int(delNetworkReply.DeviceNumber), gomock.Any()).Return(errors.New("error on teardown")) err := del(cmdArgs, mocksTypes, mocksGRPC, mocksRPC, mocksNetwork) assert.Error(t, err) @@ -294,8 +298,8 @@ func TestCmdAddForPodENINetwork(t *testing.T) { IP: net.ParseIP(addNetworkReply.IPv4Addr), Mask: net.IPv4Mask(255, 255, 255, 255), } - mocksNetwork.EXPECT().SetupPodENINetwork(gomock.Any(), cmdArgs.IfName, cmdArgs.Netns, addr, nil, 1, "eniHardwareAddr", - "10.0.0.1", 2, gomock.Any(), gomock.Any()).Return(nil) + mocksNetwork.EXPECT().SetupBranchENIPodNetwork(gomock.Any(), cmdArgs.IfName, cmdArgs.Netns, addr, nil, 1, "eniHardwareAddr", + "10.0.0.1", 2, gomock.Any(), sgpp.EnforcingModeStrict, gomock.Any()).Return(nil) mocksTypes.EXPECT().PrintResult(gomock.Any(), gomock.Any()).Return(nil) @@ -327,8 +331,415 @@ func TestCmdDelForPodENINetwork(t *testing.T) { mockC.EXPECT().DelNetwork(gomock.Any(), gomock.Any()).Return(delNetworkReply, nil) - mocksNetwork.EXPECT().TeardownPodENINetwork(1, gomock.Any()).Return(nil) + addr := &net.IPNet{ + IP: net.ParseIP(delNetworkReply.IPv4Addr), + Mask: net.IPv4Mask(255, 255, 255, 255), + } + mocksNetwork.EXPECT().TeardownBranchENIPodNetwork(addr, 1, sgpp.EnforcingModeStrict, gomock.Any()).Return(nil) err := del(cmdArgs, mocksTypes, mocksGRPC, mocksRPC, mocksNetwork) assert.Nil(t, err) } + +func Test_tryDelWithPrevResult(t *testing.T) { + type teardownBranchENIPodNetworkCall struct { + containerAddr *net.IPNet + vlanID int + podSGEnforcingMode sgpp.EnforcingMode + err error + } + type fields struct { + teardownBranchENIPodNetworkCalls []teardownBranchENIPodNetworkCall + } + type args struct { + conf *NetConf + k8sArgs K8sArgs + contVethName string + } + tests := []struct { + name string + fields fields + args args + want bool + wantErr error + }{ + { + name: "successfully deleted with information from prevResult - with enforcing mode standard", + fields: fields{ + teardownBranchENIPodNetworkCalls: []teardownBranchENIPodNetworkCall{ + { + containerAddr: &net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.CIDRMask(32, 32), + }, + vlanID: 7, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + }, + }, + args: args{ + conf: &NetConf{ + NetConf: types.NetConf{ + PrevResult: ¤t.Result{ + Interfaces: []*current.Interface{ + { + Name: "enicc21c2d7785", + }, + { + Name: "eth0", + Sandbox: "/proc/42/ns/net", + }, + { + Name: "dummycc21c2d7785", + Mac: "7", + }, + }, + IPs: []*current.IPConfig{ + { + Version: "4", + Address: net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.CIDRMask(32, 32), + }, + Interface: aws.Int(1), + }, + }, + }, + }, + PodSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + k8sArgs: K8sArgs{ + 
K8S_POD_NAMESPACE: "default", + K8S_POD_NAME: "sample-pod", + }, + contVethName: "eth0", + }, + want: true, + }, + { + name: "successfully deleted with information from prevResult - with enforcing mode strict", + fields: fields{ + teardownBranchENIPodNetworkCalls: []teardownBranchENIPodNetworkCall{ + { + containerAddr: &net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.CIDRMask(32, 32), + }, + vlanID: 7, + podSGEnforcingMode: sgpp.EnforcingModeStrict, + }, + }, + }, + args: args{ + conf: &NetConf{ + NetConf: types.NetConf{ + PrevResult: ¤t.Result{ + Interfaces: []*current.Interface{ + { + Name: "enicc21c2d7785", + }, + { + Name: "eth0", + Sandbox: "/proc/42/ns/net", + }, + { + Name: "dummycc21c2d7785", + Mac: "7", + }, + }, + IPs: []*current.IPConfig{ + { + Version: "4", + Address: net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.CIDRMask(32, 32), + }, + Interface: aws.Int(1), + }, + }, + }, + }, + PodSGEnforcingMode: sgpp.EnforcingModeStrict, + }, + k8sArgs: K8sArgs{ + K8S_POD_NAMESPACE: "default", + K8S_POD_NAME: "sample-pod", + }, + contVethName: "eth0", + }, + want: true, + }, + { + name: "failed to delete due to teardownBranchENIPodNetworkCall failed", + fields: fields{ + teardownBranchENIPodNetworkCalls: []teardownBranchENIPodNetworkCall{ + { + containerAddr: &net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.CIDRMask(32, 32), + }, + vlanID: 7, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + err: errors.New("some error"), + }, + }, + }, + args: args{ + conf: &NetConf{ + NetConf: types.NetConf{ + PrevResult: ¤t.Result{ + Interfaces: []*current.Interface{ + { + Name: "enicc21c2d7785", + }, + { + Name: "eth0", + Sandbox: "/proc/42/ns/net", + }, + { + Name: "dummycc21c2d7785", + Mac: "7", + }, + }, + IPs: []*current.IPConfig{ + { + Version: "4", + Address: net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.CIDRMask(32, 32), + }, + Interface: aws.Int(1), + }, + }, + }, + }, + PodSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + k8sArgs: K8sArgs{ + K8S_POD_NAMESPACE: "default", + K8S_POD_NAME: "sample-pod", + }, + contVethName: "eth0", + }, + wantErr: errors.New("some error"), + }, + { + name: "dummy interface don't exists", + fields: fields{}, + args: args{ + conf: &NetConf{ + NetConf: types.NetConf{ + PrevResult: ¤t.Result{ + Interfaces: []*current.Interface{ + { + Name: "enicc21c2d7785", + }, + { + Name: "eth0", + Sandbox: "/proc/42/ns/net", + }, + }, + IPs: []*current.IPConfig{ + { + Version: "4", + Address: net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.CIDRMask(32, 32), + }, + Interface: aws.Int(1), + }, + }, + }, + }, + PodSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + k8sArgs: K8sArgs{ + K8S_POD_NAMESPACE: "default", + K8S_POD_NAME: "sample-pod", + }, + contVethName: "eth0", + }, + want: false, + }, + { + name: "malformed vlanID in prevResult - xxx", + fields: fields{}, + args: args{ + conf: &NetConf{ + NetConf: types.NetConf{ + PrevResult: ¤t.Result{ + Interfaces: []*current.Interface{ + { + Name: "enicc21c2d7785", + }, + { + Name: "eth0", + Sandbox: "/proc/42/ns/net", + }, + { + Name: "dummycc21c2d7785", + Mac: "xxx", + }, + }, + IPs: []*current.IPConfig{ + { + Version: "4", + Address: net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.CIDRMask(32, 32), + }, + Interface: aws.Int(1), + }, + }, + }, + }, + PodSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + k8sArgs: K8sArgs{ + K8S_POD_NAMESPACE: "default", + K8S_POD_NAME: "sample-pod", + }, + contVethName: "eth0", + }, + wantErr: errors.New("malformed vlanID in prevResult: 
xxx"), + }, + { + name: "malformed vlanID in prevResult - 0", + fields: fields{}, + args: args{ + conf: &NetConf{ + NetConf: types.NetConf{ + PrevResult: ¤t.Result{ + Interfaces: []*current.Interface{ + { + Name: "enicc21c2d7785", + }, + { + Name: "eth0", + Sandbox: "/proc/42/ns/net", + }, + { + Name: "dummycc21c2d7785", + Mac: "0", + }, + }, + IPs: []*current.IPConfig{ + { + Version: "4", + Address: net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.CIDRMask(32, 32), + }, + Interface: aws.Int(1), + }, + }, + }, + }, + PodSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + k8sArgs: K8sArgs{ + K8S_POD_NAMESPACE: "default", + K8S_POD_NAME: "sample-pod", + }, + contVethName: "eth0", + }, + wantErr: errors.New("malformed vlanID in prevResult: 0"), + }, + { + name: "confVeth don't exists", + fields: fields{}, + args: args{ + conf: &NetConf{ + NetConf: types.NetConf{ + PrevResult: ¤t.Result{ + Interfaces: []*current.Interface{ + { + Name: "enicc21c2d7785", + }, + { + Name: "dummycc21c2d7785", + Mac: "7", + }, + }, + IPs: []*current.IPConfig{ + { + Version: "4", + Address: net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + Mask: net.CIDRMask(32, 32), + }, + Interface: aws.Int(1), + }, + }, + }, + }, + PodSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + k8sArgs: K8sArgs{ + K8S_POD_NAMESPACE: "default", + K8S_POD_NAME: "sample-pod", + }, + contVethName: "eth0", + }, + wantErr: errors.New("cannot find contVethName eth0 in prevResult"), + }, + { + name: "container IP don't exists", + fields: fields{}, + args: args{ + conf: &NetConf{ + NetConf: types.NetConf{ + PrevResult: ¤t.Result{ + Interfaces: []*current.Interface{ + { + Name: "enicc21c2d7785", + }, + { + Name: "eth0", + Sandbox: "/proc/42/ns/net", + }, + { + Name: "dummycc21c2d7785", + Mac: "7", + }, + }, + IPs: []*current.IPConfig{}, + }, + }, + PodSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + k8sArgs: K8sArgs{ + K8S_POD_NAMESPACE: "default", + K8S_POD_NAME: "sample-pod", + }, + contVethName: "eth0", + }, + wantErr: errors.New("found 0 containerIP for eth0 in prevResult"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testLogCfg := logger.Configuration{ + LogLevel: "Debug", + LogLocation: "stdout", + } + testLogger := logger.New(&testLogCfg) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + driverClient := mock_driver.NewMockNetworkAPIs(ctrl) + for _, call := range tt.fields.teardownBranchENIPodNetworkCalls { + driverClient.EXPECT().TeardownBranchENIPodNetwork(call.containerAddr, call.vlanID, call.podSGEnforcingMode, gomock.Any()).Return(call.err) + } + + got, err := tryDelWithPrevResult(driverClient, tt.args.conf, tt.args.k8sArgs, tt.args.contVethName, "/proc/1/ns", testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.want, got) + } + }) + } +} diff --git a/cmd/routed-eni-cni-plugin/driver/driver.go b/cmd/routed-eni-cni-plugin/driver/driver.go index 2b469c57b0..d96a585771 100644 --- a/cmd/routed-eni-cni-plugin/driver/driver.go +++ b/cmd/routed-eni-cni-plugin/driver/driver.go @@ -21,6 +21,8 @@ import ( "syscall" "time" + "github.com/aws/amazon-vpc-cni-k8s/pkg/sgpp" + "github.com/pkg/errors" "golang.org/x/sys/unix" @@ -29,7 +31,6 @@ import ( "github.com/aws/amazon-vpc-cni-k8s/pkg/ipwrapper" "github.com/aws/amazon-vpc-cni-k8s/pkg/netlinkwrapper" - "github.com/aws/amazon-vpc-cni-k8s/pkg/networkutils" "github.com/aws/amazon-vpc-cni-k8s/pkg/nswrapper" "github.com/aws/amazon-vpc-cni-k8s/pkg/procsyswrapper" 
"github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger" @@ -42,8 +43,6 @@ const ( toContainerRulePriority = 512 // 1024 is reserved for (IP rule not to table main) fromContainerRulePriority = 1536 - // Main routing table number - mainRouteTable = unix.RT_TABLE_MAIN WAIT_INTERVAL = 50 * time.Millisecond @@ -54,11 +53,16 @@ const ( // NetworkAPIs defines network API calls type NetworkAPIs interface { - SetupNS(hostVethName string, contVethName string, netnsPath string, v4Addr *net.IPNet, v6Addr *net.IPNet, deviceNumber int, vpcCIDRs []string, useExternalSNAT bool, mtu int, log logger.Logger) error - TeardownNS(addr *net.IPNet, deviceNumber int, log logger.Logger) error - SetupPodENINetwork(hostVethName string, contVethName string, netnsPath string, v4Addr *net.IPNet, v6Addr *net.IPNet, vlanID int, eniMAC string, - subnetGW string, parentIfIndex int, mtu int, log logger.Logger) error - TeardownPodENINetwork(vlanID int, log logger.Logger) error + // SetupPodNetwork sets up pod network for normal ENI based pods + SetupPodNetwork(hostVethName string, contVethName string, netnsPath string, v4Addr *net.IPNet, v6Addr *net.IPNet, deviceNumber int, mtu int, log logger.Logger) error + // TeardownPodNetwork clean up pod network for normal ENI based pods + TeardownPodNetwork(containerAddr *net.IPNet, deviceNumber int, log logger.Logger) error + + // SetupBranchENIPodNetwork sets up pod network for branch ENI based pods + SetupBranchENIPodNetwork(hostVethName string, contVethName string, netnsPath string, v4Addr *net.IPNet, v6Addr *net.IPNet, vlanID int, eniMAC string, + subnetGW string, parentIfIndex int, mtu int, podSGEnforcingMode sgpp.EnforcingMode, log logger.Logger) error + // TeardownBranchENIPodNetwork cleans up pod network for branch ENI based pods + TeardownBranchENIPodNetwork(containerAddr *net.IPNet, vlanID int, podSGEnforcingMode sgpp.EnforcingMode, log logger.Logger) error } type linuxNetwork struct { @@ -216,7 +220,7 @@ func (createVethContext *createVethPairContext) run(hostNS ns.NetNS) error { } if createVethContext.v6Addr != nil && createVethContext.v6Addr.IP.To16() != nil { - if err := WaitForAddressesToBeStable(createVethContext.contVethName, v6DADTimeout); err != nil { + if err := waitForAddressesToBeStable(createVethContext.netLink, createVethContext.contVethName, v6DADTimeout); err != nil { return errors.Wrap(err, "setup NS network: failed while waiting for v6 addresses to be stable") } } @@ -230,18 +234,18 @@ func (createVethContext *createVethPairContext) run(hostNS ns.NetNS) error { } // Implements `SettleAddresses` functionality of the `ip` package. -// WaitForAddressesToBeStable waits for all addresses on a link to leave tentative state. +// waitForAddressesToBeStable waits for all addresses on a link to leave tentative state. // Will be particularly useful for ipv6, where all addresses need to do DAD. // If any addresses are still tentative after timeout seconds, then error. 
-func WaitForAddressesToBeStable(ifName string, timeout time.Duration) error { - link, err := netlink.LinkByName(ifName) +func waitForAddressesToBeStable(netLink netlinkwrapper.NetLink, ifName string, timeout time.Duration) error { + link, err := netLink.LinkByName(ifName) if err != nil { return fmt.Errorf("failed to retrieve link: %v", err) } deadline := time.Now().Add(timeout) for { - addrs, err := netlink.AddrList(link, netlink.FAMILY_V6) + addrs, err := netLink.AddrList(link, netlink.FAMILY_V6) if err != nil { return fmt.Errorf("could not list addresses: %v", err) } @@ -267,354 +271,367 @@ func WaitForAddressesToBeStable(ifName string, timeout time.Duration) error { } } -// SetupNS wires up linux networking for a pod's network -func (os *linuxNetwork) SetupNS(hostVethName string, contVethName string, netnsPath string, v4Addr *net.IPNet, v6Addr *net.IPNet, - deviceNumber int, vpcCIDRs []string, useExternalSNAT bool, mtu int, log logger.Logger) error { - log.Debugf("SetupNS: hostVethName=%s, contVethName=%s, netnsPath=%s, deviceNumber=%d, mtu=%d", hostVethName, contVethName, netnsPath, deviceNumber, mtu) - return setupNS(hostVethName, contVethName, netnsPath, v4Addr, v6Addr, deviceNumber, vpcCIDRs, useExternalSNAT, os.netLink, os.ns, mtu, log, os.procSys) -} - -func setupNS(hostVethName string, contVethName string, netnsPath string, v4Addr *net.IPNet, v6Addr *net.IPNet, deviceNumber int, vpcCIDRs []string, useExternalSNAT bool, - netLink netlinkwrapper.NetLink, ns nswrapper.NS, mtu int, log logger.Logger, procSys procsyswrapper.ProcSys) error { +// SetupPodNetwork wires up linux networking for a pod's network +// we expect v4Addr and v6Addr to have correct IPAddress Family. +func (n *linuxNetwork) SetupPodNetwork(hostVethName string, contVethName string, netnsPath string, v4Addr *net.IPNet, v6Addr *net.IPNet, + deviceNumber int, mtu int, log logger.Logger) error { + log.Debugf("SetupPodNetwork: hostVethName=%s, contVethName=%s, netnsPath=%s, v4Addr=%v, v6Addr=%v, deviceNumber=%d, mtu=%d", + hostVethName, contVethName, netnsPath, v4Addr, v6Addr, deviceNumber, mtu) - hostVeth, err := setupVeth(hostVethName, contVethName, netnsPath, v4Addr, v6Addr, netLink, ns, mtu, procSys, log) + hostVeth, err := n.setupVeth(hostVethName, contVethName, netnsPath, v4Addr, v6Addr, mtu, log) if err != nil { - return errors.Wrapf(err, "setupNS network: failed to setup veth pair.") + return errors.Wrapf(err, "SetupPodNetwork: failed to setup veth pair") } - log.Debugf("Setup host route outgoing hostVeth, LinkIndex %d", hostVeth.Attrs().Index) + var containerAddr *net.IPNet + if v4Addr != nil { + containerAddr = v4Addr + } else if v6Addr != nil { + containerAddr = v6Addr + } - var addrHostAddr *net.IPNet - //We only support either v4 or v6 modes. 
-	if v4Addr != nil && v4Addr.IP.To4() != nil {
-		addrHostAddr = &net.IPNet{
-			IP:   v4Addr.IP,
-			Mask: net.CIDRMask(32, 32)}
-	} else if v6Addr != nil && v6Addr.IP.To16() != nil {
-		addrHostAddr = &net.IPNet{
-			IP:   v6Addr.IP,
-			Mask: net.CIDRMask(128, 128)}
+	rtTable := unix.RT_TABLE_MAIN
+	if deviceNumber > 0 {
+		rtTable = deviceNumber + 1
 	}
+	if err := n.setupIPBasedContainerRouteRules(hostVeth, containerAddr, rtTable, log); err != nil {
+		return errors.Wrapf(err, "SetupPodNetwork: unable to setup IP based container routes and rules")
+	}
+	return nil
+}
 
-	// Add host route
-	route := netlink.Route{
-		LinkIndex: hostVeth.Attrs().Index,
-		Scope:     netlink.SCOPE_LINK,
-		Dst:       addrHostAddr}
+// TeardownPodNetwork cleans up ip rules
+func (n *linuxNetwork) TeardownPodNetwork(containerAddr *net.IPNet, deviceNumber int, log logger.Logger) error {
+	log.Debugf("TeardownPodNetwork: containerAddr=%s, deviceNumber=%d", containerAddr.String(), deviceNumber)
 
-	// Add or replace route
-	if err := netLink.RouteReplace(&route); err != nil {
-		return errors.Wrapf(err, "setupNS: unable to add or replace route entry for %s", route.Dst.IP.String())
+	rtTable := unix.RT_TABLE_MAIN
+	if deviceNumber > 0 {
+		rtTable = deviceNumber + 1
+	}
+	if err := n.teardownIPBasedContainerRouteRules(containerAddr, rtTable, log); err != nil {
+		return errors.Wrapf(err, "TeardownPodNetwork: unable to teardown IP based container routes and rules")
 	}
-	log.Debugf("Successfully set host route to be %s/0", route.Dst.IP.String())
+	return nil
+}
 
-	err = addContainerRule(netLink, true, addrHostAddr, mainRouteTable)
+// SetupBranchENIPodNetwork sets up the network ns for pods requesting their own security groups
+// we expect v4Addr and v6Addr to have the correct IP address family.
+func (n *linuxNetwork) SetupBranchENIPodNetwork(hostVethName string, contVethName string, netnsPath string, v4Addr *net.IPNet, v6Addr *net.IPNet,
+	vlanID int, eniMAC string, subnetGW string, parentIfIndex int, mtu int, podSGEnforcingMode sgpp.EnforcingMode, log logger.Logger) error {
+	log.Debugf("SetupBranchENIPodNetwork: hostVethName=%s, contVethName=%s, netnsPath=%s, v4Addr=%v, v6Addr=%v, vlanID=%d, eniMAC=%s, subnetGW=%s, parentIfIndex=%d, mtu=%d, podSGEnforcingMode=%v",
+		hostVethName, contVethName, netnsPath, v4Addr, v6Addr, vlanID, eniMAC, subnetGW, parentIfIndex, mtu, podSGEnforcingMode)
+	hostVeth, err := n.setupVeth(hostVethName, contVethName, netnsPath, v4Addr, v6Addr, mtu, log)
 	if err != nil {
-		log.Errorf("Failed to add toContainer rule for %s err=%v, ", addrHostAddr.String(), err)
-		return errors.Wrap(err, "setupNS network: failed to add toContainer")
+		return errors.Wrapf(err, "SetupBranchENIPodNetwork: failed to setup veth pair")
 	}
-	log.Infof("Added toContainer rule for %s", addrHostAddr.String())
+	// Recursively clean up any ip rules left over from a previous hostVeth (pods with the same name can be recreated multiple times).
+	//
+	// Previously we obtained the vlanID from the pod spec, so the vlanID could already have been updated by the time the old pod was deleted, and the hostVeth cleanup done during old-pod deletion could therefore be incorrect.
+	// Now that we obtain the vlanID from prevResult during pod deletion, we should be able to purge the hostVeth rules correctly, so this logic should no longer be needed.
+	// It is kept here as a safety measure.
+ oldFromHostVethRule := n.netLink.NewRule() + oldFromHostVethRule.IifName = hostVethName + oldFromHostVethRule.Priority = vlanRulePriority + if err := netLinkRuleDelAll(n.netLink, oldFromHostVethRule); err != nil { + return errors.Wrapf(err, "SetupBranchENIPodNetwork: failed to delete hostVeth rule for %s", hostVethName) + } - // add from-pod rule, only need it when it is not primary ENI - if deviceNumber > 0 { - // To be backwards compatible, we will have to keep this off-by one setting - tableNumber := deviceNumber + 1 - // add rule: 1536: from use table - err = addContainerRule(netLink, false, addrHostAddr, tableNumber) - if err != nil { - log.Errorf("Failed to add fromContainer rule for %s err: %v", addrHostAddr.String(), err) - return errors.Wrap(err, "add NS network: failed to add fromContainer rule") + rtTable := vlanID + 100 + vlanLink, err := n.setupVlan(vlanID, eniMAC, subnetGW, parentIfIndex, rtTable, log) + if err != nil { + return errors.Wrapf(err, "SetupBranchENIPodNetwork: failed to setup vlan") + } + + var containerAddr *net.IPNet + if v4Addr != nil { + containerAddr = v4Addr + } else if v6Addr != nil { + containerAddr = v6Addr + } + + switch podSGEnforcingMode { + case sgpp.EnforcingModeStrict: + if err := n.setupIIFBasedContainerRouteRules(hostVeth, containerAddr, vlanLink, rtTable, log); err != nil { + return errors.Wrapf(err, "SetupBranchENIPodNetwork: unable to setup IIF based container routes and rules") + } + case sgpp.EnforcingModeStandard: + if err := n.setupIPBasedContainerRouteRules(hostVeth, containerAddr, rtTable, log); err != nil { + return errors.Wrapf(err, "SetupBranchENIPodNetwork: unable to setup IP based container routes and rules") } - log.Infof("Added rule priority %d from %s table %d", fromContainerRulePriority, addrHostAddr.String(), tableNumber) } return nil } +// TeardownBranchENIPodNetwork tears down the vlan and corresponding ip rules. +func (n *linuxNetwork) TeardownBranchENIPodNetwork(containerAddr *net.IPNet, vlanID int, _ sgpp.EnforcingMode, log logger.Logger) error { + log.Debugf("TeardownBranchENIPodNetwork: containerAddr=%s, vlanID=%d", containerAddr.String(), vlanID) + + if err := n.teardownVlan(vlanID, log); err != nil { + return errors.Wrapf(err, "TeardownBranchENIPodNetwork: failed to teardown vlan") + } + + // to handle the migration between different enforcingMode, we try to clean up rules under both mode since the pod might be setup with a different mode. + rtTable := vlanID + 100 + if err := n.teardownIIFBasedContainerRouteRules(rtTable, log); err != nil { + return errors.Wrapf(err, "TeardownBranchENIPodNetwork: unable to teardown IIF based container routes and rules") + } + if err := n.teardownIPBasedContainerRouteRules(containerAddr, rtTable, log); err != nil { + return errors.Wrapf(err, "TeardownBranchENIPodNetwork: unable to teardown IP based container routes and rules") + } + + return nil +} + // setupVeth sets up veth for the pod. -func setupVeth(hostVethName string, contVethName string, netnsPath string, v4Addr *net.IPNet, v6Addr *net.IPNet, netLink netlinkwrapper.NetLink, - ns nswrapper.NS, mtu int, procSys procsyswrapper.ProcSys, log logger.Logger) (netlink.Link, error) { +func (n *linuxNetwork) setupVeth(hostVethName string, contVethName string, netnsPath string, v4Addr *net.IPNet, v6Addr *net.IPNet, mtu int, log logger.Logger) (netlink.Link, error) { // Clean up if hostVeth exists. 
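+	// (A stale hostVeth can be left behind when a previous sandbox for the same pod
+	// was not torn down cleanly, e.g. after a failed CNI DEL.)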
- if oldHostVeth, err := netLink.LinkByName(hostVethName); err == nil { - if err = netLink.LinkDel(oldHostVeth); err != nil { - return nil, errors.Wrapf(err, "setupVeth network: failed to delete old hostVeth %q", hostVethName) + if oldHostVeth, err := n.netLink.LinkByName(hostVethName); err == nil { + if err = n.netLink.LinkDel(oldHostVeth); err != nil { + return nil, errors.Wrapf(err, "failed to delete old hostVeth %s", hostVethName) } - log.Debugf("Cleaned up old hostVeth: %v\n", hostVethName) + log.Debugf("Successfully deleted old hostVeth %s", hostVethName) } - log.Debugf("v4addr: %v; v6Addr: %v\n", v4Addr, v6Addr) createVethContext := newCreateVethPairContext(contVethName, hostVethName, v4Addr, v6Addr, mtu) - if err := ns.WithNetNSPath(netnsPath, createVethContext.run); err != nil { - log.Errorf("Failed to setup veth network %v", err) - return nil, errors.Wrap(err, "setupVeth network: failed to setup veth network") + if err := n.ns.WithNetNSPath(netnsPath, createVethContext.run); err != nil { + return nil, errors.Wrap(err, "failed to setup veth network") } - hostVeth, err := netLink.LinkByName(hostVethName) + hostVeth, err := n.netLink.LinkByName(hostVethName) if err != nil { - return nil, errors.Wrapf(err, "setupVeth network: failed to find link %q", hostVethName) + return nil, errors.Wrapf(err, "failed to find hostVeth %s", hostVethName) } - if err := procSys.Set(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", hostVethName), "0"); err != nil { + if err := n.procSys.Set(fmt.Sprintf("net/ipv6/conf/%s/accept_ra", hostVethName), "0"); err != nil { if !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "setupVeth network: failed to disable IPv6 router advertisements") + return nil, errors.Wrapf(err, "failed to disable IPv6 router advertisements") } - log.Debugf("setupVeth network: Ignoring '%s' writing to accept_ra: Assuming kernel lacks IPv6 support", err) + log.Debugf("Ignoring '%v' writing to accept_ra: Assuming kernel lacks IPv6 support", err) } - if err := procSys.Set(fmt.Sprintf("net/ipv6/conf/%s/accept_redirects", hostVethName), "0"); err != nil { + if err := n.procSys.Set(fmt.Sprintf("net/ipv6/conf/%s/accept_redirects", hostVethName), "0"); err != nil { if !os.IsNotExist(err) { - return nil, errors.Wrapf(err, "setupVeth network: failed to disable IPv6 ICMP redirects") + return nil, errors.Wrapf(err, "failed to disable IPv6 ICMP redirects") } - log.Debugf("setupVeth network: Ignoring '%s' writing to accept_redirects: Assuming kernel lacks IPv6 support", err) + log.Debugf("Ignoring '%v' writing to accept_redirects: Assuming kernel lacks IPv6 support", err) } - log.Debugf("setupVeth network: disabled IPv6 RA and ICMP redirects on %s", hostVethName) + log.Debugf("Successfully disabled IPv6 RA and ICMP redirects on hostVeth %s", hostVethName) // Explicitly set the veth to UP state, because netlink doesn't always do that on all the platforms with net.FlagUp. // veth won't get a link local address unless it's set to UP state. 
- if err = netLink.LinkSetUp(hostVeth); err != nil { - return nil, errors.Wrapf(err, "setupVeth network: failed to set link %q up", hostVethName) + if err = n.netLink.LinkSetUp(hostVeth); err != nil { + return nil, errors.Wrapf(err, "failed to setup hostVeth %s", hostVethName) } return hostVeth, nil } -// SetupPodENINetwork sets up the network ns for pods requesting its own security group -func (os *linuxNetwork) SetupPodENINetwork(hostVethName string, contVethName string, netnsPath string, v4Addr *net.IPNet, - v6Addr *net.IPNet, vlanID int, eniMAC string, subnetGW string, parentIfIndex int, mtu int, log logger.Logger) error { - - hostVeth, err := setupVeth(hostVethName, contVethName, netnsPath, v4Addr, v6Addr, os.netLink, os.ns, mtu, os.procSys, log) - if err != nil { - return errors.Wrapf(err, "SetupPodENINetwork failed to setup veth pair.") - } - - vlanTableID := vlanID + 100 - vlanLink := buildVlanLink(vlanID, parentIfIndex, eniMAC) - - // 1a. clean up if vlan already exists (necessary when trunk ENI changes). - if oldVlan, err := os.netLink.LinkByName(vlanLink.Name); err == nil { - if err = os.netLink.LinkDel(oldVlan); err != nil { - return errors.Wrapf(err, "SetupPodENINetwork: failed to delete old vlan %s", vlanLink.Name) - } - log.Debugf("Cleaned up old vlan: %s", vlanLink.Name) - } - - // 1b. clean up any previous hostVeth ip rule - oldVlanRule := os.netLink.NewRule() - oldVlanRule.IifName = hostVethName - oldVlanRule.Priority = vlanRulePriority - // loop is required to clean up all existing rules created on the host (when pod with same name are recreated multiple times) - for { - if err := os.netLink.RuleDel(oldVlanRule); err != nil { - if !containsNoSuchRule(err) { - return errors.Wrapf(err, "SetupPodENINetwork: failed to delete hostveth rule for %s", hostVeth.Attrs().Name) - } - break +// setupVlan sets up the vlan interface for branchENI, and configures default routes in specified route table +func (n *linuxNetwork) setupVlan(vlanID int, eniMAC string, subnetGW string, parentIfIndex int, rtTable int, log logger.Logger) (netlink.Link, error) { + vlanLinkName := buildVlanLinkName(vlanID) + // 1. clean up if vlan already exists (necessary when trunk ENI changes). + if oldVlan, err := n.netLink.LinkByName(vlanLinkName); err == nil { + if err := n.netLink.LinkDel(oldVlan); err != nil { + return nil, errors.Wrapf(err, "failed to delete old vlan link %s", vlanLinkName) } + log.Debugf("Successfully deleted old vlan link: %s", vlanLinkName) } // 2. add new vlan link - err = os.netLink.LinkAdd(vlanLink) - if err != nil { - return errors.Wrapf(err, "SetupPodENINetwork: failed to add vlan link.") + vlanLink := buildVlanLink(vlanLinkName, vlanID, parentIfIndex, eniMAC) + if err := n.netLink.LinkAdd(vlanLink); err != nil { + return nil, errors.Wrapf(err, "failed to add vlan link %s", vlanLinkName) } // 3. bring up the vlan - if err = os.netLink.LinkSetUp(vlanLink); err != nil { - return errors.Wrapf(err, "SetupPodENINetwork: failed to set link %q up", vlanLink.Name) + if err := n.netLink.LinkSetUp(vlanLink); err != nil { + return nil, errors.Wrapf(err, "failed to setUp vlan link %s", vlanLinkName) } // 4. 
create default routes for vlan - routes := buildRoutesForVlan(vlanTableID, vlanLink.Index, net.ParseIP(subnetGW)) + routes := buildRoutesForVlan(rtTable, vlanLink.Index, net.ParseIP(subnetGW)) for _, r := range routes { - if err := os.netLink.RouteReplace(&r); err != nil { - return errors.Wrapf(err, "SetupPodENINetwork: unable to replace route entry %s via %s", r.Dst.IP.String(), subnetGW) + if err := n.netLink.RouteReplace(&r); err != nil { + return nil, errors.Wrapf(err, "failed to replace route entry %s via %s", r.Dst.IP.String(), subnetGW) } } + return vlanLink, nil +} - var addr *net.IPNet - if v4Addr != nil { - addr = v4Addr - } else if v6Addr != nil { - addr = v6Addr +func (n *linuxNetwork) teardownVlan(vlanID int, log logger.Logger) error { + vlanLinkName := buildVlanLinkName(vlanID) + if vlan, err := n.netLink.LinkByName(vlanLinkName); err == nil { + if err := n.netLink.LinkDel(vlan); err != nil { + return errors.Wrapf(err, "failed to delete vlan link %s", vlanLinkName) + } + log.Debugf("Successfully deleted vlan link %s", vlanLinkName) } + return nil +} - // 5. create route entry for hostveth. +// setupIPBasedContainerRouteRules setups the routes and route rules for containers based on IP. +// traffic to container(to containerAddr) will be routed via the `main` route table. +// traffic from container(from containerAddr) will be routed via the specified rtTable. +func (n *linuxNetwork) setupIPBasedContainerRouteRules(hostVeth netlink.Link, containerAddr *net.IPNet, rtTable int, log logger.Logger) error { route := netlink.Route{ LinkIndex: hostVeth.Attrs().Index, Scope: netlink.SCOPE_LINK, - Dst: addr, - Table: vlanTableID, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, } - if err := os.netLink.RouteReplace(&route); err != nil { - return errors.Wrapf(err, "SetupPodENINetwork: unable to add or replace route entry for %s", route.Dst.IP.String()) + if err := n.netLink.RouteReplace(&route); err != nil { + return errors.Wrapf(err, "failed to setup container route, containerAddr=%s, hostVeth=%s, rtTable=%v", + containerAddr.String(), hostVeth.Attrs().Name, "main") } + log.Debugf("Successfully setup container route, containerAddr=%s, hostVeth=%s, rtTable=%v", + containerAddr.String(), hostVeth.Attrs().Name, "main") - log.Debugf("Successfully set host route to be %s/0", route.Dst.IP.String()) - - // 6. Add ip rules for the pod. 
- vlanRule := os.netLink.NewRule() - vlanRule.Table = vlanTableID - vlanRule.Priority = vlanRulePriority - vlanRule.IifName = vlanLink.Name - err = os.netLink.RuleAdd(vlanRule) - if err != nil && !isRuleExistsError(err) { - return errors.Wrapf(err, "SetupPodENINetwork: unable to add ip rule for vlan link %s ", vlanLink.Name) + toContainerRule := n.netLink.NewRule() + toContainerRule.Dst = containerAddr + toContainerRule.Priority = toContainerRulePriority + toContainerRule.Table = unix.RT_TABLE_MAIN + if err := n.netLink.RuleAdd(toContainerRule); err != nil && !isRuleExistsError(err) { + return errors.Wrapf(err, "failed to setup toContainer rule, containerAddr=%s, rtTable=%v", containerAddr.String(), "main") } - vlanRule.IifName = hostVeth.Attrs().Name - err = os.netLink.RuleAdd(vlanRule) - if err != nil && !isRuleExistsError(err) { - return errors.Wrapf(err, "SetupPodENINetwork: unable to add ip rule for host veth %s", hostVethName) + log.Debugf("Successfully setup toContainer rule, containerAddr=%s, rtTable=%v", containerAddr.String(), "main") + + if rtTable != unix.RT_TABLE_MAIN { + fromContainerRule := n.netLink.NewRule() + fromContainerRule.Src = containerAddr + fromContainerRule.Priority = fromContainerRulePriority + fromContainerRule.Table = rtTable + if err := n.netLink.RuleAdd(fromContainerRule); err != nil && !isRuleExistsError(err) { + return errors.Wrapf(err, "failed to setup fromContainer rule, containerAddr=%s, rtTable=%v", containerAddr.String(), rtTable) + } + log.Debugf("Successfully setup fromContainer rule, containerAddr=%s, rtTable=%v", containerAddr.String(), rtTable) } + return nil } -// buildRoutesForVlan builds routes required for the vlan link. -func buildRoutesForVlan(vlanTableID int, vlanIndex int, gw net.IP) []netlink.Route { - return []netlink.Route{ - // Add a direct link route for the pod vlan link only. - { - LinkIndex: vlanIndex, - Dst: &net.IPNet{IP: gw, Mask: net.CIDRMask(32, 32)}, - Scope: netlink.SCOPE_LINK, - Table: vlanTableID, - }, - { - LinkIndex: vlanIndex, - Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, - Scope: netlink.SCOPE_UNIVERSE, - Gw: gw, - Table: vlanTableID, - }, +func (n *linuxNetwork) teardownIPBasedContainerRouteRules(containerAddr *net.IPNet, rtTable int, log logger.Logger) error { + toContainerRule := n.netLink.NewRule() + toContainerRule.Dst = containerAddr + toContainerRule.Priority = toContainerRulePriority + toContainerRule.Table = unix.RT_TABLE_MAIN + if err := n.netLink.RuleDel(toContainerRule); err != nil && !containsNoSuchRule(err) { + return errors.Wrapf(err, "failed to delete toContainer rule, containerAddr=%s, rtTable=%v", containerAddr.String(), "main") } -} + log.Debugf("Successfully deleted toContainer rule, containerAddr=%s, rtTable=%v", containerAddr.String(), "main") -// buildVlanLink builds vlan link for the pod. 
-func buildVlanLink(vlanID int, parentIfIndex int, eniMAC string) *netlink.Vlan { - la := netlink.NewLinkAttrs() - la.Name = fmt.Sprintf("vlan.eth.%d", vlanID) - la.ParentIndex = parentIfIndex - la.HardwareAddr, _ = net.ParseMAC(eniMAC) - return &netlink.Vlan{LinkAttrs: la, VlanId: vlanID} -} + if rtTable != unix.RT_TABLE_MAIN { + fromContainerRule := netlink.NewRule() + fromContainerRule.Src = containerAddr + fromContainerRule.Priority = fromContainerRulePriority + fromContainerRule.Table = rtTable -func addContainerRule(netLink netlinkwrapper.NetLink, isToContainer bool, addr *net.IPNet, table int) error { - if addr == nil { - return errors.New("can't add container rules without an IP address") - } - containerRule := netLink.NewRule() - if isToContainer { - // Example: 512: from all to 10.200.202.222 lookup main - containerRule.Dst = addr - containerRule.Priority = toContainerRulePriority - } else { - // Example: 1536: from 10.200.202.222 to 10.200.0.0/16 lookup 2 - containerRule.Src = addr - containerRule.Priority = fromContainerRulePriority + // note: older version CNI sets up multiple CIDR based from container rule, so we recursively delete them to be backwards-compatible. + if err := netLinkRuleDelAll(n.netLink, fromContainerRule); err != nil { + return errors.Wrapf(err, "failed to delete fromContainer rule, containerAddr=%s, rtTable=%v", containerAddr.String(), rtTable) + } + log.Debugf("Successfully deleted fromContainer rule, containerAddr=%s, rtTable=%v", containerAddr.String(), rtTable) } - containerRule.Table = table - err := netLink.RuleDel(containerRule) - if err != nil && !containsNoSuchRule(err) { - return errors.Wrapf(err, "addContainerRule: failed to delete old container rule for %s", addr.String()) + route := netlink.Route{ + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, } - err = netLink.RuleAdd(containerRule) - if err != nil { - return errors.Wrapf(err, "addContainerRule: failed to add container rule for %s", addr.String()) + // routes will be automatically deleted by kernel when the hostVeth is deleted. + // we try to delete route and only log a warning even deletion failed. + if err := n.netLink.RouteDel(&route); err != nil && !netlinkwrapper.IsNotExistsError(err) { + log.Warnf("failed to delete container route, containerAddr=%s, rtTable=%v: %v", containerAddr.String(), "main", err) + } else { + log.Debugf("Successfully deleted container route, containerAddr=%s, rtTable=%v", containerAddr.String(), "main") } - return nil -} -// TeardownPodNetwork cleanup ip rules -func (os *linuxNetwork) TeardownNS(addr *net.IPNet, deviceNumber int, log logger.Logger) error { - log.Debugf("TeardownNS: addr %s, deviceNumber %d", addr.String(), deviceNumber) - return tearDownNS(addr, deviceNumber, os.netLink, log) + return nil } -func tearDownNS(addr *net.IPNet, deviceNumber int, netLink netlinkwrapper.NetLink, log logger.Logger) error { - if addr == nil { - return errors.New("can't tear down network namespace with no IP address") +// setupIIFBasedContainerRouteRules setups the routes and route rules for containers based on input network interface. +// traffic to container(iif hostVlan) will be routed via the specified rtTable. +// traffic from container(iif hostVeth) will be routed via the specified rtTable. 
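+// For example (illustrative values, vlanID=7 hence rtTable=107), the net effect is:
+//   ip route replace <containerAddr> dev <hostVeth> table 107
+//   ip rule add iif <hostVlan> pref <vlanRulePriority> table 107
+//   ip rule add iif <hostVeth> pref <vlanRulePriority> table 107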
+func (n *linuxNetwork) setupIIFBasedContainerRouteRules(hostVeth netlink.Link, containerAddr *net.IPNet, hostVlan netlink.Link, rtTable int, log logger.Logger) error { + route := netlink.Route{ + LinkIndex: hostVeth.Attrs().Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: rtTable, } - // Remove to-pod rule - toContainerRule := netLink.NewRule() - toContainerRule.Dst = addr - toContainerRule.Priority = toContainerRulePriority - err := netLink.RuleDel(toContainerRule) - - if err != nil { - log.Errorf("Failed to delete toContainer rule for %s err %v", addr.String(), err) - } else { - log.Infof("Delete toContainer rule for %s ", addr.String()) + if err := n.netLink.RouteReplace(&route); err != nil { + return errors.Wrapf(err, "failed to setup container route, containerAddr=%s, hostVeth=%s, rtTable=%v", + containerAddr.String(), hostVeth.Attrs().Name, rtTable) } + log.Debugf("Successfully setup container route, containerAddr=%s, hostVeth=%s, rtTable=%v", + containerAddr.String(), hostVeth.Attrs().Name, rtTable) - if deviceNumber > 0 { - // remove from-pod rule only for non main table - err := deleteRuleListBySrc(*addr) - if err != nil { - log.Errorf("Failed to delete fromContainer for %s %v", addr.String(), err) - return errors.Wrapf(err, "delete NS network: failed to delete fromContainer rule for %s", addr.String()) - } - tableNumber := deviceNumber + 1 - log.Infof("Delete fromContainer rule for %s in table %d", addr.String(), tableNumber) + fromHostVlanRule := n.netLink.NewRule() + fromHostVlanRule.IifName = hostVlan.Attrs().Name + fromHostVlanRule.Priority = vlanRulePriority + fromHostVlanRule.Table = rtTable + if err := n.netLink.RuleAdd(fromHostVlanRule); err != nil && !isRuleExistsError(err) { + return errors.Wrapf(err, "unable to setup fromHostVlan rule, hostVlan=%s, rtTable=%v", hostVlan.Attrs().Name, rtTable) } + log.Debugf("Successfully setup fromHostVlan rule, hostVlan=%s, rtTable=%v", hostVlan.Attrs().Name, rtTable) - // cleanup host route - if err = netLink.RouteDel(&netlink.Route{ - Scope: netlink.SCOPE_LINK, - Dst: addr}); err != nil { - log.Errorf("delete NS network: failed to delete host route for %s, %v", addr.String(), err) + fromHostVethRule := n.netLink.NewRule() + fromHostVethRule.IifName = hostVeth.Attrs().Name + fromHostVethRule.Priority = vlanRulePriority + fromHostVethRule.Table = rtTable + if err := n.netLink.RuleAdd(fromHostVethRule); err != nil && !isRuleExistsError(err) { + return errors.Wrapf(err, "unable to setup fromHostVeth rule, hostVeth=%s, rtTable=%v", hostVeth.Attrs().Name, rtTable) } - log.Debug("Tear down of NS complete") + log.Debugf("Successfully setup fromHostVeth rule, hostVeth=%s, rtTable=%v", hostVeth.Attrs().Name, rtTable) + return nil } -// TeardownPodENINetwork tears down the vlan and corresponding ip rules. -func (os *linuxNetwork) TeardownPodENINetwork(vlanID int, log logger.Logger) error { - log.Infof("Tear down of pod ENI namespace") +func (n *linuxNetwork) teardownIIFBasedContainerRouteRules(rtTable int, log logger.Logger) error { + rule := n.netLink.NewRule() + rule.Priority = vlanRulePriority + rule.Table = rtTable - // 1. 
delete vlan - if vlan, err := os.netLink.LinkByName(fmt.Sprintf("vlan.eth.%d", - vlanID)); err == nil { - err := os.netLink.LinkDel(vlan) - if err != nil { - return errors.Wrapf(err, "TeardownPodENINetwork: failed to delete vlan link for %d", vlanID) - } + if err := netLinkRuleDelAll(n.netLink, rule); err != nil { + return errors.Wrapf(err, "failed to delete IIF based rules, rtTable=%v", rtTable) } + log.Debugf("Successfully deleted IIF based rules, rtTable=%v", rtTable) - // 2. delete two ip rules associated with the vlan - vlanRule := os.netLink.NewRule() - vlanRule.Table = vlanID + 100 - vlanRule.Priority = vlanRulePriority - - for { - // Loop until both the rules are deleted. - // one of them handles vlan traffic and other is for pod host veth traffic. - if err := os.netLink.RuleDel(vlanRule); err != nil { - if !containsNoSuchRule(err) { - return errors.Wrapf(err, "TeardownPodENINetwork: failed to delete container rule for %d", vlanID) - } - break - } - } return nil } -func deleteRuleListBySrc(src net.IPNet) error { - networkClient := networkutils.New() - return networkClient.DeleteRuleListBySrc(src) +// buildRoutesForVlan builds routes required for the vlan link. +func buildRoutesForVlan(vlanTableID int, vlanIndex int, gw net.IP) []netlink.Route { + return []netlink.Route{ + // Add a direct link route for the pod vlan link only. + { + LinkIndex: vlanIndex, + Dst: &net.IPNet{IP: gw, Mask: net.CIDRMask(32, 32)}, + Scope: netlink.SCOPE_LINK, + Table: vlanTableID, + }, + { + LinkIndex: vlanIndex, + Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, + Scope: netlink.SCOPE_UNIVERSE, + Gw: gw, + Table: vlanTableID, + }, + } } -func containsNoSuchRule(err error) bool { - if errno, ok := err.(syscall.Errno); ok { - return errno == syscall.ENOENT - } - return false +// buildVlanLinkName builds the name for vlan link. +func buildVlanLinkName(vlanID int) string { + return fmt.Sprintf("vlan.eth.%d", vlanID) } -func isRuleExistsError(err error) bool { - if errno, ok := err.(syscall.Errno); ok { - return errno == syscall.EEXIST - } - return false +// buildVlanLink builds vlan link for the pod. 
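+// The vlan link is created as a child of the trunk ENI (parentIfIndex) and is assigned
+// the branch ENI's MAC address, so that vlanID-tagged traffic egresses via the branch ENI.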
+func buildVlanLink(vlanName string, vlanID int, parentIfIndex int, eniMAC string) *netlink.Vlan { + la := netlink.NewLinkAttrs() + la.Name = vlanName + la.ParentIndex = parentIfIndex + la.HardwareAddr, _ = net.ParseMAC(eniMAC) + return &netlink.Vlan{LinkAttrs: la, VlanId: vlanID} } diff --git a/cmd/routed-eni-cni-plugin/driver/driver_test.go b/cmd/routed-eni-cni-plugin/driver/driver_test.go index 839ed6e553..45c7c46dcd 100644 --- a/cmd/routed-eni-cni-plugin/driver/driver_test.go +++ b/cmd/routed-eni-cni-plugin/driver/driver_test.go @@ -15,596 +15,4426 @@ package driver import ( "net" - "os" - "strings" "syscall" "testing" - "github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger" - - "github.com/golang/mock/gomock" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - - "github.com/vishvananda/netlink" - "github.com/aws/amazon-vpc-cni-k8s/pkg/cninswrapper/mock_ns" - mocks_ip "github.com/aws/amazon-vpc-cni-k8s/pkg/ipwrapper/mocks" "github.com/aws/amazon-vpc-cni-k8s/pkg/netlinkwrapper/mock_netlink" mock_netlinkwrapper "github.com/aws/amazon-vpc-cni-k8s/pkg/netlinkwrapper/mocks" mock_nswrapper "github.com/aws/amazon-vpc-cni-k8s/pkg/nswrapper/mocks" mock_procsyswrapper "github.com/aws/amazon-vpc-cni-k8s/pkg/procsyswrapper/mocks" + "github.com/aws/amazon-vpc-cni-k8s/pkg/sgpp" + "github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger" + "github.com/golang/mock/gomock" + "github.com/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" ) -const ( - testMAC = "01:23:45:67:89:ab" - testIP = "10.0.10.10" - testV6IP = "2001:db8::1" - testContVethName = "eth0" - testHostVethName = "aws-eth0" - testVlanName = "vlan.eth.1" - testFD = 10 - testnetnsPath = "/proc/1234/netns" - testTable = 10 - mtu = 9001 -) - -var logConfig = logger.Configuration{ +var testLogCfg = logger.Configuration{ LogLevel: "Debug", LogLocation: "stdout", } -var log = logger.New(&logConfig) +var testLogger = logger.New(&testLogCfg) -type testMocks struct { - ctrl *gomock.Controller - netlink *mock_netlinkwrapper.MockNetLink - ip *mocks_ip.MockIP - ns *mock_nswrapper.MockNS - netns *mock_ns.MockNetNS - procsys *mock_procsyswrapper.MockProcSys -} - -func setup(t *testing.T) *testMocks { - ctrl := gomock.NewController(t) - return &testMocks{ - ctrl: ctrl, - netlink: mock_netlinkwrapper.NewMockNetLink(ctrl), - ip: mocks_ip.NewMockIP(ctrl), - ns: mock_nswrapper.NewMockNS(ctrl), - netns: mock_ns.NewMockNetNS(ctrl), - procsys: mock_procsyswrapper.NewMockProcSys(ctrl), - } -} - -func (m *testMocks) mockWithFailureAt(t *testing.T, failAt string) *createVethPairContext { - mockContext := &createVethPairContext{ - contVethName: testContVethName, - hostVethName: testHostVethName, - netLink: m.netlink, - ip: m.ip, - v4Addr: &net.IPNet{ - IP: net.ParseIP(testIP), - Mask: net.IPv4Mask(255, 255, 255, 255), +func Test_linuxNetwork_SetupPodNetwork(t *testing.T) { + hostVethWithIndex9 := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eni8ea2c11fe35", + Index: 9, }, } - - hwAddr, err := net.ParseMAC(testMAC) - assert.NoError(t, err) - - mockLinkAttrs := &netlink.LinkAttrs{ - HardwareAddr: hwAddr, + containerAddr := &net.IPNet{ + IP: net.ParseIP("192.168.100.42"), + Mask: net.CIDRMask(32, 32), } - mockHostVeth := mock_netlink.NewMockLink(m.ctrl) - mockContVeth := mock_netlink.NewMockLink(m.ctrl) - var call *gomock.Call + toContainerRule := netlink.NewRule() + toContainerRule.Dst = containerAddr + toContainerRule.Priority = toContainerRulePriority + toContainerRule.Table = 
unix.RT_TABLE_MAIN - // veth pair - if failAt == "link-add" { - m.netlink.EXPECT().LinkAdd(gomock.Any()).Return(errors.New("error on LinkAdd")) - return mockContext - } - call = m.netlink.EXPECT().LinkAdd(gomock.Any()).Return(nil) + fromContainerRuleForRTTable4 := netlink.NewRule() + fromContainerRuleForRTTable4.Src = containerAddr + fromContainerRuleForRTTable4.Priority = fromContainerRulePriority + fromContainerRuleForRTTable4.Table = 4 - //hostVeth - if failAt == "link-by-name" { - m.netlink.EXPECT().LinkByName(gomock.Any()).Return(nil, errors.New("error on LinkByName host")).After(call) - return mockContext + type linkByNameCall struct { + linkName string + link netlink.Link + err error } - call = m.netlink.EXPECT().LinkByName(gomock.Any()).Return(mockHostVeth, nil).After(call) - - //host side setup - if failAt == "link-setup" { - m.netlink.EXPECT().LinkSetUp(mockHostVeth).Return(errors.New("error on LinkSetup")).After(call) - return mockContext + type linkDelCall struct { + link netlink.Link + err error } - call = m.netlink.EXPECT().LinkSetUp(mockHostVeth).Return(nil).After(call) - - //container side - if failAt == "link-byname" { - m.netlink.EXPECT().LinkByName(gomock.Any()).Return(mockContVeth, errors.New("error on LinkByName container")).After(call) - return mockContext + type linkSetupCall struct { + link netlink.Link + err error } - call = m.netlink.EXPECT().LinkByName(gomock.Any()).Return(mockContVeth, nil).After(call) - - // container setup - call = m.netlink.EXPECT().LinkSetUp(mockContVeth).Return(nil).After(call) - // container - call = mockContVeth.EXPECT().Attrs().Return(mockLinkAttrs).After(call) - - if failAt == "route-replace" { - m.netlink.EXPECT().RouteReplace(gomock.Any()).Return(errors.New("error on RouteReplace")).After(call) - return mockContext + type routeReplaceCall struct { + route *netlink.Route + err error } - call = m.netlink.EXPECT().RouteReplace(gomock.Any()).Return(nil).After(call) - - if failAt == "add-defaultroute" { - m.ip.EXPECT().AddDefaultRoute(gomock.Any(), mockContVeth).Return(errors.New("error on AddDefaultRoute")).After(call) - return mockContext + type ruleAddCall struct { + rule *netlink.Rule + err error } - call = m.ip.EXPECT().AddDefaultRoute(gomock.Any(), mockContVeth).Return(nil).After(call) - - // container addr - if failAt == "addr-add" { - m.netlink.EXPECT().AddrAdd(mockContVeth, gomock.Any()).Return(errors.New("error on AddrAdd")).After(call) - return mockContext + type withNetNSPathCall struct { + netNSPath string + err error } - call = m.netlink.EXPECT().AddrAdd(mockContVeth, gomock.Any()).Return(nil).After(call) - - // neighbor - call = mockContVeth.EXPECT().Attrs().Return(mockLinkAttrs).After(call) - // hostVethMAC - call = mockHostVeth.EXPECT().Attrs().Return(mockLinkAttrs).After(call) - if failAt == "neigh-add" { - m.netlink.EXPECT().NeighAdd(gomock.Any()).Return(errors.New("error on NeighAdd")).After(call) - return mockContext + type procSysSetCall struct { + key string + value string + err error } - call = m.netlink.EXPECT().NeighAdd(gomock.Any()).Return(nil).After(call) - call = m.netns.EXPECT().Fd().Return(uintptr(testFD)).After(call) - // move it host namespace - if failAt == "link-setns" { - m.netlink.EXPECT().LinkSetNsFd(mockHostVeth, testFD).Return(errors.New("error on LinkSetNsFd")).After(call) - return mockContext + type fields struct { + linkByNameCalls []linkByNameCall + linkDelCalls []linkDelCall + linkSetupCalls []linkSetupCall + withNetNSPathCalls []withNetNSPathCall + procSysSetCalls []procSysSetCall + 
routeReplaceCalls []routeReplaceCall + ruleAddCalls []ruleAddCall + } + type args struct { + hostVethName string + contVethName string + netnsPath string + v4Addr *net.IPNet + v6Addr *net.IPNet + deviceNumber int + mtu int + } + tests := []struct { + name string + fields fields + args args + wantErr error + }{ + { + name: "successfully setup pod network - pod sponsored by eth0", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethWithIndex9.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: toContainerRule, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: containerAddr, + v6Addr: nil, + deviceNumber: 0, + mtu: 9001, + }, + }, + { + name: "successfully setup pod network - pod sponsored by eth3", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethWithIndex9.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: toContainerRule, + }, + { + rule: fromContainerRuleForRTTable4, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: containerAddr, + v6Addr: nil, + deviceNumber: 3, + mtu: 9001, + }, + }, + { + name: "failed to setup vethPair", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + err: errors.New("some error"), + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: containerAddr, + v6Addr: nil, + deviceNumber: 3, + mtu: 9001, + }, + wantErr: errors.New("SetupPodNetwork: failed to setup veth pair: failed to setup veth network: some error"), + }, + { + name: "failed to setup container route", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: 
hostVethWithIndex9.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + err: errors.New("some error"), + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: containerAddr, + v6Addr: nil, + deviceNumber: 3, + mtu: 9001, + }, + wantErr: errors.New("SetupPodNetwork: unable to setup IP based container routes and rules: failed to setup container route, containerAddr=192.168.100.42/32, hostVeth=eni8ea2c11fe35, rtTable=main: some error"), + }, } - m.netlink.EXPECT().LinkSetNsFd(mockHostVeth, testFD).Return(nil).After(call) - - return mockContext -} - -func TestRun(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() - mockContext := m.mockWithFailureAt(t, "") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() - err := mockContext.run(m.netns) - assert.NoError(t, err) -} + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + netLink.EXPECT().NewRule().DoAndReturn(func() *netlink.Rule { return netlink.NewRule() }).AnyTimes() + for _, call := range tt.fields.linkByNameCalls { + netLink.EXPECT().LinkByName(call.linkName).Return(call.link, call.err) + } + for _, call := range tt.fields.linkDelCalls { + netLink.EXPECT().LinkDel(call.link).Return(call.err) + } + for _, call := range tt.fields.linkSetupCalls { + netLink.EXPECT().LinkSetUp(call.link).Return(call.err) + } + for _, call := range tt.fields.routeReplaceCalls { + netLink.EXPECT().RouteReplace(call.route).Return(call.err) + } + for _, call := range tt.fields.ruleAddCalls { + netLink.EXPECT().RuleAdd(call.rule).Return(call.err) + } -func TestRunLinkAddErr(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() + ns := mock_nswrapper.NewMockNS(ctrl) + for _, call := range tt.fields.withNetNSPathCalls { + // we just assume the createVethContext executes, the logic of createVethContext will be tested by createVethContext itself. 
+ ns.EXPECT().WithNetNSPath(call.netNSPath, gomock.Any()).Return(call.err) + } - mockContext := m.mockWithFailureAt(t, "link-add") + procSys := mock_procsyswrapper.NewMockProcSys(ctrl) + for _, call := range tt.fields.procSysSetCalls { + procSys.EXPECT().Set(call.key, call.value).Return(call.err) + } - err := mockContext.run(m.netns) - assert.Error(t, err) + n := &linuxNetwork{ + netLink: netLink, + ns: ns, + procSys: procSys, + } + err := n.SetupPodNetwork(tt.args.hostVethName, tt.args.contVethName, tt.args.netnsPath, tt.args.v4Addr, tt.args.v6Addr, tt.args.deviceNumber, tt.args.mtu, testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + } + }) + } } -func TestRunErrLinkByNameHost(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() +func Test_linuxNetwork_TeardownPodNetwork(t *testing.T) { + containerAddr := &net.IPNet{ + IP: net.ParseIP("192.168.100.42"), + Mask: net.CIDRMask(32, 32), + } - mockContext := m.mockWithFailureAt(t, "link-by-name") + toContainerRoute := &netlink.Route{ + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + } + toContainerRule := netlink.NewRule() + toContainerRule.Dst = containerAddr + toContainerRule.Priority = toContainerRulePriority + toContainerRule.Table = unix.RT_TABLE_MAIN - err := mockContext.run(m.netns) - assert.Error(t, err) -} + fromContainerRuleForRTTable4 := netlink.NewRule() + fromContainerRuleForRTTable4.Src = containerAddr + fromContainerRuleForRTTable4.Priority = fromContainerRulePriority + fromContainerRuleForRTTable4.Table = 4 + type routeDelCall struct { + route *netlink.Route + err error + } + type ruleDelCall struct { + rule *netlink.Rule + err error + } + type fields struct { + routeDelCalls []routeDelCall + ruleDelCalls []ruleDelCall + } -func TestRunErrSetup(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() + type args struct { + containerAddr *net.IPNet + deviceNumber int + } + tests := []struct { + name string + fields fields + args args + wantErr error + }{ + { + name: "successfully teardown pod network - pod sponsored by eth0", + fields: fields{ + routeDelCalls: []routeDelCall{ + { + route: toContainerRoute, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: toContainerRule, + }, + }, + }, + args: args{ + containerAddr: containerAddr, + deviceNumber: 0, + }, + }, + { + name: "successfully teardown pod network - pod sponsored by eth3", + fields: fields{ + routeDelCalls: []routeDelCall{ + { + route: toContainerRoute, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: toContainerRule, + }, + { + rule: fromContainerRuleForRTTable4, + }, + { + rule: fromContainerRuleForRTTable4, + err: syscall.ENOENT, + }, + }, + }, + args: args{ + containerAddr: containerAddr, + deviceNumber: 3, + }, + }, + { + name: "failed to delete toContainer rule", + fields: fields{ + ruleDelCalls: []ruleDelCall{ + { + rule: toContainerRule, + err: errors.New("some error"), + }, + }, + }, + args: args{ + containerAddr: containerAddr, + deviceNumber: 3, + }, + wantErr: errors.New("TeardownPodNetwork: unable to teardown IP based container routes and rules: failed to delete toContainer rule, containerAddr=192.168.100.42/32, rtTable=main: some error"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() - mockContext := m.mockWithFailureAt(t, "link-setup") + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + netLink.EXPECT().NewRule().DoAndReturn(func() *netlink.Rule { return 
netlink.NewRule() }).AnyTimes() + for _, call := range tt.fields.routeDelCalls { + netLink.EXPECT().RouteDel(call.route).Return(call.err) + } + for _, call := range tt.fields.ruleDelCalls { + netLink.EXPECT().RuleDel(call.rule).Return(call.err) + } - err := mockContext.run(m.netns) - assert.Error(t, err) + n := &linuxNetwork{ + netLink: netLink, + } + err := n.TeardownPodNetwork(tt.args.containerAddr, tt.args.deviceNumber, testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + } + }) + } } -func TestRunErrLinkByNameCont(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() +func Test_linuxNetwork_SetupBranchENIPodNetwork(t *testing.T) { + vlanID := 7 + eniMac := "00:00:5e:00:53:af" + subnetGW := "192.168.120.1" + parentIfIndex := 3 - mockContext := m.mockWithFailureAt(t, "link-byname") + hostVethWithIndex9 := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eni8ea2c11fe35", + Index: 9, + }, + } + vlanLinkPostAddWithIndex11 := buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac) + vlanLinkPostAddWithIndex11.Index = 11 + containerAddr := &net.IPNet{ + IP: net.ParseIP("192.168.100.42"), + Mask: net.CIDRMask(32, 32), + } - err := mockContext.run(m.netns) - assert.Error(t, err) -} + oldFromHostVethRule := netlink.NewRule() + oldFromHostVethRule.IifName = "eni8ea2c11fe35" + oldFromHostVethRule.Priority = vlanRulePriority -func TestRunErrRouteAdd(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() + fromHostVlanRule := netlink.NewRule() + fromHostVlanRule.IifName = vlanLinkPostAddWithIndex11.Name + fromHostVlanRule.Priority = vlanRulePriority + fromHostVlanRule.Table = 107 - mockContext := m.mockWithFailureAt(t, "route-replace") + fromHostVethRule := netlink.NewRule() + fromHostVethRule.IifName = hostVethWithIndex9.Name + fromHostVethRule.Priority = vlanRulePriority + fromHostVethRule.Table = 107 - err := mockContext.run(m.netns) - assert.Error(t, err) -} + toContainerRule := netlink.NewRule() + toContainerRule.Dst = containerAddr + toContainerRule.Priority = toContainerRulePriority + toContainerRule.Table = unix.RT_TABLE_MAIN -func TestRunErrAddDefaultRoute(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() + fromContainerRule := netlink.NewRule() + fromContainerRule.Src = containerAddr + fromContainerRule.Priority = fromContainerRulePriority + fromContainerRule.Table = 107 - mockContext := m.mockWithFailureAt(t, "add-defaultroute") + type linkByNameCall struct { + linkName string + link netlink.Link + err error + } + type linkAddCall struct { + link netlink.Link + linkIndex int + err error + } + type linkDelCall struct { + link netlink.Link + err error + } + type linkSetupCall struct { + link netlink.Link + err error + } + type routeReplaceCall struct { + route *netlink.Route + err error + } + type ruleAddCall struct { + rule *netlink.Rule + err error + } + type ruleDelCall struct { + rule *netlink.Rule + err error + } - err := mockContext.run(m.netns) - assert.Error(t, err) -} + type withNetNSPathCall struct { + netNSPath string + err error + } + type procSysSetCall struct { + key string + value string + err error + } -func TestRunErrAddrAdd(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() + type fields struct { + linkByNameCalls []linkByNameCall + linkAddCalls []linkAddCall + linkDelCalls []linkDelCall + linkSetupCalls []linkSetupCall + routeReplaceCalls []routeReplaceCall + ruleAddCalls []ruleAddCall + ruleDelCalls []ruleDelCall + withNetNSPathCalls []withNetNSPathCall + procSysSetCalls 
[]procSysSetCall + } + type args struct { + hostVethName string + contVethName string + netnsPath string + v4Addr *net.IPNet + v6Addr *net.IPNet + vlanID int + eniMAC string + subnetGW string + parentIfIndex int + mtu int + podSGEnforcingMode sgpp.EnforcingMode + } + tests := []struct { + name string + fields fields + args args + wantErr error + }{ + { + name: "successfully setup pod network - traffic enforced with strict mode", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "vlan.eth.7", + err: errors.New("not exists"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + linkIndex: 11, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: vlanLinkPostAddWithIndex11, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.ParseIP(subnetGW), Mask: net.CIDRMask(32, 32)}, + Scope: netlink.SCOPE_LINK, + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, + Scope: netlink.SCOPE_UNIVERSE, + Gw: net.ParseIP(subnetGW), + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: hostVethWithIndex9.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: 107, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: fromHostVlanRule, + }, + { + rule: fromHostVethRule, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: oldFromHostVethRule, + err: syscall.ENOENT, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: containerAddr, + v6Addr: nil, + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: subnetGW, + parentIfIndex: parentIfIndex, + mtu: 9001, + podSGEnforcingMode: sgpp.EnforcingModeStrict, + }, + }, + { + name: "successfully setup pod network - traffic enforced with standard mode", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "vlan.eth.7", + err: errors.New("not exists"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + linkIndex: 11, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: vlanLinkPostAddWithIndex11, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.ParseIP(subnetGW), Mask: net.CIDRMask(32, 32)}, + Scope: netlink.SCOPE_LINK, + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, + Scope: netlink.SCOPE_UNIVERSE, + Gw: net.ParseIP(subnetGW), + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: hostVethWithIndex9.Index, + Scope: netlink.SCOPE_LINK, + Dst: 
containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: toContainerRule, + }, + { + rule: fromContainerRule, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: oldFromHostVethRule, + err: syscall.ENOENT, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: containerAddr, + v6Addr: nil, + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: subnetGW, + parentIfIndex: parentIfIndex, + mtu: 9001, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + }, + { + name: "failed to setup vethPair", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + err: errors.New("some error"), + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: containerAddr, + v6Addr: nil, + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: subnetGW, + parentIfIndex: parentIfIndex, + mtu: 9001, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + wantErr: errors.New("SetupBranchENIPodNetwork: failed to setup veth pair: failed to setup veth network: some error"), + }, + { + name: "failed to clean up old hostVeth rule", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: oldFromHostVethRule, + err: errors.New("some error"), + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: containerAddr, + v6Addr: nil, + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: subnetGW, + parentIfIndex: parentIfIndex, + mtu: 9001, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + wantErr: errors.New("SetupBranchENIPodNetwork: failed to delete hostVeth rule for eni8ea2c11fe35: some error"), + }, + { + name: "failed to setup vlan", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "vlan.eth.7", + err: errors.New("not exists"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + err: errors.New("some error"), + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: oldFromHostVethRule, + err: syscall.ENOENT, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + 
key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: containerAddr, + v6Addr: nil, + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: subnetGW, + parentIfIndex: parentIfIndex, + mtu: 9001, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + wantErr: errors.New("SetupBranchENIPodNetwork: failed to setup vlan: failed to add vlan link vlan.eth.7: some error"), + }, + { + name: "failed to setup IP based container route", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "vlan.eth.7", + err: errors.New("not exists"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + linkIndex: 11, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: vlanLinkPostAddWithIndex11, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.ParseIP(subnetGW), Mask: net.CIDRMask(32, 32)}, + Scope: netlink.SCOPE_LINK, + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, + Scope: netlink.SCOPE_UNIVERSE, + Gw: net.ParseIP(subnetGW), + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: hostVethWithIndex9.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + err: errors.New("some error"), + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: oldFromHostVethRule, + err: syscall.ENOENT, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: containerAddr, + v6Addr: nil, + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: subnetGW, + parentIfIndex: parentIfIndex, + mtu: 9001, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + wantErr: errors.New("SetupBranchENIPodNetwork: unable to setup IP based container routes and rules: failed to setup container route, containerAddr=192.168.100.42/32, hostVeth=eni8ea2c11fe35, rtTable=main: some error"), + }, + { + name: "failed to setup IIF based container route", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "vlan.eth.7", + err: errors.New("not exists"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + linkIndex: 11, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: vlanLinkPostAddWithIndex11, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.ParseIP(subnetGW), Mask: net.CIDRMask(32, 32)}, + Scope: netlink.SCOPE_LINK, + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: 
vlanLinkPostAddWithIndex11.Index, + Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, + Scope: netlink.SCOPE_UNIVERSE, + Gw: net.ParseIP(subnetGW), + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: hostVethWithIndex9.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: 107, + }, + err: errors.New("some error"), + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: oldFromHostVethRule, + err: syscall.ENOENT, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + v4Addr: containerAddr, + v6Addr: nil, + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: subnetGW, + parentIfIndex: parentIfIndex, + mtu: 9001, + podSGEnforcingMode: sgpp.EnforcingModeStrict, + }, + wantErr: errors.New("SetupBranchENIPodNetwork: unable to setup IIF based container routes and rules: failed to setup container route, containerAddr=192.168.100.42/32, hostVeth=eni8ea2c11fe35, rtTable=107: some error"), + }, + } - mockContext := m.mockWithFailureAt(t, "addr-add") + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() - err := mockContext.run(m.netns) - assert.Error(t, err) -} + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + netLink.EXPECT().NewRule().DoAndReturn(func() *netlink.Rule { return netlink.NewRule() }).AnyTimes() + for _, call := range tt.fields.linkByNameCalls { + netLink.EXPECT().LinkByName(call.linkName).Return(call.link, call.err) + } + for _, call := range tt.fields.linkAddCalls { + netLink.EXPECT().LinkAdd(call.link).DoAndReturn(func(link netlink.Link) error { + if call.err != nil { + return call.err + } + vlanBeforeAdd := link.(*netlink.Vlan) + vlanBeforeAdd.Index = call.linkIndex + return nil + }) + } + for _, call := range tt.fields.linkDelCalls { + netLink.EXPECT().LinkDel(call.link).Return(call.err) + } + for _, call := range tt.fields.linkSetupCalls { + netLink.EXPECT().LinkSetUp(call.link).Return(call.err) + } + for _, call := range tt.fields.routeReplaceCalls { + netLink.EXPECT().RouteReplace(call.route).Return(call.err) + } + for _, call := range tt.fields.ruleAddCalls { + netLink.EXPECT().RuleAdd(call.rule).Return(call.err) + } + for _, call := range tt.fields.ruleDelCalls { + netLink.EXPECT().RuleDel(call.rule).Return(call.err) + } -func TestRunErrNeighAdd(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() + ns := mock_nswrapper.NewMockNS(ctrl) + for _, call := range tt.fields.withNetNSPathCalls { + // we just assume the createVethContext executes, the logic of createVethContext will be tested by createVethContext itself. 
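+				// As in the SetupPodNetwork tests above, the namespace closure is not
+				// executed here; only the host-side netlink expectations are verified.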
+ ns.EXPECT().WithNetNSPath(call.netNSPath, gomock.Any()).Return(call.err) + } - mockContext := m.mockWithFailureAt(t, "neigh-add") + procSys := mock_procsyswrapper.NewMockProcSys(ctrl) + for _, call := range tt.fields.procSysSetCalls { + procSys.EXPECT().Set(call.key, call.value).Return(call.err) + } - err := mockContext.run(m.netns) - assert.Error(t, err) + n := &linuxNetwork{ + netLink: netLink, + ns: ns, + procSys: procSys, + } + err := n.SetupBranchENIPodNetwork(tt.args.hostVethName, tt.args.contVethName, tt.args.netnsPath, tt.args.v4Addr, tt.args.v6Addr, tt.args.vlanID, tt.args.eniMAC, tt.args.subnetGW, tt.args.parentIfIndex, tt.args.mtu, tt.args.podSGEnforcingMode, testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + } + }) + } } -func TestRunErrLinkSetNsFd(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() +func Test_linuxNetwork_TeardownBranchENIPodNetwork(t *testing.T) { + vlanID := 7 + containerAddr := &net.IPNet{ + IP: net.ParseIP("192.168.100.42"), + Mask: net.CIDRMask(32, 32), + } - mockContext := m.mockWithFailureAt(t, "link-setns") + vlanRuleForRTTable107 := netlink.NewRule() + vlanRuleForRTTable107.Priority = vlanRulePriority + vlanRuleForRTTable107.Table = 107 - err := mockContext.run(m.netns) - assert.Error(t, err) -} + toContainerRoute := &netlink.Route{ + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + } -func (m *testMocks) setupMockForVethCreation(failAt string) *mock_netlink.MockLink { - mockHostVeth := mock_netlink.NewMockLink(m.ctrl) + toContainerRule := netlink.NewRule() + toContainerRule.Dst = containerAddr + toContainerRule.Priority = toContainerRulePriority + toContainerRule.Table = unix.RT_TABLE_MAIN - m.netlink.EXPECT().LinkByName(testHostVethName).Return(mockHostVeth, errors.New("hostVeth already exists")) - m.ns.EXPECT().WithNetNSPath(testnetnsPath, gomock.Any()).Return(nil) + fromContainerRule := netlink.NewRule() + fromContainerRule.Src = containerAddr + fromContainerRule.Priority = fromContainerRulePriority + fromContainerRule.Table = 107 - if failAt == "veth-link-byname" { - m.netlink.EXPECT().LinkByName(testHostVethName).Return(mockHostVeth, errors.New("error on hostVethName")) - return nil + type linkByNameCall struct { + linkName string + link netlink.Link + err error } - m.netlink.EXPECT().LinkByName(testHostVethName).Return(mockHostVeth, nil) - - if failAt == "veth-procsys" { - m.procsys.EXPECT().Set("net/ipv6/conf/"+testHostVethName+"/accept_ra", "0").Return(errors.New("error writing to /proc/sys/")) - return nil + type linkDelCall struct { + link netlink.Link + err error } - var procsysRet error - if failAt == "no-ipv6" { - // Note os.ErrNotExist return - should be ignored - procsysRet = os.ErrNotExist + type routeDelCall struct { + route *netlink.Route + err error } - m.procsys.EXPECT().Set("net/ipv6/conf/"+testHostVethName+"/accept_ra", "0").Return(procsysRet) - m.procsys.EXPECT().Set("net/ipv6/conf/"+testHostVethName+"/accept_redirects", "0").Return(procsysRet) - - if failAt == "veth-link-setup" { - m.netlink.EXPECT().LinkSetUp(mockHostVeth).Return(errors.New("error on LinkSetup")) - return nil + type ruleDelCall struct { + rule *netlink.Rule + err error } - m.netlink.EXPECT().LinkSetUp(mockHostVeth).Return(nil) - return mockHostVeth -} -func (m *testMocks) mockSetupPodNetworkWithFailureAt(t *testing.T, failAt string) { - mockHostVeth := m.setupMockForVethCreation(failAt) - - // skip setting other mocks if test is expected to fail at 
veth creation. - if strings.HasPrefix(failAt, "veth-") { - return - } - - hwAddr, err := net.ParseMAC(testMAC) - assert.NoError(t, err) - mockLinkAttrs := &netlink.LinkAttrs{ - HardwareAddr: hwAddr, - } - //log.Printf - mockHostVeth.EXPECT().Attrs().Return(mockLinkAttrs) - //add host route - mockHostVeth.EXPECT().Attrs().Return(mockLinkAttrs) - if failAt == "route-replace" { - m.netlink.EXPECT().RouteReplace(gomock.Any()).Return(errors.New("error on RouteReplace")) - return - } - m.netlink.EXPECT().RouteReplace(gomock.Any()).Return(nil) - - testRule := &netlink.Rule{ - SuppressIfgroup: -1, - SuppressPrefixlen: -1, - Priority: -1, - Mark: -1, - Mask: -1, - Goto: -1, - Flow: -1, - } - m.netlink.EXPECT().NewRule().Return(testRule) - // test to-pod rule - m.netlink.EXPECT().RuleDel(gomock.Any()).Return(nil) - m.netlink.EXPECT().RuleAdd(gomock.Any()).Return(nil) - - // test from-pod rule - // FIXME(gus): this is the same as to-pod rule :/ - m.netlink.EXPECT().NewRule().Return(testRule) - m.netlink.EXPECT().RuleDel(gomock.Any()).Return(nil) - m.netlink.EXPECT().RuleAdd(gomock.Any()).Return(nil) -} - -func TestSetupPodNetwork(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() - - m.mockSetupPodNetworkWithFailureAt(t, "") + type fields struct { + linkByNameCalls []linkByNameCall + linkDelCalls []linkDelCall + routeDelCalls []routeDelCall + ruleDelCalls []ruleDelCall + } + type args struct { + containerAddr *net.IPNet + vlanID int + podSGEnforcingMode sgpp.EnforcingMode + } + tests := []struct { + name string + fields fields + args args + wantErr error + }{ + { + name: "successfully teardown pod network - pod was setup under strict mode", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + linkDelCalls: []linkDelCall{ + { + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + routeDelCalls: []routeDelCall{ + { + route: toContainerRoute, + err: syscall.ESRCH, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: vlanRuleForRTTable107, + }, + { + rule: vlanRuleForRTTable107, + }, + { + rule: vlanRuleForRTTable107, + err: syscall.ENOENT, + }, + { + rule: toContainerRule, + err: syscall.ENOENT, + }, + { + rule: fromContainerRule, + err: syscall.ENOENT, + }, + }, + }, + args: args{ + containerAddr: containerAddr, + vlanID: vlanID, + podSGEnforcingMode: sgpp.EnforcingModeStrict, + }, + }, + { + name: "successfully teardown pod network - pod was setup under standard mode", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + linkDelCalls: []linkDelCall{ + { + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + routeDelCalls: []routeDelCall{ + { + route: toContainerRoute, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: vlanRuleForRTTable107, + err: syscall.ENOENT, + }, + { + rule: toContainerRule, + }, + { + rule: fromContainerRule, + }, + { + rule: fromContainerRule, + err: syscall.ENOENT, + }, + }, + }, + args: args{ + containerAddr: containerAddr, + vlanID: vlanID, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + }, + { + name: "failed to teardown vlan", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + linkDelCalls: []linkDelCall{ + { + link: &netlink.Vlan{VlanId: vlanID}, + err: errors.New("some error"), + }, + }, + }, + args: args{ + containerAddr: containerAddr, + vlanID: vlanID, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + 
wantErr: errors.New("TeardownBranchENIPodNetwork: failed to teardown vlan: failed to delete vlan link vlan.eth.7: some error"), + }, + { + name: "failed to delete vlan rule", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + linkDelCalls: []linkDelCall{ + { + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: vlanRuleForRTTable107, + err: errors.New("some error"), + }, + }, + }, + args: args{ + containerAddr: containerAddr, + vlanID: vlanID, + podSGEnforcingMode: sgpp.EnforcingModeStrict, + }, + wantErr: errors.New("TeardownBranchENIPodNetwork: unable to teardown IIF based container routes and rules: failed to delete IIF based rules, rtTable=107: some error"), + }, + { + name: "failed to delete toContainer rule", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + linkDelCalls: []linkDelCall{ + { + link: &netlink.Vlan{VlanId: vlanID}, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: vlanRuleForRTTable107, + err: syscall.ENOENT, + }, + { + rule: toContainerRule, + err: errors.New("some error"), + }, + }, + }, + args: args{ + containerAddr: containerAddr, + vlanID: vlanID, + podSGEnforcingMode: sgpp.EnforcingModeStandard, + }, + wantErr: errors.New("TeardownBranchENIPodNetwork: unable to teardown IP based container routes and rules: failed to delete toContainer rule, containerAddr=192.168.100.42/32, rtTable=main: some error"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() - addr := &net.IPNet{ - IP: net.ParseIP(testIP), - Mask: net.IPv4Mask(255, 255, 255, 255), + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + netLink.EXPECT().NewRule().DoAndReturn(func() *netlink.Rule { return netlink.NewRule() }).AnyTimes() + for _, call := range tt.fields.linkByNameCalls { + netLink.EXPECT().LinkByName(call.linkName).Return(call.link, call.err) + } + for _, call := range tt.fields.linkDelCalls { + netLink.EXPECT().LinkDel(call.link).Return(call.err) + } + for _, call := range tt.fields.routeDelCalls { + netLink.EXPECT().RouteDel(call.route).Return(call.err) + } + for _, call := range tt.fields.ruleDelCalls { + netLink.EXPECT().RuleDel(call.rule).Return(call.err) + } + n := &linuxNetwork{ + netLink: netLink, + } + err := n.TeardownBranchENIPodNetwork(tt.args.containerAddr, tt.args.vlanID, tt.args.podSGEnforcingMode, testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + } + }) } - var cidrs []string - err := setupNS(testHostVethName, testContVethName, testnetnsPath, addr, &net.IPNet{}, testTable, cidrs, true, m.netlink, m.ns, mtu, log, m.procsys) - assert.NoError(t, err) } -func TestSetupIPv6PodNetwork(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() +func Test_createVethPairContext_run(t *testing.T) { + contVethWithIndex1 := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Index: 1, + }, + } + hostVethWithIndex9 := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eni8ea2c11fe35", + Index: 9, + HardwareAddr: net.HardwareAddr("00:00:5e:00:53:af"), + }, + } - m.mockSetupPodNetworkWithFailureAt(t, "") - v6Addr := &net.IPNet{ - IP: net.ParseIP(testV6IP), - Mask: net.CIDRMask(128, 128), + type linkByNameCall struct { + linkName string + link netlink.Link + err error + } + type linkAddCall struct { + link 
netlink.Link + err error + } + type linkSetupCall struct { + link netlink.Link + err error + } + type routeReplaceCall struct { + route *netlink.Route + err error + } + type routeAddCall struct { + route *netlink.Route + err error + } + type addrAddCall struct { + link netlink.Link + addr *netlink.Addr + err error + } + type addrListCall struct { + link netlink.Link + family int + addrs []netlink.Addr + err error + } + type neighAddCall struct { + neigh *netlink.Neigh + err error + } + type linkSetNsFdCall struct { + link netlink.Link + fd int + err error + } + type procSysSetCall struct { + key string + value string + err error + } + type nsFDCall struct { + fd uintptr } - var cidrs []string - err := setupNS(testHostVethName, testContVethName, testnetnsPath, &net.IPNet{}, v6Addr, testTable, cidrs, true, m.netlink, m.ns, mtu, log, m.procsys) - assert.NoError(t, err) -} + type fields struct { + linkByNameCalls []linkByNameCall + linkAddCalls []linkAddCall + linkSetupCalls []linkSetupCall + routeReplaceCalls []routeReplaceCall + routeAddCalls []routeAddCall + addrAddCalls []addrAddCall + addrListCalls []addrListCall + neighAddCalls []neighAddCall + linkSetNsFdCalls []linkSetNsFdCall + procSysSetCalls []procSysSetCall + nsFDCalls []nsFDCall + } + type args struct { + contVethName string + hostVethName string + v4Addr *net.IPNet + v6Addr *net.IPNet + mtu int + } + tests := []struct { + name string + fields fields + args args + wantErr error + }{ + { + name: "successfully created vethPair for ipv4 pods", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: contVethWithIndex1, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_LINK, + Dst: &net.IPNet{ + IP: net.IPv4(169, 254, 1, 1), + Mask: net.CIDRMask(32, 32), + }, + }, + }, + }, + routeAddCalls: []routeAddCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_UNIVERSE, + Dst: &net.IPNet{ + IP: net.IPv4zero, + Mask: net.CIDRMask(0, 32), + }, + Gw: net.IPv4(169, 254, 1, 1), + }, + }, + }, + addrAddCalls: []addrAddCall{ + { + link: contVethWithIndex1, + addr: &netlink.Addr{ + IPNet: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + }, + }, + }, + neighAddCalls: []neighAddCall{ + { + neigh: &netlink.Neigh{ + LinkIndex: contVethWithIndex1.Attrs().Index, + State: netlink.NUD_PERMANENT, + IP: net.IPv4(169, 254, 1, 1), + HardwareAddr: hostVethWithIndex9.Attrs().HardwareAddr, + }, + }, + }, + linkSetNsFdCalls: []linkSetNsFdCall{ + { + link: hostVethWithIndex9, + fd: 3, + }, + }, + procSysSetCalls: []procSysSetCall{}, + nsFDCalls: []nsFDCall{ + { + fd: uintptr(3), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + v6Addr: nil, + mtu: 9001, + }, + }, + { + name: "successfully created vethPair for ipv6 pods", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: 
"eth0", + link: contVethWithIndex1, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: contVethWithIndex1, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_LINK, + Dst: &net.IPNet{ + IP: net.IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: net.CIDRMask(128, 128), + }, + }, + }, + }, + routeAddCalls: []routeAddCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_UNIVERSE, + Dst: &net.IPNet{ + IP: net.IPv6zero, + Mask: net.CIDRMask(0, 128), + }, + Gw: net.IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + }, + addrAddCalls: []addrAddCall{ + { + link: contVethWithIndex1, + addr: &netlink.Addr{ + IPNet: &net.IPNet{ + IP: net.ParseIP("2001:db8:3333:4444:5555:6666:7777:8888"), + Mask: net.CIDRMask(128, 128), + }, + }, + }, + }, + addrListCalls: []addrListCall{ + { + link: contVethWithIndex1, + family: netlink.FAMILY_V6, + addrs: []netlink.Addr{ + { + IPNet: &net.IPNet{ + IP: net.ParseIP("2001:db8:3333:4444:5555:6666:7777:8888"), + Mask: net.CIDRMask(128, 128), + }, + }, + }, + }, + }, + neighAddCalls: []neighAddCall{ + { + neigh: &netlink.Neigh{ + LinkIndex: contVethWithIndex1.Attrs().Index, + State: netlink.NUD_PERMANENT, + IP: net.IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + HardwareAddr: hostVethWithIndex9.Attrs().HardwareAddr, + }, + }, + }, + linkSetNsFdCalls: []linkSetNsFdCall{ + { + link: hostVethWithIndex9, + fd: 3, + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eth0/disable_ipv6", + value: "0", + }, + { + key: "net/ipv6/conf/lo/disable_ipv6", + value: "0", + }, + }, + nsFDCalls: []nsFDCall{ + { + fd: uintptr(3), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: nil, + v6Addr: &net.IPNet{ + IP: net.ParseIP("2001:db8:3333:4444:5555:6666:7777:8888"), + Mask: net.CIDRMask(128, 128), + }, + mtu: 9001, + }, + }, + { + name: "failed to add vethPair", + fields: fields{ + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + err: errors.New("some error"), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + v6Addr: nil, + mtu: 9001, + }, + wantErr: errors.New("some error"), + }, + { + name: "failed to find hostVeth", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("some error"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + v6Addr: nil, + mtu: 9001, + }, + wantErr: errors.New("setup NS network: failed to find link \"eni8ea2c11fe35\": some error"), + }, + { + name: "failed to setUp hostVeth", + fields: fields{ + 
linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + err: errors.New("some error"), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + v6Addr: nil, + mtu: 9001, + }, + wantErr: errors.New("setup NS network: failed to set link \"eni8ea2c11fe35\" up: some error"), + }, + { + name: "failed to find contVeth", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eth0", + err: errors.New("some error"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + v6Addr: nil, + mtu: 9001, + }, + wantErr: errors.New("setup NS network: failed to find link \"eth0\": some error"), + }, + { + name: "failed to setUp contVeth", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: contVethWithIndex1, + err: errors.New("some error"), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + v6Addr: nil, + mtu: 9001, + }, + wantErr: errors.New("setup NS network: failed to set link \"eth0\" up: some error"), + }, + { + name: "failed to add default gateway", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: contVethWithIndex1, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_LINK, + Dst: &net.IPNet{ + IP: net.IPv4(169, 254, 1, 1), + Mask: net.CIDRMask(32, 32), + }, + }, + err: errors.New("some error"), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + v6Addr: nil, + mtu: 9001, + }, + wantErr: errors.New("setup NS network: failed to add default gateway: some error"), + }, + { + name: "failed to add default route", + fields: fields{ + linkByNameCalls: 
[]linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: contVethWithIndex1, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_LINK, + Dst: &net.IPNet{ + IP: net.IPv4(169, 254, 1, 1), + Mask: net.CIDRMask(32, 32), + }, + }, + }, + }, + routeAddCalls: []routeAddCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_UNIVERSE, + Dst: &net.IPNet{ + IP: net.IPv4zero, + Mask: net.CIDRMask(0, 32), + }, + Gw: net.IPv4(169, 254, 1, 1), + }, + err: errors.New("some error"), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + v6Addr: nil, + mtu: 9001, + }, + wantErr: errors.New("setup NS network: failed to add default route: some error"), + }, + { + name: "failed to add container IP", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: contVethWithIndex1, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_LINK, + Dst: &net.IPNet{ + IP: net.IPv4(169, 254, 1, 1), + Mask: net.CIDRMask(32, 32), + }, + }, + }, + }, + routeAddCalls: []routeAddCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_UNIVERSE, + Dst: &net.IPNet{ + IP: net.IPv4zero, + Mask: net.CIDRMask(0, 32), + }, + Gw: net.IPv4(169, 254, 1, 1), + }, + }, + }, + addrAddCalls: []addrAddCall{ + { + link: contVethWithIndex1, + addr: &netlink.Addr{ + IPNet: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + }, + err: errors.New("some error"), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + v6Addr: nil, + mtu: 9001, + }, + wantErr: errors.New("setup NS network: failed to add IP addr to \"eth0\": some error"), + }, + { + name: "failed to add static ARP", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: contVethWithIndex1, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_LINK, + Dst: &net.IPNet{ + IP: 
net.IPv4(169, 254, 1, 1), + Mask: net.CIDRMask(32, 32), + }, + }, + }, + }, + routeAddCalls: []routeAddCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_UNIVERSE, + Dst: &net.IPNet{ + IP: net.IPv4zero, + Mask: net.CIDRMask(0, 32), + }, + Gw: net.IPv4(169, 254, 1, 1), + }, + }, + }, + addrAddCalls: []addrAddCall{ + { + link: contVethWithIndex1, + addr: &netlink.Addr{ + IPNet: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + }, + }, + }, + neighAddCalls: []neighAddCall{ + { + neigh: &netlink.Neigh{ + LinkIndex: contVethWithIndex1.Attrs().Index, + State: netlink.NUD_PERMANENT, + IP: net.IPv4(169, 254, 1, 1), + HardwareAddr: hostVethWithIndex9.Attrs().HardwareAddr, + }, + err: errors.New("some error"), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + v6Addr: nil, + mtu: 9001, + }, + wantErr: errors.New("setup NS network: failed to add static ARP: some error"), + }, + { + name: "failed to move hostVeth to host netNS", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: contVethWithIndex1, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_LINK, + Dst: &net.IPNet{ + IP: net.IPv4(169, 254, 1, 1), + Mask: net.CIDRMask(32, 32), + }, + }, + }, + }, + routeAddCalls: []routeAddCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_UNIVERSE, + Dst: &net.IPNet{ + IP: net.IPv4zero, + Mask: net.CIDRMask(0, 32), + }, + Gw: net.IPv4(169, 254, 1, 1), + }, + }, + }, + addrAddCalls: []addrAddCall{ + { + link: contVethWithIndex1, + addr: &netlink.Addr{ + IPNet: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + }, + }, + }, + neighAddCalls: []neighAddCall{ + { + neigh: &netlink.Neigh{ + LinkIndex: contVethWithIndex1.Attrs().Index, + State: netlink.NUD_PERMANENT, + IP: net.IPv4(169, 254, 1, 1), + HardwareAddr: hostVethWithIndex9.Attrs().HardwareAddr, + }, + }, + }, + linkSetNsFdCalls: []linkSetNsFdCall{ + { + link: hostVethWithIndex9, + fd: 3, + err: errors.New("some error"), + }, + }, + nsFDCalls: []nsFDCall{ + { + fd: uintptr(3), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: &net.IPNet{ + IP: net.ParseIP("192.168.120.1"), + Mask: net.CIDRMask(32, 32), + }, + v6Addr: nil, + mtu: 9001, + }, + wantErr: errors.New("setup NS network: failed to move veth to host netns: some error"), + }, + { + name: "failed to enable IPv6 on eth0", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, 
+ { + link: contVethWithIndex1, + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eth0/disable_ipv6", + value: "0", + err: errors.New("some error"), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: nil, + v6Addr: &net.IPNet{ + IP: net.ParseIP("2001:db8:3333:4444:5555:6666:7777:8888"), + Mask: net.CIDRMask(128, 128), + }, + mtu: 9001, + }, + wantErr: errors.New("setupVeth network: failed to enable IPv6 on container veth interface: some error"), + }, + { + name: "failed to enable IPv6 on lo", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: contVethWithIndex1, + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eth0/disable_ipv6", + value: "0", + }, + { + key: "net/ipv6/conf/lo/disable_ipv6", + value: "0", + err: errors.New("some error"), + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: nil, + v6Addr: &net.IPNet{ + IP: net.ParseIP("2001:db8:3333:4444:5555:6666:7777:8888"), + Mask: net.CIDRMask(128, 128), + }, + mtu: 9001, + }, + wantErr: errors.New("setupVeth network: failed to enable IPv6 on container's lo interface: some error"), + }, + { + name: "failed to wait IPv6 address become stable", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + { + linkName: "eth0", + link: contVethWithIndex1, + }, + }, + linkAddCalls: []linkAddCall{ + { + link: &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eth0", + Flags: net.FlagUp, + MTU: 9001, + }, + PeerName: "eni8ea2c11fe35", + }, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + { + link: contVethWithIndex1, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_LINK, + Dst: &net.IPNet{ + IP: net.IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + Mask: net.CIDRMask(128, 128), + }, + }, + }, + }, + routeAddCalls: []routeAddCall{ + { + route: &netlink.Route{ + LinkIndex: contVethWithIndex1.Attrs().Index, + Scope: netlink.SCOPE_UNIVERSE, + Dst: &net.IPNet{ + IP: net.IPv6zero, + Mask: net.CIDRMask(0, 128), + }, + Gw: net.IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + }, + }, + }, + addrAddCalls: []addrAddCall{ + { + link: contVethWithIndex1, + addr: &netlink.Addr{ + IPNet: &net.IPNet{ + IP: net.ParseIP("2001:db8:3333:4444:5555:6666:7777:8888"), + Mask: net.CIDRMask(128, 128), + }, + }, + }, + }, + addrListCalls: []addrListCall{ + { + link: contVethWithIndex1, + family: netlink.FAMILY_V6, + err: errors.New("some error"), + }, + }, + neighAddCalls: []neighAddCall{ + { + neigh: &netlink.Neigh{ + LinkIndex: contVethWithIndex1.Attrs().Index, + State: netlink.NUD_PERMANENT, + IP: net.IP{0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}, + HardwareAddr: hostVethWithIndex9.Attrs().HardwareAddr, + }, + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eth0/disable_ipv6", + value: "0", + }, + { + key: 
"net/ipv6/conf/lo/disable_ipv6", + value: "0", + }, + }, + }, + args: args{ + contVethName: "eth0", + hostVethName: "eni8ea2c11fe35", + v4Addr: nil, + v6Addr: &net.IPNet{ + IP: net.ParseIP("2001:db8:3333:4444:5555:6666:7777:8888"), + Mask: net.CIDRMask(128, 128), + }, + mtu: 9001, + }, + wantErr: errors.New("setup NS network: failed while waiting for v6 addresses to be stable: could not list addresses: some error"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() -func TestSetupPodNetworkErrNoIPv6(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + for _, call := range tt.fields.linkByNameCalls { + netLink.EXPECT().LinkByName(call.linkName).Return(call.link, call.err) + } + for _, call := range tt.fields.linkAddCalls { + netLink.EXPECT().LinkAdd(call.link).Return(call.err) + } + for _, call := range tt.fields.linkSetupCalls { + netLink.EXPECT().LinkSetUp(call.link).Return(call.err) + } + for _, call := range tt.fields.routeReplaceCalls { + netLink.EXPECT().RouteReplace(call.route).Return(call.err) + } + for _, call := range tt.fields.routeAddCalls { + netLink.EXPECT().RouteAdd(call.route).Return(call.err) + } + for _, call := range tt.fields.addrAddCalls { + netLink.EXPECT().AddrAdd(call.link, call.addr).Return(call.err) + } + for _, call := range tt.fields.addrListCalls { + netLink.EXPECT().AddrList(call.link, call.family).Return(call.addrs, call.err) + } + for _, call := range tt.fields.neighAddCalls { + netLink.EXPECT().NeighAdd(call.neigh).Return(call.err) + } + for _, call := range tt.fields.linkSetNsFdCalls { + netLink.EXPECT().LinkSetNsFd(call.link, call.fd).Return(call.err) + } - m.mockSetupPodNetworkWithFailureAt(t, "no-ipv6") + procSys := mock_procsyswrapper.NewMockProcSys(ctrl) + for _, call := range tt.fields.procSysSetCalls { + procSys.EXPECT().Set(call.key, call.value).Return(call.err) + } + hostNS := mock_ns.NewMockNetNS(ctrl) + for _, call := range tt.fields.nsFDCalls { + // we just assume the createVethContext executes, the logic of createVethContext will be tested by createVethContext itself. 
+ hostNS.EXPECT().Fd().Return(call.fd) + } - addr := &net.IPNet{ - IP: net.ParseIP(testIP), - Mask: net.IPv4Mask(255, 255, 255, 255), + createVethContext := &createVethPairContext{ + contVethName: tt.args.contVethName, + hostVethName: tt.args.hostVethName, + v4Addr: tt.args.v4Addr, + v6Addr: tt.args.v6Addr, + mtu: tt.args.mtu, + netLink: netLink, + procSys: procSys, + } + err := createVethContext.run(hostNS) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + } + }) } - var cidrs []string - err := setupNS(testHostVethName, testContVethName, testnetnsPath, addr, &net.IPNet{}, testTable, cidrs, true, m.netlink, m.ns, mtu, log, m.procsys) - assert.NoError(t, err) } -func TestSetupPodNetworkErrLinkByName(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() +func Test_linuxNetwork_setupVeth(t *testing.T) { + hostVethWithIndex9 := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{ + Name: "eni8ea2c11fe35", + Index: 9, + }, + } + type linkByNameCall struct { + linkName string + link netlink.Link + err error + } + type linkDelCall struct { + link netlink.Link + err error + } + type linkSetupCall struct { + link netlink.Link + err error + } + type withNetNSPathCall struct { + netNSPath string + err error + } + type procSysSetCall struct { + key string + value string + err error + } - m.mockSetupPodNetworkWithFailureAt(t, "veth-link-byname") + type fields struct { + linkByNameCalls []linkByNameCall + linkDelCalls []linkDelCall + linkSetupCalls []linkSetupCall + withNetNSPathCalls []withNetNSPathCall + procSysSetCalls []procSysSetCall + } - addr := &net.IPNet{ - IP: net.ParseIP(testIP), - Mask: net.IPv4Mask(255, 255, 255, 255), + type args struct { + hostVethName string + contVethName string + netnsPath string + mtu int } - var cidrs []string - err := setupNS(testHostVethName, testContVethName, testnetnsPath, addr, &net.IPNet{}, testTable, cidrs, false, m.netlink, m.ns, mtu, log, m.procsys) + tests := []struct { + name string + fields fields + args args + want netlink.Link + wantErr error + }{ + { + name: "successfully setup veth - old hostVeth don't exists", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + }, + want: hostVethWithIndex9, + }, + { + name: "successfully setup veth - old hostVeth exists", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + linkDelCalls: []linkDelCall{ + { + link: hostVethWithIndex9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ 
+ hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + }, + want: hostVethWithIndex9, + }, + { + name: "failed to delete old hostVeth", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + linkDelCalls: []linkDelCall{ + { + link: hostVethWithIndex9, + err: errors.New("some error"), + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + }, + wantErr: errors.New("failed to delete old hostVeth eni8ea2c11fe35: some error"), + }, + { + name: "failed to create veth pair", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + err: errors.New("some error"), + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + }, + wantErr: errors.New("failed to setup veth network: some error"), + }, + { + name: "failed to obtain created hostVeth", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + }, + wantErr: errors.New("failed to find hostVeth eni8ea2c11fe35: not exists"), + }, + { + name: "failed to disable IPv6 accept_ra", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + err: errors.New("some error"), + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + }, + wantErr: errors.New("failed to disable IPv6 router advertisements: some error"), + }, + { + name: "failed to disable IPv6 accept_redirects", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + err: errors.New("some error"), + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + }, + wantErr: errors.New("failed to disable IPv6 ICMP redirects: some error"), + }, + { + name: "failed to disable IPv6 accept_ra and accept_redirects due to lack IPv6 support", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: 
[]procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + err: syscall.ENOENT, + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + err: syscall.ENOENT, + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + }, + want: hostVethWithIndex9, + }, + { + name: "failed to setUp hostVeth", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "eni8ea2c11fe35", + err: errors.New("not exists"), + }, + { + linkName: "eni8ea2c11fe35", + link: hostVethWithIndex9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: hostVethWithIndex9, + err: errors.New("some error"), + }, + }, + withNetNSPathCalls: []withNetNSPathCall{ + { + netNSPath: "/proc/42/ns/net", + }, + }, + procSysSetCalls: []procSysSetCall{ + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_ra", + value: "0", + }, + { + key: "net/ipv6/conf/eni8ea2c11fe35/accept_redirects", + value: "0", + }, + }, + }, + args: args{ + hostVethName: "eni8ea2c11fe35", + contVethName: "eth0", + netnsPath: "/proc/42/ns/net", + }, + wantErr: errors.New("failed to setup hostVeth eni8ea2c11fe35: some error"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() - assert.Error(t, err) -} + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + for _, call := range tt.fields.linkByNameCalls { + netLink.EXPECT().LinkByName(call.linkName).Return(call.link, call.err) + } + for _, call := range tt.fields.linkDelCalls { + netLink.EXPECT().LinkDel(call.link).Return(call.err) + } + for _, call := range tt.fields.linkSetupCalls { + netLink.EXPECT().LinkSetUp(call.link).Return(call.err) + } -func TestSetupPodNetworkErrLinkSetup(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() + ns := mock_nswrapper.NewMockNS(ctrl) + for _, call := range tt.fields.withNetNSPathCalls { + // we just assume the createVethContext executes, the logic of createVethContext will be tested by createVethContext itself. 
+ ns.EXPECT().WithNetNSPath(call.netNSPath, gomock.Any()).Return(call.err) + } - m.mockSetupPodNetworkWithFailureAt(t, "veth-link-setup") + procSys := mock_procsyswrapper.NewMockProcSys(ctrl) + for _, call := range tt.fields.procSysSetCalls { + procSys.EXPECT().Set(call.key, call.value).Return(call.err) + } - addr := &net.IPNet{ - IP: net.ParseIP(testIP), - Mask: net.IPv4Mask(255, 255, 255, 255), + n := &linuxNetwork{ + netLink: netLink, + ns: ns, + procSys: procSys, + } + got, err := n.setupVeth(tt.args.hostVethName, tt.args.contVethName, tt.args.netnsPath, nil, nil, tt.args.mtu, testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.want, got) + } + }) } - var cidrs []string - err := setupNS(testHostVethName, testContVethName, testnetnsPath, addr, &net.IPNet{}, testTable, cidrs, false, m.netlink, m.ns, mtu, log, m.procsys) - - assert.Error(t, err) } -func TestSetupPodNetworkErrProcSys(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() +func Test_linuxNetwork_setupVlan(t *testing.T) { + vlanID := 7 + parentIfIndex := 3 + eniMac := "01:23:45:67:89:ab" - m.mockSetupPodNetworkWithFailureAt(t, "veth-procsys") + vlanLinkPostAddWithIndex9 := buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac) + vlanLinkPostAddWithIndex9.Index = 9 + type linkByNameCall struct { + linkName string + link netlink.Link + err error + } + type linkAddCall struct { + link netlink.Link + linkIndex int + err error + } + type linkDelCall struct { + link netlink.Link + err error + } + type linkSetupCall struct { + link netlink.Link + err error + } + type routeReplaceCall struct { + route *netlink.Route + err error + } + type fields struct { + linkByNameCalls []linkByNameCall + linkAddCalls []linkAddCall + linkDelCalls []linkDelCall + linkSetupCalls []linkSetupCall + routeReplaceCalls []routeReplaceCall + } - addr := &net.IPNet{ - IP: net.ParseIP(testIP), - Mask: net.IPv4Mask(255, 255, 255, 255), + type args struct { + vlanID int + eniMAC string + subnetGW string + parentIfIndex int + rtTable int } - var cidrs []string - err := setupNS(testHostVethName, testContVethName, testnetnsPath, addr, &net.IPNet{}, testTable, cidrs, false, m.netlink, m.ns, mtu, log, m.procsys) + tests := []struct { + name string + fields fields + args args + want netlink.Link + wantErr error + }{ + { + name: "successfully setup vlan - old vlan don't exists", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + err: errors.Errorf("don't exists"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + linkIndex: 9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: vlanLinkPostAddWithIndex9, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: 9, + Dst: &net.IPNet{IP: net.ParseIP("192.168.120.1"), Mask: net.CIDRMask(32, 32)}, + Scope: netlink.SCOPE_LINK, + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: 9, + Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, + Scope: netlink.SCOPE_UNIVERSE, + Gw: net.ParseIP("192.168.120.1"), + Table: 107, + }, + }, + }, + }, + args: args{ + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: "192.168.120.1", + parentIfIndex: parentIfIndex, + rtTable: 107, + }, + want: vlanLinkPostAddWithIndex9, + }, + { + name: "successfully setup vlan - old vlan exists", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + link: 
buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + }, + }, + linkDelCalls: []linkDelCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + linkIndex: 9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: vlanLinkPostAddWithIndex9, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: 9, + Dst: &net.IPNet{IP: net.ParseIP("192.168.120.1"), Mask: net.CIDRMask(32, 32)}, + Scope: netlink.SCOPE_LINK, + Table: 107, + }, + }, + { + route: &netlink.Route{ + LinkIndex: 9, + Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, + Scope: netlink.SCOPE_UNIVERSE, + Gw: net.ParseIP("192.168.120.1"), + Table: 107, + }, + }, + }, + }, + args: args{ + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: "192.168.120.1", + parentIfIndex: parentIfIndex, + rtTable: 107, + }, + want: vlanLinkPostAddWithIndex9, + }, + { + name: "failed to delete old vlan link", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + }, + }, + linkDelCalls: []linkDelCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + err: errors.New("some error"), + }, + }, + }, + args: args{ + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: "192.168.120.1", + parentIfIndex: parentIfIndex, + rtTable: 107, + }, + wantErr: errors.New("failed to delete old vlan link vlan.eth.7: some error"), + }, + { + name: "failed to add vlan link", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + err: errors.Errorf("don't exists"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + err: errors.New("some error"), + }, + }, + }, + args: args{ + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: "192.168.120.1", + parentIfIndex: parentIfIndex, + rtTable: 107, + }, + wantErr: errors.New("failed to add vlan link vlan.eth.7: some error"), + }, + { + name: "failed to setUp vlan link", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + err: errors.Errorf("don't exists"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + linkIndex: 9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: vlanLinkPostAddWithIndex9, + err: errors.New("some error"), + }, + }, + }, + args: args{ + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: "192.168.120.1", + parentIfIndex: parentIfIndex, + rtTable: 107, + }, + wantErr: errors.New("failed to setUp vlan link vlan.eth.7: some error"), + }, + { + name: "failed to replace routes", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + err: errors.Errorf("don't exists"), + }, + }, + linkAddCalls: []linkAddCall{ + { + link: buildVlanLink("vlan.eth.7", vlanID, parentIfIndex, eniMac), + linkIndex: 9, + }, + }, + linkSetupCalls: []linkSetupCall{ + { + link: vlanLinkPostAddWithIndex9, + }, + }, + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: 9, + Dst: &net.IPNet{IP: net.ParseIP("192.168.120.1"), Mask: net.CIDRMask(32, 32)}, + Scope: netlink.SCOPE_LINK, + Table: 107, + }, + err: errors.New("some error"), + }, + }, + }, + args: args{ + vlanID: vlanID, + eniMAC: eniMac, + subnetGW: "192.168.120.1", + parentIfIndex: parentIfIndex, + rtTable: 107, + }, + wantErr: 
errors.New("failed to replace route entry 192.168.120.1 via 192.168.120.1: some error"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() - assert.Error(t, err) -} + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + for _, call := range tt.fields.linkByNameCalls { + netLink.EXPECT().LinkByName(call.linkName).Return(call.link, call.err) + } + for _, call := range tt.fields.linkAddCalls { + netLink.EXPECT().LinkAdd(call.link).DoAndReturn(func(link netlink.Link) error { + if call.err != nil { + return call.err + } + vlanBeforeAdd := link.(*netlink.Vlan) + vlanBeforeAdd.Index = call.linkIndex + return nil + }) + } + for _, call := range tt.fields.linkDelCalls { + netLink.EXPECT().LinkDel(call.link).Return(call.err) + } + for _, call := range tt.fields.linkSetupCalls { + netLink.EXPECT().LinkSetUp(call.link).Return(call.err) + } + for _, call := range tt.fields.routeReplaceCalls { + netLink.EXPECT().RouteReplace(call.route).Return(call.err) + } -func TestSetupPodNetworkErrRouteReplace(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() + n := &linuxNetwork{ + netLink: netLink, + } + got, err := n.setupVlan(tt.args.vlanID, tt.args.eniMAC, tt.args.subnetGW, tt.args.parentIfIndex, tt.args.rtTable, testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.want, got) + } + }) + } +} - m.mockSetupPodNetworkWithFailureAt(t, "route-replace") +func Test_linuxNetwork_teardownVlan(t *testing.T) { + type linkByNameCall struct { + linkName string + link netlink.Link + err error + } + type linkDelCall struct { + link netlink.Link + err error + } - addr := &net.IPNet{ - IP: net.ParseIP(testIP), - Mask: net.IPv4Mask(255, 255, 255, 255), + type fields struct { + linkByNameCalls []linkByNameCall + linkDelCalls []linkDelCall + } + type args struct { + vlanID int + } + tests := []struct { + name string + fields fields + args args + wantErr error + }{ + { + name: "successfully deleted vlan link", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + link: &netlink.Vlan{VlanId: 7}, + }, + }, + linkDelCalls: []linkDelCall{ + { + link: &netlink.Vlan{VlanId: 7}, + }, + }, + }, + args: args{ + vlanID: 7, + }, + }, + { + name: "failed to deleted vlan link", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + link: &netlink.Vlan{VlanId: 7}, + }, + }, + linkDelCalls: []linkDelCall{ + { + link: &netlink.Vlan{VlanId: 7}, + err: errors.New("some error"), + }, + }, + }, + args: args{ + vlanID: 7, + }, + wantErr: errors.New("failed to delete vlan link vlan.eth.7: some error"), + }, + { + name: "vlan link don't exists", + fields: fields{ + linkByNameCalls: []linkByNameCall{ + { + linkName: "vlan.eth.7", + err: errors.New("don't exists"), + }, + }, + }, + args: args{ + vlanID: 7, + }, + }, } - var cidrs []string - err := setupNS(testHostVethName, testContVethName, testnetnsPath, addr, &net.IPNet{}, testTable, cidrs, false, m.netlink, m.ns, mtu, log, m.procsys) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() - assert.Error(t, err) -} + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + for _, call := range tt.fields.linkByNameCalls { + netLink.EXPECT().LinkByName(call.linkName).Return(call.link, call.err) + } + for _, call := range tt.fields.linkDelCalls { + netLink.EXPECT().LinkDel(call.link).Return(call.err) + } 
-func TestTearDownPodNetwork(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() - - testRule := &netlink.Rule{ - SuppressIfgroup: -1, - SuppressPrefixlen: -1, - Priority: -1, - Mark: -1, - Mask: -1, - Goto: -1, - Flow: -1, - } - gomock.InOrder( - m.netlink.EXPECT().NewRule().Return(testRule), - // test to-pod rule - m.netlink.EXPECT().RuleDel(gomock.Any()).Return(nil), - - // test from-pod rule - m.netlink.EXPECT().RouteDel(gomock.Any()).Return(nil), - ) - - addr := &net.IPNet{ - IP: net.ParseIP(testIP), - Mask: net.IPv4Mask(255, 255, 255, 255), - } - err := tearDownNS(addr, 0, m.netlink, log) - assert.NoError(t, err) + n := &linuxNetwork{ + netLink: netLink, + } + err := n.teardownVlan(tt.args.vlanID, testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + } + }) + } } -func TestTeardownPodENINetworkHappyCase(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() - - mockVlan := mock_netlink.NewMockLink(m.ctrl) - linuxNetwork := &linuxNetwork{ - netLink: m.netlink, - ns: m.ns, - procSys: m.procsys, +func Test_linuxNetwork_setupIPBasedContainerRouteRules(t *testing.T) { + hostVethAttrs := netlink.LinkAttrs{ + Name: "eni00bcc08c834", + Index: 7, + } + containerAddr := &net.IPNet{ + IP: net.ParseIP("192.168.100.42"), + Mask: net.CIDRMask(32, 32), } - actualRule := &netlink.Rule{} - m.netlink.EXPECT().NewRule().Return(actualRule) + toContainerRule := netlink.NewRule() + toContainerRule.Dst = containerAddr + toContainerRule.Priority = toContainerRulePriority + toContainerRule.Table = unix.RT_TABLE_MAIN - expectedRule := &netlink.Rule{ - Priority: vlanRulePriority, - Table: 101, + fromContainerRule := netlink.NewRule() + fromContainerRule.Src = containerAddr + fromContainerRule.Priority = fromContainerRulePriority + fromContainerRule.Table = 101 + type routeReplaceCall struct { + route *netlink.Route + err error + } + type ruleAddCall struct { + rule *netlink.Rule + err error + } + type fields struct { + routeReplaceCalls []routeReplaceCall + ruleAddCalls []ruleAddCall + } + type args struct { + hostVethAttrs netlink.LinkAttrs + containerAddr *net.IPNet + rtTable int } - gomock.InOrder( - m.netlink.EXPECT().LinkByName(testVlanName).Return(mockVlan, nil), - m.netlink.EXPECT().LinkDel(mockVlan).Return(nil), - // delete ip rules for the pod. 
- m.netlink.EXPECT().RuleDel(gomock.Eq(expectedRule)).Return(nil), - m.netlink.EXPECT().RuleDel(gomock.Eq(expectedRule)).Return(syscall.ENOENT), - ) + tests := []struct { + name string + fields fields + args args + wantErr error + }{ + { + name: "successfully setup routes and rules - without dedicated route table", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: toContainerRule, + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + rtTable: unix.RT_TABLE_MAIN, + }, + }, + { + name: "successfully setup routes and rules - with dedicated route table", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: toContainerRule, + }, + { + rule: fromContainerRule, + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + rtTable: 101, + }, + }, + { + name: "successfully setup routes and rules - toContainerRule already exists", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: toContainerRule, + err: syscall.EEXIST, + }, + { + rule: fromContainerRule, + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + rtTable: 101, + }, + }, + { + name: "successfully setup routes and rules - fromContainerRule already exists", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: toContainerRule, + }, + { + rule: fromContainerRule, + err: syscall.EEXIST, + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + rtTable: 101, + }, + }, + { + name: "failed to setup container route", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + err: errors.New("some error"), + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + rtTable: 101, + }, + wantErr: errors.New("failed to setup container route, containerAddr=192.168.100.42/32, hostVeth=eni00bcc08c834, rtTable=main: some error"), + }, + { + name: "failed to setup toContainer rule", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: toContainerRule, + err: errors.New("some error"), + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + rtTable: 101, + }, + wantErr: errors.New("failed to setup toContainer rule, containerAddr=192.168.100.42/32, rtTable=main: some error"), + }, + { + name: "failed to setup fromContainer rule", + fields: fields{ + routeReplaceCalls: 
[]routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: toContainerRule, + }, + { + rule: fromContainerRule, + err: errors.New("some error"), + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + rtTable: 101, + }, + wantErr: errors.New("failed to setup fromContainer rule, containerAddr=192.168.100.42/32, rtTable=101: some error"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() - err := linuxNetwork.TeardownPodENINetwork(1, log) - assert.NoError(t, err) -} + hostVeth := mock_netlink.NewMockLink(ctrl) + hostVeth.EXPECT().Attrs().Return(&tt.args.hostVethAttrs).AnyTimes() -func (m *testMocks) mockSetupPodENINetworkWithFailureAt(t *testing.T, addr *net.IPNet, failAt string) { - mockHostVeth := m.setupMockForVethCreation(failAt) + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + netLink.EXPECT().NewRule().DoAndReturn(func() *netlink.Rule { return netlink.NewRule() }).AnyTimes() + for _, call := range tt.fields.routeReplaceCalls { + netLink.EXPECT().RouteReplace(call.route).Return(call.err) + } + for _, call := range tt.fields.ruleAddCalls { + netLink.EXPECT().RuleAdd(call.rule).Return(call.err) + } - // skip setting pod ENI mocks if test is expected to fail at veth creation. - if strings.HasPrefix(failAt, "veth-") { - return + n := &linuxNetwork{ + netLink: netLink, + } + err := n.setupIPBasedContainerRouteRules(hostVeth, tt.args.containerAddr, tt.args.rtTable, testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + } + }) } +} - // link will not exist initially - m.netlink.EXPECT().LinkByName(testVlanName).Return(nil, - errors.New("link not found")) - - actualRule := &netlink.Rule{} - m.netlink.EXPECT().NewRule().Return(actualRule) - - oldVethRule := &netlink.Rule{ - IifName: testHostVethName, - Priority: vlanRulePriority, +func Test_linuxNetwork_teardownIPBasedContainerRouteRules(t *testing.T) { + containerAddr := &net.IPNet{ + IP: net.ParseIP("192.168.100.42"), + Mask: net.CIDRMask(32, 32), } - m.netlink.EXPECT().RuleDel(gomock.Eq(oldVethRule)).Return(syscall.ENOENT) - vlanLink := buildVlanLink(1, 2, "eniMacAddress") - // add the link - m.netlink.EXPECT().LinkAdd(gomock.Eq(vlanLink)).Return(nil) + toContainerRoute := &netlink.Route{ + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: unix.RT_TABLE_MAIN, + } + toContainerRule := netlink.NewRule() + toContainerRule.Dst = containerAddr + toContainerRule.Priority = toContainerRulePriority + toContainerRule.Table = unix.RT_TABLE_MAIN - // bring up the link - m.netlink.EXPECT().LinkSetUp(gomock.Eq(vlanLink)).Return(nil) + fromContainerRule := netlink.NewRule() + fromContainerRule.Src = containerAddr + fromContainerRule.Priority = fromContainerRulePriority + fromContainerRule.Table = 101 + type routeDelCall struct { + route *netlink.Route + err error + } + type ruleDelCall struct { + rule *netlink.Rule + err error + } + type fields struct { + routeDelCalls []routeDelCall + ruleDelCalls []ruleDelCall + } - vlanRoutes := buildRoutesForVlan(101, 0, net.ParseIP("10.1.0.1")) + type args struct { + containerAddr *net.IPNet + rtTable int + } + tests := []struct { + name string + fields fields + args args + wantErr error + }{ + { + name: "successfully teardown routes and rules - without 
dedicated route table", + fields: fields{ + routeDelCalls: []routeDelCall{ + { + route: toContainerRoute, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: toContainerRule, + }, + }, + }, + args: args{ + containerAddr: containerAddr, + rtTable: unix.RT_TABLE_MAIN, + }, + }, + { + name: "successfully teardown routes and rules - with dedicated route table", + fields: fields{ + routeDelCalls: []routeDelCall{ + { + route: toContainerRoute, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: toContainerRule, + }, + { + rule: fromContainerRule, + }, + { + rule: fromContainerRule, + err: syscall.ENOENT, + }, + }, + }, + args: args{ + containerAddr: containerAddr, + rtTable: 101, + }, + }, + { + name: "successfully teardown routes and rules - succeed when route already deleted", + fields: fields{ + routeDelCalls: []routeDelCall{ + { + route: toContainerRoute, + err: syscall.ESRCH, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: toContainerRule, + }, + }, + }, + args: args{ + containerAddr: containerAddr, + rtTable: unix.RT_TABLE_MAIN, + }, + }, + { + name: "successfully teardown routes and rules - succeed even when route deletion failed", + fields: fields{ + routeDelCalls: []routeDelCall{ + { + route: toContainerRoute, + err: errors.New("some error"), + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: toContainerRule, + }, + }, + }, + args: args{ + containerAddr: containerAddr, + rtTable: unix.RT_TABLE_MAIN, + }, + }, + { + name: "successfully teardown routes and rules - toContainerRule already deleted", + fields: fields{ + routeDelCalls: []routeDelCall{ + { + route: toContainerRoute, + }, + }, + ruleDelCalls: []ruleDelCall{ + { + rule: toContainerRule, + err: syscall.ENOENT, + }, + { + rule: fromContainerRule, + }, + { + rule: fromContainerRule, + err: syscall.ENOENT, + }, + }, + }, + args: args{ + containerAddr: containerAddr, + rtTable: 101, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() - // two routes for vlan - m.netlink.EXPECT().RouteReplace(gomock.Eq(&vlanRoutes[0])).Return(nil) - m.netlink.EXPECT().RouteReplace(gomock.Eq(&vlanRoutes[1])).Return(nil) + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + netLink.EXPECT().NewRule().DoAndReturn(func() *netlink.Rule { return netlink.NewRule() }).AnyTimes() + for _, call := range tt.fields.routeDelCalls { + netLink.EXPECT().RouteDel(call.route).Return(call.err) + } + for _, call := range tt.fields.ruleDelCalls { + netLink.EXPECT().RuleDel(call.rule).Return(call.err) + } - hwAddr, _ := net.ParseMAC(testMAC) - mockLinkAttrs := &netlink.LinkAttrs{ - HardwareAddr: hwAddr, - Name: testHostVethName, - Index: 3, + n := &linuxNetwork{ + netLink: netLink, + } + err := n.teardownIPBasedContainerRouteRules(tt.args.containerAddr, tt.args.rtTable, testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + } + }) } - mockHostVeth.EXPECT().Attrs().Return(mockLinkAttrs).Times(2) +} - // add route for host veth - route := netlink.Route{ - LinkIndex: 3, - Scope: netlink.SCOPE_LINK, - Dst: addr, - Table: 101, +func Test_linuxNetwork_setupIIFBasedContainerRouteRules(t *testing.T) { + hostVethAttrs := netlink.LinkAttrs{ + Name: "eni00bcc08c834", + Index: 7, + } + hostVlanAttrs := netlink.LinkAttrs{ + Name: "vlan.eth.1", + Index: 3, + } + containerAddr := &net.IPNet{ + IP: net.ParseIP("192.168.100.42"), + Mask: net.CIDRMask(32, 32), } - m.netlink.EXPECT().RouteReplace(gomock.Eq(&route)).Return(nil) - 
m.netlink.EXPECT().NewRule().Return(actualRule) + rtTable := 101 + fromHostVlanRule := netlink.NewRule() + fromHostVlanRule.IifName = hostVlanAttrs.Name + fromHostVlanRule.Priority = vlanRulePriority + fromHostVlanRule.Table = rtTable - // add two ip rules based on iff interfaces - expectedRule1 := &netlink.Rule{ - Priority: vlanRulePriority, - Table: 101, - IifName: vlanLink.Name, + fromHostVethRule := netlink.NewRule() + fromHostVethRule.IifName = hostVethAttrs.Name + fromHostVethRule.Priority = vlanRulePriority + fromHostVethRule.Table = rtTable + type routeReplaceCall struct { + route *netlink.Route + err error + } + type ruleAddCall struct { + rule *netlink.Rule + err error + } + type fields struct { + routeReplaceCalls []routeReplaceCall + ruleAddCalls []ruleAddCall + } + type args struct { + hostVethAttrs netlink.LinkAttrs + containerAddr *net.IPNet + hostVlanAttrs netlink.LinkAttrs + rtTable int + } + tests := []struct { + name string + fields fields + args args + wantErr error + }{ + { + name: "successfully setup routes and rules", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: rtTable, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: fromHostVlanRule, + }, + { + rule: fromHostVethRule, + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + hostVlanAttrs: hostVlanAttrs, + rtTable: rtTable, + }, + }, + { + name: "successfully setup routes and rules - fromHostVlanRule already exists", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: rtTable, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: fromHostVlanRule, + err: syscall.EEXIST, + }, + { + rule: fromHostVethRule, + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + hostVlanAttrs: hostVlanAttrs, + rtTable: rtTable, + }, + }, + { + name: "successfully setup routes and rules - fromHostVethRule already exists", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: rtTable, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: fromHostVlanRule, + }, + { + rule: fromHostVethRule, + err: syscall.EEXIST, + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + hostVlanAttrs: hostVlanAttrs, + rtTable: rtTable, + }, + }, + { + name: "failed to setup container route", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: rtTable, + }, + err: errors.New("some error"), + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + hostVlanAttrs: hostVlanAttrs, + rtTable: rtTable, + }, + wantErr: errors.New("failed to setup container route, containerAddr=192.168.100.42/32, hostVeth=eni00bcc08c834, rtTable=101: some error"), + }, + { + name: "failed to setup fromHostVlan rule", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: rtTable, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: fromHostVlanRule, + err: 
errors.New("some error"), + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + hostVlanAttrs: hostVlanAttrs, + rtTable: rtTable, + }, + wantErr: errors.New("unable to setup fromHostVlan rule, hostVlan=vlan.eth.1, rtTable=101: some error"), + }, + { + name: "failed to setup fromHostVeth rule", + fields: fields{ + routeReplaceCalls: []routeReplaceCall{ + { + route: &netlink.Route{ + LinkIndex: hostVethAttrs.Index, + Scope: netlink.SCOPE_LINK, + Dst: containerAddr, + Table: rtTable, + }, + }, + }, + ruleAddCalls: []ruleAddCall{ + { + rule: fromHostVlanRule, + }, + { + rule: fromHostVethRule, + err: errors.New("some error"), + }, + }, + }, + args: args{ + hostVethAttrs: hostVethAttrs, + containerAddr: containerAddr, + hostVlanAttrs: hostVlanAttrs, + rtTable: rtTable, + }, + wantErr: errors.New("unable to setup fromHostVeth rule, hostVeth=eni00bcc08c834, rtTable=101: some error"), + }, } - m.netlink.EXPECT().RuleAdd(gomock.Eq(expectedRule1)).Return(nil) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + hostVeth := mock_netlink.NewMockLink(ctrl) + hostVeth.EXPECT().Attrs().Return(&tt.args.hostVethAttrs).AnyTimes() + hostVlan := mock_netlink.NewMockLink(ctrl) + hostVlan.EXPECT().Attrs().Return(&tt.args.hostVlanAttrs).AnyTimes() + + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + netLink.EXPECT().NewRule().DoAndReturn(func() *netlink.Rule { return netlink.NewRule() }).AnyTimes() + for _, call := range tt.fields.routeReplaceCalls { + netLink.EXPECT().RouteReplace(call.route).Return(call.err) + } + for _, call := range tt.fields.ruleAddCalls { + netLink.EXPECT().RuleAdd(call.rule).Return(call.err) + } - expectedRule2 := &netlink.Rule{ - Priority: vlanRulePriority, - Table: 101, - IifName: testHostVethName, + n := &linuxNetwork{ + netLink: netLink, + } + err := n.setupIIFBasedContainerRouteRules(hostVeth, tt.args.containerAddr, hostVlan, tt.args.rtTable, testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + } + }) } - m.netlink.EXPECT().RuleAdd(gomock.Eq(expectedRule2)).Return(nil) } -func TestSetupPodENINetworkHappyCase(t *testing.T) { - m := setup(t) - defer m.ctrl.Finish() +func Test_linuxNetwork_teardownIIFBasedContainerRouteRules(t *testing.T) { + vlanRuleForTableID101 := netlink.NewRule() + vlanRuleForTableID101.Priority = vlanRulePriority + vlanRuleForTableID101.Table = 101 + type ruleDelCall struct { + rule *netlink.Rule + err error + } + type fields struct { + ruleDelCalls []ruleDelCall + } - addr := &net.IPNet{ - IP: net.ParseIP(testIP), - Mask: net.IPv4Mask(255, 255, 255, 255), + type args struct { + rtTable int } - t1 := &linuxNetwork{ - netLink: m.netlink, - ns: m.ns, - procSys: m.procsys, + tests := []struct { + name string + fields fields + args args + wantErr error + }{ + { + name: "teardown both rules successfully", + fields: fields{ + ruleDelCalls: []ruleDelCall{ + { + rule: vlanRuleForTableID101, + }, + { + rule: vlanRuleForTableID101, + }, + { + rule: vlanRuleForTableID101, + err: syscall.ENOENT, + }, + }, + }, + args: args{ + rtTable: 101, + }, + }, + { + name: "failed to delete rules", + fields: fields{ + ruleDelCalls: []ruleDelCall{ + { + rule: vlanRuleForTableID101, + err: errors.New("some error"), + }, + }, + }, + args: args{ + rtTable: 101, + }, + wantErr: errors.New("failed to delete IIF based rules, rtTable=101: some error"), + }, } + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + netLink := mock_netlinkwrapper.NewMockNetLink(ctrl) + netLink.EXPECT().NewRule().DoAndReturn(func() *netlink.Rule { return netlink.NewRule() }).AnyTimes() + for _, call := range tt.fields.ruleDelCalls { + netLink.EXPECT().RuleDel(call.rule).Return(call.err) + } + n := &linuxNetwork{ + netLink: netLink, + } + err := n.teardownIIFBasedContainerRouteRules(tt.args.rtTable, testLogger) + if tt.wantErr != nil { + assert.EqualError(t, err, tt.wantErr.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} - m.mockSetupPodENINetworkWithFailureAt(t, addr, "") +func Test_buildRoutesForVlan(t *testing.T) { + type args struct { + vlanTableID int + vlanIndex int + gw net.IP + } + tests := []struct { + name string + args args + want []netlink.Route + }{ + { + name: "IPv4", + args: args{ + vlanTableID: 101, + vlanIndex: 7, + gw: net.ParseIP("192.168.128.1"), + }, + want: []netlink.Route{ + { + LinkIndex: 7, + Dst: &net.IPNet{IP: net.ParseIP("192.168.128.1"), Mask: net.CIDRMask(32, 32)}, + Scope: netlink.SCOPE_LINK, + Table: 101, + }, + { + LinkIndex: 7, + Dst: &net.IPNet{IP: net.IPv4zero, Mask: net.CIDRMask(0, 32)}, + Scope: netlink.SCOPE_UNIVERSE, + Gw: net.ParseIP("192.168.128.1"), + Table: 101, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := buildRoutesForVlan(tt.args.vlanTableID, tt.args.vlanIndex, tt.args.gw) + assert.Equal(t, tt.want, got) + }) + } +} - err := t1.SetupPodENINetwork(testHostVethName, testContVethName, testnetnsPath, addr, &net.IPNet{}, 1, "eniMacAddress", - "10.1.0.1", 2, mtu, log) +func Test_buildVlanLinkName(t *testing.T) { + type args struct { + vlanID int + } + tests := []struct { + name string + args args + want string + }{ + { + name: "vlanID == 1", + args: args{ + vlanID: 1, + }, + want: "vlan.eth.1", + }, + { + name: "vlanID == 2", + args: args{ + vlanID: 2, + }, + want: "vlan.eth.2", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := buildVlanLinkName(tt.args.vlanID) + assert.Equal(t, tt.want, got) + }) + } +} - assert.NoError(t, err) +func Test_buildVlanLink(t *testing.T) { + sampleMacAddress := "00:00:5e:00:53:af" + sampleMac, _ := net.ParseMAC(sampleMacAddress) + type args struct { + vlanName string + vlanID int + parentIfIndex int + eniMAC string + } + tests := []struct { + name string + args args + wantVlanLinkName string + wantVlanLinkID int + wantVlanLinkParentIfIndex int + wantVlanLinkENIMac net.HardwareAddr + }{ + { + name: "vlan.eth.1", + args: args{ + vlanName: "vlan.eth.1", + vlanID: 1, + parentIfIndex: 3, + eniMAC: "00:00:5e:00:53:af", + }, + wantVlanLinkName: "vlan.eth.1", + wantVlanLinkID: 1, + wantVlanLinkParentIfIndex: 3, + wantVlanLinkENIMac: sampleMac, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := buildVlanLink(tt.args.vlanName, tt.args.vlanID, tt.args.parentIfIndex, tt.args.eniMAC) + assert.Equal(t, tt.wantVlanLinkName, got.Attrs().Name) + assert.Equal(t, tt.wantVlanLinkID, got.VlanId) + assert.Equal(t, tt.wantVlanLinkParentIfIndex, got.Attrs().ParentIndex) + assert.Equal(t, tt.wantVlanLinkENIMac, got.Attrs().HardwareAddr) + }) + } } diff --git a/cmd/routed-eni-cni-plugin/driver/mocks/driver_mocks.go b/cmd/routed-eni-cni-plugin/driver/mocks/driver_mocks.go index 3ed6f5e491..41b0eac7d8 100644 --- a/cmd/routed-eni-cni-plugin/driver/mocks/driver_mocks.go +++ b/cmd/routed-eni-cni-plugin/driver/mocks/driver_mocks.go @@ -22,6 +22,7 @@ import ( net "net" 
reflect "reflect" + sgpp "github.com/aws/amazon-vpc-cni-k8s/pkg/sgpp" logger "github.com/aws/amazon-vpc-cni-k8s/pkg/utils/logger" gomock "github.com/golang/mock/gomock" ) @@ -49,58 +50,58 @@ func (m *MockNetworkAPIs) EXPECT() *MockNetworkAPIsMockRecorder { return m.recorder } -// SetupNS mocks base method -func (m *MockNetworkAPIs) SetupNS(arg0, arg1, arg2 string, arg3, arg4 *net.IPNet, arg5 int, arg6 []string, arg7 bool, arg8 int, arg9 logger.Logger) error { +// SetupBranchENIPodNetwork mocks base method +func (m *MockNetworkAPIs) SetupBranchENIPodNetwork(arg0, arg1, arg2 string, arg3, arg4 *net.IPNet, arg5 int, arg6, arg7 string, arg8, arg9 int, arg10 sgpp.EnforcingMode, arg11 logger.Logger) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetupNS", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) + ret := m.ctrl.Call(m, "SetupBranchENIPodNetwork", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11) ret0, _ := ret[0].(error) return ret0 } -// SetupNS indicates an expected call of SetupNS -func (mr *MockNetworkAPIsMockRecorder) SetupNS(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9 interface{}) *gomock.Call { +// SetupBranchENIPodNetwork indicates an expected call of SetupBranchENIPodNetwork +func (mr *MockNetworkAPIsMockRecorder) SetupBranchENIPodNetwork(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupNS", reflect.TypeOf((*MockNetworkAPIs)(nil).SetupNS), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupBranchENIPodNetwork", reflect.TypeOf((*MockNetworkAPIs)(nil).SetupBranchENIPodNetwork), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11) } -// SetupPodENINetwork mocks base method -func (m *MockNetworkAPIs) SetupPodENINetwork(arg0, arg1, arg2 string, arg3, arg4 *net.IPNet, arg5 int, arg6, arg7 string, arg8, arg9 int, arg10 logger.Logger) error { +// SetupPodNetwork mocks base method +func (m *MockNetworkAPIs) SetupPodNetwork(arg0, arg1, arg2 string, arg3, arg4 *net.IPNet, arg5, arg6 int, arg7 logger.Logger) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetupPodENINetwork", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) + ret := m.ctrl.Call(m, "SetupPodNetwork", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(error) return ret0 } -// SetupPodENINetwork indicates an expected call of SetupPodENINetwork -func (mr *MockNetworkAPIsMockRecorder) SetupPodENINetwork(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 interface{}) *gomock.Call { +// SetupPodNetwork indicates an expected call of SetupPodNetwork +func (mr *MockNetworkAPIsMockRecorder) SetupPodNetwork(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupPodENINetwork", reflect.TypeOf((*MockNetworkAPIs)(nil).SetupPodENINetwork), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetupPodNetwork", reflect.TypeOf((*MockNetworkAPIs)(nil).SetupPodNetwork), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } -// TeardownNS mocks base method -func (m *MockNetworkAPIs) TeardownNS(arg0 *net.IPNet, arg1 int, arg2 logger.Logger) error { +// TeardownBranchENIPodNetwork mocks base method +func (m *MockNetworkAPIs) 
TeardownBranchENIPodNetwork(arg0 *net.IPNet, arg1 int, arg2 sgpp.EnforcingMode, arg3 logger.Logger) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TeardownNS", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "TeardownBranchENIPodNetwork", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } -// TeardownNS indicates an expected call of TeardownNS -func (mr *MockNetworkAPIsMockRecorder) TeardownNS(arg0, arg1, arg2 interface{}) *gomock.Call { +// TeardownBranchENIPodNetwork indicates an expected call of TeardownBranchENIPodNetwork +func (mr *MockNetworkAPIsMockRecorder) TeardownBranchENIPodNetwork(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TeardownNS", reflect.TypeOf((*MockNetworkAPIs)(nil).TeardownNS), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TeardownBranchENIPodNetwork", reflect.TypeOf((*MockNetworkAPIs)(nil).TeardownBranchENIPodNetwork), arg0, arg1, arg2, arg3) } -// TeardownPodENINetwork mocks base method -func (m *MockNetworkAPIs) TeardownPodENINetwork(arg0 int, arg1 logger.Logger) error { +// TeardownPodNetwork mocks base method +func (m *MockNetworkAPIs) TeardownPodNetwork(arg0 *net.IPNet, arg1 int, arg2 logger.Logger) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TeardownPodENINetwork", arg0, arg1) + ret := m.ctrl.Call(m, "TeardownPodNetwork", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } -// TeardownPodENINetwork indicates an expected call of TeardownPodENINetwork -func (mr *MockNetworkAPIsMockRecorder) TeardownPodENINetwork(arg0, arg1 interface{}) *gomock.Call { +// TeardownPodNetwork indicates an expected call of TeardownPodNetwork +func (mr *MockNetworkAPIsMockRecorder) TeardownPodNetwork(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TeardownPodENINetwork", reflect.TypeOf((*MockNetworkAPIs)(nil).TeardownPodENINetwork), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TeardownPodNetwork", reflect.TypeOf((*MockNetworkAPIs)(nil).TeardownPodNetwork), arg0, arg1, arg2) } diff --git a/cmd/routed-eni-cni-plugin/driver/utils.go b/cmd/routed-eni-cni-plugin/driver/utils.go new file mode 100644 index 0000000000..8f96819dd1 --- /dev/null +++ b/cmd/routed-eni-cni-plugin/driver/utils.go @@ -0,0 +1,35 @@ +package driver + +import ( + "syscall" + + "github.com/aws/amazon-vpc-cni-k8s/pkg/netlinkwrapper" + "github.com/vishvananda/netlink" +) + +// netLinkRuleDelAll deletes all matching route rules. 
+func netLinkRuleDelAll(netlink netlinkwrapper.NetLink, rule *netlink.Rule) error {
+	for {
+		if err := netlink.RuleDel(rule); err != nil {
+			if !containsNoSuchRule(err) {
+				return err
+			}
+			break
+		}
+	}
+	return nil
+}
+
+func containsNoSuchRule(err error) bool {
+	if errno, ok := err.(syscall.Errno); ok {
+		return errno == syscall.ENOENT
+	}
+	return false
+}
+
+func isRuleExistsError(err error) bool {
+	if errno, ok := err.(syscall.Errno); ok {
+		return errno == syscall.EEXIST
+	}
+	return false
+}
diff --git a/cmd/routed-eni-cni-plugin/driver/utils_test.go b/cmd/routed-eni-cni-plugin/driver/utils_test.go
new file mode 100644
index 0000000000..e660c43a20
--- /dev/null
+++ b/cmd/routed-eni-cni-plugin/driver/utils_test.go
@@ -0,0 +1,204 @@
+package driver
+
+import (
+	"syscall"
+	"testing"
+
+	mock_netlinkwrapper "github.com/aws/amazon-vpc-cni-k8s/pkg/netlinkwrapper/mocks"
+	"github.com/golang/mock/gomock"
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/assert"
+	"github.com/vishvananda/netlink"
+)
+
+func Test_netLinkRuleDelAll(t *testing.T) {
+	testRule := netlink.NewRule()
+	testRule.IifName = "eni00bcc08c834"
+	testRule.Priority = vlanRulePriority
+
+	type ruleDelCall struct {
+		rule *netlink.Rule
+		err  error
+	}
+
+	type fields struct {
+		ruleDelCalls []ruleDelCall
+	}
+
+	type args struct {
+		rule *netlink.Rule
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		args    args
+		wantErr error
+	}{
+		{
+			name: "single rule, succeed to delete",
+			fields: fields{
+				ruleDelCalls: []ruleDelCall{
+					{
+						rule: testRule,
+					},
+					{
+						rule: testRule,
+						err:  syscall.ENOENT,
+					},
+				},
+			},
+			args: args{
+				rule: testRule,
+			},
+		},
+		{
+			name: "single rule, failed to delete",
+			fields: fields{
+				ruleDelCalls: []ruleDelCall{
+					{
+						rule: testRule,
+						err:  errors.New("some error"),
+					},
+				},
+			},
+			args: args{
+				rule: testRule,
+			},
+			wantErr: errors.New("some error"),
+		},
+		{
+			name: "multiple rules, succeed to delete",
+			fields: fields{
+				ruleDelCalls: []ruleDelCall{
+					{
+						rule: testRule,
+					},
+					{
+						rule: testRule,
+					},
+					{
+						rule: testRule,
+						err:  syscall.ENOENT,
+					},
+				},
+			},
+			args: args{
+				rule: testRule,
+			},
+		},
+		{
+			name: "multiple rules, failed to delete",
+			fields: fields{
+				ruleDelCalls: []ruleDelCall{
+					{
+						rule: testRule,
+					},
+					{
+						rule: testRule,
+						err:  errors.New("some error"),
+					},
+				},
+			},
+			args: args{
+				rule: testRule,
+			},
+			wantErr: errors.New("some error"),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			ctrl := gomock.NewController(t)
+			defer ctrl.Finish()
+
+			netLink := mock_netlinkwrapper.NewMockNetLink(ctrl)
+			for _, call := range tt.fields.ruleDelCalls {
+				netLink.EXPECT().RuleDel(call.rule).Return(call.err)
+			}
+
+			err := netLinkRuleDelAll(netLink, tt.args.rule)
+			if tt.wantErr != nil {
+				assert.EqualError(t, err, tt.wantErr.Error())
+			} else {
+				assert.NoError(t, err)
+			}
+		})
+	}
+}
+
+func Test_containsNoSuchRule(t *testing.T) {
+	type args struct {
+		err error
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "syscall.ENOENT is rule not exists error",
+			args: args{
+				err: syscall.ENOENT,
+			},
+			want: true,
+		},
+		{
+			name: "syscall.EEXIST isn't rule not exists error",
+			args: args{
+				err: syscall.EEXIST,
+			},
+			want: false,
+		},
+		{
+			name: "non syscall error isn't rule not exists error",
+			args: args{
+				err: errors.New("some error"),
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := containsNoSuchRule(tt.args.err)
+			assert.Equal(t, tt.want, got)
+		})
+	}
+}
+
+func Test_isRuleExistsError(t *testing.T) {
+	type args struct {
+		err error
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "syscall.EEXIST is rule exists error",
+			args: args{
+				err: syscall.EEXIST,
+			},
+			want: true,
+		},
+		{
+			name: "syscall.ENOENT isn't rule exists error",
+			args: args{
+				err: syscall.ENOENT,
+			},
+			want: false,
+		},
+		{
+			name: "non syscall error isn't rule exists error",
+			args: args{
+				err: errors.New("some error"),
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := isRuleExistsError(tt.args.err)
+			assert.Equal(t, tt.want, got)
+		})
+	}
+}
diff --git a/misc/10-aws.conflist b/misc/10-aws.conflist
index 4acbfc490b..b93661eb8c 100644
--- a/misc/10-aws.conflist
+++ b/misc/10-aws.conflist
@@ -8,6 +8,7 @@
       "type": "aws-cni",
       "vethPrefix": "__VETHPREFIX__",
       "mtu": "__MTU__",
+      "podSGEnforcingMode": "__PODSGENFORCINGMODE__",
       "pluginLogFile": "__PLUGINLOGFILE__",
       "pluginLogLevel": "__PLUGINLOGLEVEL__"
     },
diff --git a/pkg/networkutils/network.go b/pkg/networkutils/network.go
index 5ace45c2ff..a49935aecb 100644
--- a/pkg/networkutils/network.go
+++ b/pkg/networkutils/network.go
@@ -27,6 +27,8 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/aws/amazon-vpc-cni-k8s/pkg/sgpp"
+
 	"github.com/aws/amazon-vpc-cni-k8s/pkg/utils/retry"
 
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -139,7 +141,6 @@ type NetworkAPIs interface {
 	GetRuleList() ([]netlink.Rule, error)
 	GetRuleListBySrc(ruleList []netlink.Rule, src net.IPNet) ([]netlink.Rule, error)
 	UpdateRuleListBySrc(ruleList []netlink.Rule, src net.IPNet) error
-	DeleteRuleListBySrc(src net.IPNet) error
 	GetLinkByMac(mac string, retryInterval time.Duration) (netlink.Link, error)
 }
@@ -151,6 +152,7 @@ type linuxNetwork struct {
 	shouldConfigureRpFilter bool
 	mtu                     int
 	vethPrefix              string
+	podSGEnforcingMode      sgpp.EnforcingMode
 
 	netLink netlinkwrapper.NetLink
 	ns      nswrapper.NS
@@ -191,6 +193,7 @@ func New() NetworkAPIs {
 		mainENIMark: getConnmark(),
 		mtu:         GetEthernetMTU(""),
 		vethPrefix:  getVethPrefixName(),
+		podSGEnforcingMode: sgpp.LoadEnforcingModeFromEnv(),
 
 		netLink: netlinkwrapper.NewNetLink(),
 		ns:      nswrapper.NewNS(),
@@ -349,7 +352,7 @@ func (n *linuxNetwork) SetupHostNetwork(vpcv4CIDRs []string, primaryMAC string,
 	// or the rp_filter check will fail.
 	// Note: Per Pod Security Group is not supported for V6 yet. So, cordoning off the PPSG rule (for now)
 	// with v4 specific check.
-	if v4Enabled && enablePodENI {
+	if v4Enabled && enablePodENI && n.podSGEnforcingMode == sgpp.EnforcingModeStrict {
 		localRule := n.netLink.NewRule()
 		localRule.Table = localRouteTable
 		localRule.Priority = localRulePriority
@@ -1066,38 +1069,6 @@ func (n *linuxNetwork) GetRuleListBySrc(ruleList []netlink.Rule, src net.IPNet)
 	return srcRuleList, nil
 }
 
-// DeleteRuleListBySrc deletes IP rules that have a matching source IP
-func (n *linuxNetwork) DeleteRuleListBySrc(src net.IPNet) error {
-	log.Infof("Delete Rule List By Src [%v]", src)
-
-	ruleList, err := n.GetRuleList()
-	if err != nil {
-		log.Errorf("DeleteRuleListBySrc: failed to get rule list %v", err)
-		return err
-	}
-
-	srcRuleList, err := n.GetRuleListBySrc(ruleList, src)
-	if err != nil {
-		log.Errorf("DeleteRuleListBySrc: failed to retrieve rule list %v", err)
-		return err
-	}
-
-	log.Infof("Remove current list [%v]", srcRuleList)
-	for _, rule := range srcRuleList {
-		if err := n.netLink.RuleDel(&rule); err != nil && !containsNoSuchRule(err) {
-			log.Errorf("Failed to cleanup old IP rule: %v", err)
-			return errors.Wrapf(err, "DeleteRuleListBySrc: failed to delete old rule")
-		}
-
-		var toDst string
-		if rule.Dst != nil {
-			toDst = rule.Dst.String()
-		}
-		log.Debugf("DeleteRuleListBySrc: Successfully removed current rule [%v] to %s", rule, toDst)
-	}
-	return nil
-}
-
 // UpdateRuleListBySrc modify IP rules that have a matching source IP
 func (n *linuxNetwork) UpdateRuleListBySrc(ruleList []netlink.Rule, src net.IPNet) error {
 	log.Debugf("Update Rule List[%v] for source[%v] ", ruleList, src)
diff --git a/pkg/networkutils/network_test.go b/pkg/networkutils/network_test.go
index 2def29ed03..e199f9aa5d 100644
--- a/pkg/networkutils/network_test.go
+++ b/pkg/networkutils/network_test.go
@@ -23,6 +23,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/aws/amazon-vpc-cni-k8s/pkg/sgpp"
+
 	"github.com/coreos/go-iptables/iptables"
 	"github.com/golang/mock/gomock"
 	"github.com/stretchr/testify/assert"
@@ -831,6 +833,7 @@ func TestSetupHostNetworkUpdateLocalRule(t *testing.T) {
 		shouldConfigureRpFilter: false,
 		mainENIMark:             defaultConnmark,
 		mtu:                     testMTU,
+		podSGEnforcingMode:      sgpp.EnforcingModeStrict,
 		vethPrefix:              eniPrefix,
 
 		netLink: mockNetLink,
diff --git a/pkg/sgpp/constants.go b/pkg/sgpp/constants.go
new file mode 100644
index 0000000000..5895bf01f3
--- /dev/null
+++ b/pkg/sgpp/constants.go
@@ -0,0 +1,15 @@
+package sgpp
+
+type EnforcingMode string
+
+const (
+	EnforcingModeStrict   EnforcingMode = "strict"
+	EnforcingModeStandard EnforcingMode = "standard"
+)
+
+const (
+	// DefaultEnforcingMode is the enforcing mode used when none is specified explicitly.
+	DefaultEnforcingMode EnforcingMode = EnforcingModeStrict
+	// envEnforcingMode is the environment variable knob that decides the EnforcingMode for the SGPP feature.
+	envEnforcingMode = "POD_SECURITY_GROUP_ENFORCING_MODE"
+)
diff --git a/pkg/sgpp/utils.go b/pkg/sgpp/utils.go
new file mode 100644
index 0000000000..b76310a418
--- /dev/null
+++ b/pkg/sgpp/utils.go
@@ -0,0 +1,32 @@
+package sgpp
+
+import "os"
+
+const vlanInterfacePrefix = "vlan"
+
+// BuildHostVethNamePrefix computes the name prefix for the host-side veth pairs of SGPP pods.
+// In "standard" mode we reuse the hostVethNamePrefix of normal pods, which defaults to "eni" but can be overridden.
+// In "strict" mode we use the dedicated "vlan" prefix, which opts the pod out of SNAT support and out of Calico's workload management.
+func BuildHostVethNamePrefix(hostVethNamePrefix string, podSGEnforcingMode EnforcingMode) string {
+	switch podSGEnforcingMode {
+	case EnforcingModeStrict:
+		return vlanInterfacePrefix
+	case EnforcingModeStandard:
+		return hostVethNamePrefix
+	default:
+		return vlanInterfacePrefix
+	}
+}
+
+// LoadEnforcingModeFromEnv tries to load the enforcing mode from the environment variable and falls back to DefaultEnforcingMode.
+func LoadEnforcingModeFromEnv() EnforcingMode {
+	envVal, _ := os.LookupEnv(envEnforcingMode)
+	switch envVal {
+	case string(EnforcingModeStrict):
+		return EnforcingModeStrict
+	case string(EnforcingModeStandard):
+		return EnforcingModeStandard
+	default:
+		return DefaultEnforcingMode
+	}
+}
diff --git a/pkg/sgpp/utils_test.go b/pkg/sgpp/utils_test.go
new file mode 100644
index 0000000000..9ce01bb839
--- /dev/null
+++ b/pkg/sgpp/utils_test.go
@@ -0,0 +1,112 @@
+package sgpp
+
+import (
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestBuildHostVethNamePrefix(t *testing.T) {
+	type args struct {
+		hostVethNamePrefix string
+		podSGEnforcingMode EnforcingMode
+	}
+	tests := []struct {
+		name string
+		args args
+		want string
+	}{
+		{
+			name: "standard mode should use configured vethNamePrefix",
+			args: args{
+				hostVethNamePrefix: "eni",
+				podSGEnforcingMode: EnforcingModeStandard,
+			},
+			want: "eni",
+		},
+		{
+			name: "strict mode should use vlan vethNamePrefix",
+			args: args{
+				hostVethNamePrefix: "eni",
+				podSGEnforcingMode: EnforcingModeStrict,
+			},
+			want: "vlan",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := BuildHostVethNamePrefix(tt.args.hostVethNamePrefix, tt.args.podSGEnforcingMode)
+			assert.Equal(t, tt.want, got)
+		})
+	}
+}
+
+func TestLoadEnforcingModeFromEnv(t *testing.T) {
+	type fields struct {
+		envVars map[string]string
+	}
+	tests := []struct {
+		name   string
+		fields fields
+		want   EnforcingMode
+	}{
+		{
+			name: "use strict mode when POD_SECURITY_GROUP_ENFORCING_MODE set to strict",
+			fields: fields{
+				envVars: map[string]string{
+					"POD_SECURITY_GROUP_ENFORCING_MODE": "strict",
+				},
+			},
+			want: EnforcingModeStrict,
+		},
+		{
+			name: "use standard mode when POD_SECURITY_GROUP_ENFORCING_MODE set to standard",
+			fields: fields{
+				envVars: map[string]string{
+					"POD_SECURITY_GROUP_ENFORCING_MODE": "standard",
+				},
+			},
+			want: EnforcingModeStandard,
+		},
+		{
+			name:   "default to strict mode when POD_SECURITY_GROUP_ENFORCING_MODE not set",
+			fields: fields{},
+			want:   EnforcingModeStrict,
+		},
+		{
+			name: "default to strict mode when POD_SECURITY_GROUP_ENFORCING_MODE incorrectly configured",
+			fields: fields{
+				envVars: map[string]string{
+					"POD_SECURITY_GROUP_ENFORCING_MODE": "unknown",
+				},
+			},
+			want: EnforcingModeStrict,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			originalEnvVars := make(map[string]string)
+			for k := range tt.fields.envVars {
+				originalV, _ := os.LookupEnv(k)
+				originalEnvVars[k] = originalV
+			}
+			defer func() {
+				for k, v := range originalEnvVars {
+					if len(v) != 0 {
+						os.Setenv(k, v)
+					} else {
+						os.Unsetenv(k)
+					}
+				}
+			}()
+
+			for k, v := range tt.fields.envVars {
+				os.Setenv(k, v)
+			}
+
+			got := LoadEnforcingModeFromEnv()
+			assert.Equal(t, tt.want, got)
+		})
+	}
+}
diff --git a/pkg/utils/cniutils/cni_utils.go b/pkg/utils/cniutils/cni_utils.go
new file mode 100644
index 0000000000..15969169a8
--- /dev/null
+++ b/pkg/utils/cniutils/cni_utils.go
@@ -0,0 +1,22 @@
+package cniutils
+
+import "github.com/containernetworking/cni/pkg/types/current"
+
+func FindInterfaceByName(ifaceList []*current.Interface, ifaceName string) (ifaceIndex int, iface *current.Interface, found bool) {
+	for ifaceIndex, iface := range ifaceList {
+		if iface.Name == ifaceName {
+			return ifaceIndex, iface, true
+		}
+	}
+	return 0, nil, false
+}
+
+func FindIPConfigsByIfaceIndex(ipConfigs []*current.IPConfig, ifaceIndex int) []*current.IPConfig {
+	var matchedIPConfigs []*current.IPConfig
+	for _, ipConfig := range ipConfigs {
+		if ipConfig.Interface != nil && *ipConfig.Interface == ifaceIndex {
+			matchedIPConfigs = append(matchedIPConfigs, ipConfig)
+		}
+	}
+	return matchedIPConfigs
+}
diff --git a/pkg/utils/cniutils/cni_utils_test.go b/pkg/utils/cniutils/cni_utils_test.go
new file mode 100644
index 0000000000..023176a1a3
--- /dev/null
+++ b/pkg/utils/cniutils/cni_utils_test.go
@@ -0,0 +1,210 @@
+package cniutils
+
+import (
+	"net"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/containernetworking/cni/pkg/types/current"
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_FindInterfaceByName(t *testing.T) {
+	type args struct {
+		ifaceList []*current.Interface
+		ifaceName string
+	}
+	tests := []struct {
+		name           string
+		args           args
+		wantIfaceIndex int
+		wantIface      *current.Interface
+		wantFound      bool
+	}{
+		{
+			name: "found the CNI interface at index 0",
+			args: args{
+				ifaceList: []*current.Interface{
+					{
+						Name: "eni8ea2c11fe35",
+					},
+					{
+						Name: "eth0",
+					},
+				},
+				ifaceName: "eni8ea2c11fe35",
+			},
+			wantIfaceIndex: 0,
+			wantIface: &current.Interface{
+				Name: "eni8ea2c11fe35",
+			},
+			wantFound: true,
+		},
+		{
+			name: "found the CNI interface at index 1",
+			args: args{
+				ifaceList: []*current.Interface{
+					{
+						Name: "eth0",
+					},
+					{
+						Name: "eni8ea2c11fe35",
+					},
+				},
+				ifaceName: "eni8ea2c11fe35",
+			},
+			wantIfaceIndex: 1,
+			wantIface: &current.Interface{
+				Name: "eni8ea2c11fe35",
+			},
+			wantFound: true,
+		},
+		{
+			name: "didn't find the CNI interface",
+			args: args{
+				ifaceList: []*current.Interface{
+					{
+						Name: "eth0",
+					},
+					{
+						Name: "eni8ea2c11fe35",
+					},
+				},
+				ifaceName: "enixxxxx",
+			},
+			wantFound: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			gotIfaceIndex, gotIface, gotFound := FindInterfaceByName(tt.args.ifaceList, tt.args.ifaceName)
+			assert.Equal(t, tt.wantFound, gotFound)
+			if tt.wantFound {
+				assert.Equal(t, tt.wantIfaceIndex, gotIfaceIndex)
+				assert.Equal(t, tt.wantIface, gotIface)
+			}
+		})
+	}
+}
+
+func Test_FindIPConfigsByIfaceIndex(t *testing.T) {
+	type args struct {
+		ipConfigs  []*current.IPConfig
+		ifaceIndex int
+	}
+	tests := []struct {
+		name string
+		args args
+		want []*current.IPConfig
+	}{
+		{
+			name: "single matched IPConfig",
+			args: args{
+				ipConfigs: []*current.IPConfig{
+					{
+						Interface: aws.Int(1),
+						Address: net.IPNet{
+							IP: net.ParseIP("192.168.1.1"),
+						},
+					},
+					{
+						Interface: aws.Int(2),
+						Address: net.IPNet{
+							IP: net.ParseIP("192.168.1.2"),
+						},
+					},
+				},
+				ifaceIndex: 1,
+			},
+			want: []*current.IPConfig{
+				{
+					Interface: aws.Int(1),
+					Address: net.IPNet{
+						IP: net.ParseIP("192.168.1.1"),
+					},
+				},
+			},
+		},
+		{
+			name: "multiple matched IPConfig",
+			args: args{
+				ipConfigs: []*current.IPConfig{
+					{
+						Interface: aws.Int(1),
+						Address: net.IPNet{
+							IP: net.ParseIP("192.168.1.1"),
+						},
+					},
+					{
+						Interface: aws.Int(1),
+						Address: net.IPNet{
+							IP: net.ParseIP("192.168.1.2"),
+						},
+					},
+					{
+						Interface: aws.Int(2),
+						Address: net.IPNet{
+							IP: net.ParseIP("192.168.1.3"),
+						},
+					},
+				},
+				ifaceIndex: 1,
+			},
+			want: []*current.IPConfig{
+				{
+					Interface: aws.Int(1),
+					Address:
net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + }, + }, + { + Interface: aws.Int(1), + Address: net.IPNet{ + IP: net.ParseIP("192.168.1.2"), + }, + }, + }, + }, + { + name: "none matched IPConfig", + args: args{ + ipConfigs: []*current.IPConfig{ + { + Interface: aws.Int(2), + Address: net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + }, + }, + { + Interface: aws.Int(2), + Address: net.IPNet{ + IP: net.ParseIP("192.168.1.2"), + }, + }, + }, + ifaceIndex: 1, + }, + want: nil, + }, + { + name: "interface is not set", + args: args{ + ipConfigs: []*current.IPConfig{ + { + Address: net.IPNet{ + IP: net.ParseIP("192.168.1.1"), + }, + }, + }, + ifaceIndex: 1, + }, + want: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := FindIPConfigsByIfaceIndex(tt.args.ipConfigs, tt.args.ifaceIndex) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/scripts/entrypoint.sh b/scripts/entrypoint.sh index 42ea714b8a..01dc61468d 100755 --- a/scripts/entrypoint.sh +++ b/scripts/entrypoint.sh @@ -72,6 +72,16 @@ validate_env_var() exit 1 ;; esac + + case ${POD_SECURITY_GROUP_ENFORCING_MODE} in + strict|standard) + ;; + *) + log_in_json error "POD_SECURITY_GROUP_ENFORCING_MODE must be set to either strict or standard" + exit 1 + ;; + esac + if is_prefix_delegation_enabled && unsupported_prefix_target_conf ; then log_in_json error "Setting WARM_PREFIX_TARGET = 0 is not supported while WARM_IP_TARGET/MINIMUM_IP_TARGET is not set. Please configure either one of the WARM_{PREFIX/IP}_TARGET or MINIMUM_IP_TARGET env variables" exit 1 @@ -93,6 +103,7 @@ HOST_CNI_BIN_PATH=${HOST_CNI_BIN_PATH:-"/host/opt/cni/bin"} HOST_CNI_CONFDIR_PATH=${HOST_CNI_CONFDIR_PATH:-"/host/etc/cni/net.d"} AWS_VPC_K8S_CNI_VETHPREFIX=${AWS_VPC_K8S_CNI_VETHPREFIX:-"eni"} AWS_VPC_ENI_MTU=${AWS_VPC_ENI_MTU:-"9001"} +POD_SECURITY_GROUP_ENFORCING_MODE=${POD_SECURITY_GROUP_ENFORCING_MODE:-"strict"} AWS_VPC_K8S_PLUGIN_LOG_FILE=${AWS_VPC_K8S_PLUGIN_LOG_FILE:-"/var/log/aws-routed-eni/plugin.log"} AWS_VPC_K8S_PLUGIN_LOG_LEVEL=${AWS_VPC_K8S_PLUGIN_LOG_LEVEL:-"Debug"} AWS_VPC_K8S_EGRESS_V4_PLUGIN_LOG_FILE=${AWS_VPC_K8S_EGRESS_V4_PLUGIN_LOG_FILE:-"/var/log/aws-routed-eni/egress-v4-plugin.log"} @@ -170,6 +181,7 @@ log_in_json info "Copying config file ... " sed \ -e s~__VETHPREFIX__~"${AWS_VPC_K8S_CNI_VETHPREFIX}"~g \ -e s~__MTU__~"${AWS_VPC_ENI_MTU}"~g \ + -e s~__PODSGENFORCINGMODE__~"${POD_SECURITY_GROUP_ENFORCING_MODE}"~g \ -e s~__PLUGINLOGFILE__~"${AWS_VPC_K8S_PLUGIN_LOG_FILE}"~g \ -e s~__PLUGINLOGLEVEL__~"${AWS_VPC_K8S_PLUGIN_LOG_LEVEL}"~g \ -e s~__EGRESSV4PLUGINLOGFILE__~"${AWS_VPC_K8S_EGRESS_V4_PLUGIN_LOG_FILE}"~g \
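
The loop in `netLinkRuleDelAll` exists because netlink's `RuleDel` removes at most one matching rule per call. Here is the same delete-until-ENOENT idiom as a minimal standalone sketch written directly against `vishvananda/netlink` (the helper itself is unexported); the interface name and priority are illustrative only:

```go
package main

import (
	"syscall"

	"github.com/vishvananda/netlink"
)

// deleteAllMatchingRules keeps deleting until the kernel reports ENOENT,
// i.e. no rule matching the filter is left.
func deleteAllMatchingRules(rule *netlink.Rule) error {
	for {
		if err := netlink.RuleDel(rule); err != nil {
			if errno, ok := err.(syscall.Errno); ok && errno == syscall.ENOENT {
				return nil
			}
			return err
		}
	}
}

func main() {
	// Illustrative filter: every rule with this input interface and priority.
	rule := netlink.NewRule()
	rule.IifName = "eni00bcc08c834"
	rule.Priority = 10

	// Requires CAP_NET_ADMIN to take effect on a real system.
	_ = deleteAllMatchingRules(rule)
}
```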
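A minimal sketch of how the new `pkg/sgpp` helpers fit together; the `main` wrapper is written for illustration, and only the `sgpp` functions and constants come from this diff:

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/amazon-vpc-cni-k8s/pkg/sgpp"
)

func main() {
	// LoadEnforcingModeFromEnv reads POD_SECURITY_GROUP_ENFORCING_MODE;
	// unset, empty, or unrecognized values fall back to "strict".
	os.Setenv("POD_SECURITY_GROUP_ENFORCING_MODE", "standard")
	mode := sgpp.LoadEnforcingModeFromEnv() // EnforcingModeStandard

	// "standard" keeps the configured veth prefix, so SGPP pods look like
	// normal pods to host tooling; "strict" forces the dedicated "vlan" prefix.
	fmt.Println(sgpp.BuildHostVethNamePrefix("eni", mode))                     // "eni"
	fmt.Println(sgpp.BuildHostVethNamePrefix("eni", sgpp.EnforcingModeStrict)) // "vlan"
}
```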
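A sketch of the `cniutils` helpers as a CNI DEL path might use them to recover the container interface and its IPs from a previous result; the `prevResult` literal here is fabricated for illustration:

```go
package main

import (
	"fmt"
	"net"

	"github.com/aws/amazon-vpc-cni-k8s/pkg/utils/cniutils"
	"github.com/containernetworking/cni/pkg/types/current"
)

func intPtr(i int) *int { return &i }

func main() {
	// Fabricated prevResult: index 1 is the container-side eth0, and one
	// IPConfig points back at it by interface index.
	prevResult := &current.Result{
		Interfaces: []*current.Interface{
			{Name: "eni8ea2c11fe35"},
			{Name: "eth0"},
		},
		IPs: []*current.IPConfig{
			{
				Interface: intPtr(1),
				Address:   net.IPNet{IP: net.ParseIP("192.168.1.10"), Mask: net.CIDRMask(32, 32)},
			},
		},
	}

	// Locate the container interface, then collect the IPConfigs that
	// reference it by index.
	if ifaceIndex, _, found := cniutils.FindInterfaceByName(prevResult.Interfaces, "eth0"); found {
		for _, ipCfg := range cniutils.FindIPConfigsByIfaceIndex(prevResult.IPs, ifaceIndex) {
			fmt.Println(ipCfg.Address.String()) // 192.168.1.10/32
		}
	}
}
```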
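Finally, the conflist template and entrypoint changes above surface the mode to the CNI binary as `podSGEnforcingMode`. A hedged sketch of the parsing side, using a local mirror of just the relevant fields (the real `NetConf` struct lives in the plugin's `main` package and carries more fields); it follows the same default-then-overlay order the plugin uses:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/aws/amazon-vpc-cni-k8s/pkg/sgpp"
)

// netConf mirrors only the conflist fields relevant to this sketch.
type netConf struct {
	VethPrefix         string             `json:"vethPrefix"`
	PodSGEnforcingMode sgpp.EnforcingMode `json:"podSGEnforcingMode"`
}

func main() {
	// What entrypoint.sh would render from 10-aws.conflist for standard mode.
	raw := []byte(`{"vethPrefix":"eni","podSGEnforcingMode":"standard"}`)

	// Seed the default first, then overlay whatever the conflist specifies.
	conf := netConf{PodSGEnforcingMode: sgpp.DefaultEnforcingMode}
	if err := json.Unmarshal(raw, &conf); err != nil {
		panic(err)
	}
	fmt.Println(sgpp.BuildHostVethNamePrefix(conf.VethPrefix, conf.PodSGEnforcingMode)) // "eni"
}
```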