Skip to content

Commit

Permalink
support new SGPP standard mode
Browse files Browse the repository at this point in the history
  • Loading branch information
M00nF1sh committed Mar 7, 2022
1 parent c93b255 commit a8c14e8
Show file tree
Hide file tree
Showing 11 changed files with 2,695 additions and 862 deletions.
123 changes: 77 additions & 46 deletions cmd/routed-eni-cni-plugin/cni.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"github.com/aws/amazon-vpc-cni-k8s/pkg/sgpp"
"net"
"os"
"runtime"
Expand All @@ -45,7 +46,6 @@ import (

const ipamdAddress = "127.0.0.1:50051"

const vlanInterfacePrefix = "vlan"
const dummyVlanInterfacePrefix = "dummy"

var version string
Expand All @@ -62,6 +62,9 @@ type NetConf struct {
// MTU for eth0
MTU string `json:"mtu"`

// PodSGEnforcingMode is the enforcing mode for Security groups for pods feature
PodSGEnforcingMode sgpp.EnforcingMode `json:"podSGEnforcingMode"`

PluginLogFile string `json:"pluginLogFile"`

PluginLogLevel string `json:"pluginLogLevel"`
Expand Down Expand Up @@ -91,8 +94,9 @@ func init() {
func LoadNetConf(bytes []byte) (*NetConf, logger.Logger, error) {
// Default config
conf := NetConf{
MTU: "9001",
VethPrefix: "eni",
MTU: "9001",
VethPrefix: "eni",
PodSGEnforcingMode: sgpp.DefaultEnforcingMode,
}

if err := json.Unmarshal(bytes, &conf); err != nil {
Expand Down Expand Up @@ -208,9 +212,10 @@ func add(args *skel.CmdArgs, cniTypes typeswrapper.CNITYPES, grpcClient grpcwrap

// Non-zero value means pods are using branch ENI
if r.PodVlanId != 0 {
hostVethName = generateHostVethName(vlanInterfacePrefix, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
err = driverClient.SetupPodENINetwork(hostVethName, args.IfName, args.Netns, v4Addr, v6Addr, int(r.PodVlanId), r.PodENIMAC,
r.PodENISubnetGW, int(r.ParentIfIndex), mtu, log)
hostVethNamePrefix := sgpp.BuildHostVethNamePrefix(conf.VethPrefix, conf.PodSGEnforcingMode)
hostVethName = generateHostVethName(hostVethNamePrefix, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
err = driverClient.SetupBranchENIPodNetwork(hostVethName, args.IfName, args.Netns, v4Addr, v6Addr, int(r.PodVlanId), r.PodENIMAC,
r.PodENISubnetGW, int(r.ParentIfIndex), mtu, conf.PodSGEnforcingMode, log)

// This is a dummyVlanInterfaceName generated to identify dummyVlanInterface
// which will be created for PPSG scenario to pass along the vlanId information
Expand All @@ -226,7 +231,7 @@ func add(args *skel.CmdArgs, cniTypes typeswrapper.CNITYPES, grpcClient grpcwrap
// build hostVethName
// Note: the maximum length for linux interface name is 15
hostVethName = generateHostVethName(conf.VethPrefix, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
err = driverClient.SetupNS(hostVethName, args.IfName, args.Netns, v4Addr, v6Addr, int(r.DeviceNumber), r.VPCv4CIDRs, r.UseExternalSNAT, mtu, log)
err = driverClient.SetupPodNetwork(hostVethName, args.IfName, args.Netns, v4Addr, v6Addr, int(r.DeviceNumber), mtu, log)
}

if err != nil {
Expand Down Expand Up @@ -322,37 +327,12 @@ func del(args *skel.CmdArgs, cniTypes typeswrapper.CNITYPES, grpcClient grpcwrap
log.Info("Netns() is empty, so network already cleanedup. Nothing to do")
return nil
}
prevResult, ok := conf.PrevResult.(*current.Result)

// Try to use prevResult if available
// prevResult might not be availabe, if we are still using older cni spec < 0.4.0.
// So we should fallback to the old clean up method
if ok {
dummyVlanInterfaceName := generateHostVethName(dummyVlanInterfacePrefix, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
for _, iface := range prevResult.Interfaces {
if iface.Name == dummyVlanInterfaceName {
podVlanId, err := strconv.Atoi(iface.Mac)
if err != nil {
log.Errorf("Failed to parse vlanId from prevResult: %v", err)
return errors.Wrap(err, "del cmd: failed to parse vlanId from prevResult")
}

// podVlanID can not be 0 as we add dummyVlanInterface only for ppsg
// if it is 0 then we should return an error
if podVlanId == 0 {
log.Errorf("Found SG pod:%s namespace:%s with 0 vlanID", k8sArgs.K8S_POD_NAME, k8sArgs.K8S_POD_NAMESPACE)
return errors.Wrap(err, "del cmd: found Incorrect 0 vlandId for ppsg")
}

err = cleanUpPodENI(podVlanId, log, args.ContainerID, driverClient)
if err != nil {
return err
}
log.Infof("Received del network response for pod %s namespace %s sandbox %s with vlanId: %v", string(k8sArgs.K8S_POD_NAME),
string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID), podVlanId)
return nil
}
}
handled, err := tryDelWithPrevResult(driverClient, conf, k8sArgs, args.IfName, log)
if err != nil {
return errors.Wrap(err, "del cmd: failed to delete with prevResult")
}
if handled {
return nil
}

// notify local IP address manager to free secondary IP
Expand Down Expand Up @@ -418,9 +398,9 @@ func del(args *skel.CmdArgs, cniTypes typeswrapper.CNITYPES, grpcClient grpcwrap
}

if r.PodVlanId != 0 {
err = driverClient.TeardownPodENINetwork(int(r.PodVlanId), log)
err = driverClient.TeardownBranchENIPodNetwork(addr, int(r.PodVlanId), conf.PodSGEnforcingMode, log)
} else {
err = driverClient.TeardownNS(addr, int(r.DeviceNumber), log)
err = driverClient.TeardownPodNetwork(addr, int(r.DeviceNumber), log)
}

if err != nil {
Expand All @@ -434,14 +414,65 @@ func del(args *skel.CmdArgs, cniTypes typeswrapper.CNITYPES, grpcClient grpcwrap
return nil
}

func cleanUpPodENI(podVlanId int, log logger.Logger, containerId string, driverClient driver.NetworkAPIs) error {
err := driverClient.TeardownPodENINetwork(podVlanId, log)
// tryDelWithPrevResult will try to process CNI delete request without IPAMD.
// returns true if the del request is handled.
func tryDelWithPrevResult(driverClient driver.NetworkAPIs, conf *NetConf, k8sArgs K8sArgs, contVethName string, log logger.Logger) (bool, error) {
// prevResult might not be available, if we are still using older cni spec < 0.4.0.
prevResult, ok := conf.PrevResult.(*current.Result)
if !ok {
return false, nil
}

dummyIfaceName := generateHostVethName(dummyVlanInterfacePrefix, string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_NAME))
_, dummyIface, found := findCNIInterfaceByName(prevResult.Interfaces, dummyIfaceName)
if !found {
return false, nil
}

podVlanID, err := strconv.Atoi(dummyIface.Mac)
if err != nil {
log.Errorf("Failed on TeardownPodNetwork for container ID %s: %v",
containerId, err)
return errors.Wrap(err, "del cmd: failed on tear down pod network")
log.Errorf("Failed to parse vlanId from prevResult: %v", err)
return true, errors.Wrap(err, "failed to parse vlanID from prevResult")
}
return nil
if podVlanID == 0 {
log.Errorf("Found SG pod:%s namespace:%s with 0 vlanID", k8sArgs.K8S_POD_NAME, k8sArgs.K8S_POD_NAMESPACE)
return true, errors.Wrap(err, "found Incorrect 0 vlanID")
}

containerIfaceIndex, _, found := findCNIInterfaceByName(prevResult.Interfaces, contVethName)
if !found {
return false, nil
}
containerIPs := findCNIIPConfigsByIfaceIndex(prevResult.IPs, containerIfaceIndex)
if len(containerIPs) != 1 {
return false, nil
}
containerIP := containerIPs[0].Address
if err := driverClient.TeardownBranchENIPodNetwork(&containerIP, podVlanID, conf.PodSGEnforcingMode, log); err != nil {
return true, err
}
log.Infof("Received del network response for pod %s namespace %s sandbox %s with vlanID: %v", string(k8sArgs.K8S_POD_NAME),
string(k8sArgs.K8S_POD_NAMESPACE), string(k8sArgs.K8S_POD_INFRA_CONTAINER_ID), podVlanID)
return true, nil
}

// findCNIInterfaceByName looks up an interface by name within the given
// prevResult interface list. It reports the matching interface's index, the
// interface itself, and whether a match was found.
func findCNIInterfaceByName(ifaceList []*current.Interface, ifaceName string) (ifaceIndex int, iface *current.Interface, found bool) {
	for i, candidate := range ifaceList {
		if candidate.Name != ifaceName {
			continue
		}
		return i, candidate, true
	}
	return 0, nil, false
}

// findCNIIPConfigsByIfaceIndex collects every IP config whose Interface pointer
// refers to the given interface index. IP configs with a nil Interface are skipped.
func findCNIIPConfigsByIfaceIndex(ipConfigs []*current.IPConfig, ifaceIndex int) []*current.IPConfig {
	var matches []*current.IPConfig
	for _, cfg := range ipConfigs {
		if cfg.Interface == nil {
			continue
		}
		if *cfg.Interface != ifaceIndex {
			continue
		}
		matches = append(matches, cfg)
	}
	return matches
}

func main() {
Expand Down
Loading

0 comments on commit a8c14e8

Please sign in to comment.