diff --git a/cmd/yurt-controller-manager/app/controllermanager.go b/cmd/yurt-controller-manager/app/controllermanager.go
index 9bcfc9b9d16..58724e8d3cc 100644
--- a/cmd/yurt-controller-manager/app/controllermanager.go
+++ b/cmd/yurt-controller-manager/app/controllermanager.go
@@ -49,11 +49,11 @@ import (
     "k8s.io/client-go/tools/leaderelection/resourcelock"
     cliflag "k8s.io/component-base/cli/flag"
     "k8s.io/component-base/cli/globalflag"
+    "k8s.io/component-base/version"
     "k8s.io/klog"
     genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
     "k8s.io/kubernetes/pkg/controller"
     utilflag "k8s.io/kubernetes/pkg/util/flag"
-    "k8s.io/kubernetes/pkg/version"
 )

 const (
diff --git a/cmd/yurt-controller-manager/app/core.go b/cmd/yurt-controller-manager/app/core.go
index 237ad2efba6..1cf4a341dde 100644
--- a/cmd/yurt-controller-manager/app/core.go
+++ b/cmd/yurt-controller-manager/app/core.go
@@ -26,13 +26,11 @@ import (
     "time"

     lifecyclecontroller "github.com/openyurtio/openyurt/pkg/controller/nodelifecycle"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
-    "k8s.io/kubernetes/pkg/features"
 )

 func startNodeLifecycleController(ctx ControllerContext) (http.Handler, bool, error) {
     lifecycleController, err := lifecyclecontroller.NewNodeLifecycleController(
-        ctx.InformerFactory.Coordination().V1beta1().Leases(),
+        ctx.InformerFactory.Coordination().V1().Leases(),
         ctx.InformerFactory.Core().V1().Pods(),
         ctx.InformerFactory.Core().V1().Nodes(),
         ctx.InformerFactory.Apps().V1().DaemonSets(),
@@ -48,8 +46,6 @@ func startNodeLifecycleController(ctx ControllerContext) (http.Handler, bool, er
         ctx.ComponentConfig.NodeLifecycleController.LargeClusterSizeThreshold,
         ctx.ComponentConfig.NodeLifecycleController.UnhealthyZoneThreshold,
         ctx.ComponentConfig.NodeLifecycleController.EnableTaintManager,
-        utilfeature.DefaultFeatureGate.Enabled(features.TaintBasedEvictions),
-        utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition),
     )
     if err != nil {
         return nil, true, err
diff --git a/cmd/yurt-controller-manager/controller-manager.go b/cmd/yurt-controller-manager/controller-manager.go
index d54212fd3eb..157f6de5380 100644
--- a/cmd/yurt-controller-manager/controller-manager.go
+++ b/cmd/yurt-controller-manager/controller-manager.go
@@ -28,8 +28,8 @@ import (
     "github.com/openyurtio/openyurt/cmd/yurt-controller-manager/app"
     "k8s.io/component-base/logs"
-    _ "k8s.io/kubernetes/pkg/util/prometheusclientgo" // load all the prometheus client-go plugin
-    _ "k8s.io/kubernetes/pkg/version/prometheus"      // for version metric registration
+    _ "k8s.io/component-base/metrics/prometheus/clientgo" // load all the prometheus client-go plugin
+    _ "k8s.io/component-base/metrics/prometheus/version"  // for version metric registration
 )

 func main() {
diff --git a/cmd/yurthub/app/config/config.go b/cmd/yurthub/app/config/config.go
index cfdd03de0b9..3325ae90a17 100644
--- a/cmd/yurthub/app/config/config.go
+++ b/cmd/yurthub/app/config/config.go
@@ -24,6 +24,9 @@ import (
     "github.com/openyurtio/openyurt/cmd/yurthub/app/options"
     "github.com/openyurtio/openyurt/pkg/projectinfo"
+    "github.com/openyurtio/openyurt/pkg/yurthub/cachemanager"
+    "github.com/openyurtio/openyurt/pkg/yurthub/kubernetes/serializer"
+    "github.com/openyurtio/openyurt/pkg/yurthub/storage/factory"
     "k8s.io/klog"
 )
@@ -48,6 +51,8 @@ type YurtHubConfiguration struct {
     EnableDummyIf        bool
     EnableIptables       bool
     HubAgentDummyIfName  string
+    StorageWrapper       cachemanager.StorageWrapper
+    SerializerManager    *serializer.SerializerManager
 }

 // Complete converts *options.YurtHubOptions to *YurtHubConfiguration
@@ -57,6 +62,14 @@ func Complete(options *options.YurtHubOptions) (*YurtHubConfiguration, error) {
         return nil, err
     }

+    storageManager, err := factory.CreateStorage()
+    if err != nil {
+        klog.Errorf("could not create storage manager, %v", err)
+        return nil, err
+    }
+    storageWrapper := cachemanager.NewStorageWrapper(storageManager)
+    serializerManager := serializer.NewSerializerManager()
+
     hubServerAddr := net.JoinHostPort(options.YurtHubHost, options.YurtHubPort)
     proxyServerAddr := net.JoinHostPort(options.YurtHubHost, options.YurtHubProxyPort)
     proxyServerDummyAddr := net.JoinHostPort(options.HubAgentDummyIfIP, options.YurtHubProxyPort)
@@ -79,6 +92,8 @@ func Complete(options *options.YurtHubOptions) (*YurtHubConfiguration, error) {
         EnableDummyIf:        options.EnableDummyIf,
         EnableIptables:       options.EnableIptables,
         HubAgentDummyIfName:  options.HubAgentDummyIfName,
+        StorageWrapper:       storageWrapper,
+        SerializerManager:    serializerManager,
     }

     return cfg, nil
diff --git a/cmd/yurthub/app/start.go b/cmd/yurthub/app/start.go
index 47e29f387fb..137dfe8cc13 100644
--- a/cmd/yurthub/app/start.go
+++ b/cmd/yurthub/app/start.go
@@ -28,11 +28,9 @@ import (
     "github.com/openyurtio/openyurt/pkg/yurthub/certificate/kubelet"
     "github.com/openyurtio/openyurt/pkg/yurthub/gc"
     "github.com/openyurtio/openyurt/pkg/yurthub/healthchecker"
-    "github.com/openyurtio/openyurt/pkg/yurthub/kubernetes/serializer"
     "github.com/openyurtio/openyurt/pkg/yurthub/network"
     "github.com/openyurtio/openyurt/pkg/yurthub/proxy"
     "github.com/openyurtio/openyurt/pkg/yurthub/server"
-    "github.com/openyurtio/openyurt/pkg/yurthub/storage/factory"
     "github.com/openyurtio/openyurt/pkg/yurthub/transport"

     "github.com/spf13/cobra"
@@ -81,35 +79,6 @@ func NewCmdStartYurtHub(stopCh <-chan struct{}) *cobra.Command {
 // Run runs the YurtHubConfiguration. This should never exit
 func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error {
     trace := 1
-    klog.Infof("%d. new transport manager for healthz client", trace)
-    transportManager, err := transport.NewTransportManager(cfg.HeartbeatTimeoutSeconds, stopCh)
-    if err != nil {
-        klog.Errorf("could not new transport manager, %v", err)
-        return err
-    }
-    trace++
-
-    klog.Infof("%d. create storage manager", trace)
-    storageManager, err := factory.CreateStorage()
-    if err != nil {
-        klog.Errorf("could not create storage manager, %v", err)
-        return err
-    }
-    storageWrapper := cachemanager.NewStorageWrapper(storageManager)
-    trace++
-
-    klog.Infof("%d. new serializer manager", trace)
-    serializerManager := serializer.NewSerializerManager()
-    trace++
-
-    klog.Infof("%d. new cache manager with storage wrapper and serializer manager", trace)
-    cacheMgr, err := cachemanager.NewCacheManager(storageWrapper, serializerManager)
-    if err != nil {
-        klog.Errorf("could not new cache manager, %v", err)
-        return err
-    }
-    trace++
-
     klog.Infof("%d. register cert managers", trace)
     cmr := certificate.NewCertificateManagerRegistry()
     kubelet.Register(cmr)
@@ -124,16 +93,16 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error {
     }
     trace++

-    klog.Infof("%d. update transport manager", trace)
-    err = transportManager.UpdateTransport(certManager)
+    klog.Infof("%d. new transport manager", trace)
+    transportManager, err := transport.NewTransportManager(certManager, stopCh)
     if err != nil {
-        klog.Errorf("could not update transport manager, %v", err)
+        klog.Errorf("could not new transport manager, %v", err)
         return err
     }
     trace++

     klog.Infof("%d. create health checker for remote servers ", trace)
-    healthChecker, err := healthchecker.NewHealthChecker(cfg, transportManager, storageWrapper, stopCh)
+    healthChecker, err := healthchecker.NewHealthChecker(cfg, transportManager, stopCh)
     if err != nil {
         klog.Errorf("could not new health checker, %v", err)
         return err
@@ -141,8 +110,16 @@ func Run(cfg *config.YurtHubConfiguration, stopCh <-chan struct{}) error {
     healthChecker.Run()
     trace++

+    klog.Infof("%d. new cache manager with storage wrapper and serializer manager", trace)
+    cacheMgr, err := cachemanager.NewCacheManager(cfg.StorageWrapper, cfg.SerializerManager)
+    if err != nil {
+        klog.Errorf("could not new cache manager, %v", err)
+        return err
+    }
+    trace++
+
     klog.Infof("%d. new gc manager for node %s, and gc frequency is a random time between %d min and %d min", trace, cfg.NodeName, cfg.GCFrequency, 3*cfg.GCFrequency)
-    gcMgr, err := gc.NewGCManager(cfg, storageManager, transportManager, stopCh)
+    gcMgr, err := gc.NewGCManager(cfg, transportManager, stopCh)
     if err != nil {
         klog.Errorf("could not new gc manager, %v", err)
         return err
diff --git a/docs/proposals/20210301-enhancement_of_YurtHub_caching_ability.md b/docs/proposals/20210301-enhancement_of_YurtHub_caching_ability.md
index 89236b51038..4c7300f6115 100644
--- a/docs/proposals/20210301-enhancement_of_YurtHub_caching_ability.md
+++ b/docs/proposals/20210301-enhancement_of_YurtHub_caching_ability.md
@@ -140,7 +140,7 @@ CacheManager provides methods for managing resources cached on edge nodes, which
 ```go
 // CacheManager is an adaptor to cache runtime object data into backend storage
 type CacheManager interface {
-    CacheResponse(ctx context.Context, prc io.ReadCloser, stopCh <-chan struct{}) error
+    CacheResponse(req *http.Request, prc io.ReadCloser, stopCh <-chan struct{}) error
     QueryCache(req *http.Request) (runtime.Object, error)
     UpdateCacheAgents(agents []string) error
     ListCacheAgents() []string
@@ -440,4 +440,4 @@ func (hl *HandlerLayer) SelectAndProcess(obj runtime.Object) (runtime.Object, bo
 - [ ] 03/01/2021: Proposed idea.
 - [ ] 03/01/2021: Commit the PR about delete [resourceToKindMap](https://github.com/openyurtio/openyurt/blob/4d7463a40801c29d09c4f7d10ba46b73cb019915/pkg/yurthub/cachemanager/cache_manager.go#L46) and [resourceToListKindMap](https://github.com/openyurtio/openyurt/blob/4d7463a40801c29d09c4f7d10ba46b73cb019915/pkg/yurthub/cachemanager/cache_manager.go#L62) (https://github.com/openyurtio/openyurt/pull/225)
-- [ ] 03/28/2021: Update this proposal
\ No newline at end of file
+- [ ] 03/28/2021: Update this proposal
diff --git a/go.mod b/go.mod
index d8e24393138..a0e3ec6ef9e 100644
--- a/go.mod
+++ b/go.mod
@@ -11,14 +11,12 @@ require (
     github.com/emicklei/go-restful v2.12.0+incompatible // indirect
     github.com/evanphx/json-patch v4.5.0+incompatible // indirect
     github.com/go-openapi/spec v0.19.8 // indirect
-    github.com/google/gofuzz v1.1.0 // indirect
     github.com/google/uuid v1.1.1
     github.com/googleapis/gnostic v0.3.1 // indirect
     github.com/gorilla/mux v1.7.4
     github.com/hashicorp/golang-lru v0.5.4 // indirect
     github.com/imdario/mergo v0.3.9 // indirect
     github.com/json-iterator/go v1.1.10 // indirect
-    github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
     github.com/onsi/ginkgo v1.13.0
     github.com/onsi/gomega v1.10.1
     github.com/opencontainers/go-digest v1.0.0 // indirect
@@ -26,54 +24,53 @@ require (
     github.com/prometheus/procfs v0.0.11 // indirect
     github.com/spf13/cobra v1.0.0
     github.com/spf13/pflag v1.0.5
-    github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e
+    github.com/vishvananda/netlink v1.0.0
     golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
     golang.org/x/text v0.3.3 // indirect
     golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect
     google.golang.org/grpc v1.27.0
     gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect
     gopkg.in/square/go-jose.v2 v2.5.1 // indirect
-    k8s.io/api v0.17.7
-    k8s.io/apiextensions-apiserver v0.17.7 // indirect
-    k8s.io/apimachinery v0.17.7
-    k8s.io/apiserver v0.16.9
-    k8s.io/client-go v0.17.7
+    k8s.io/api v0.18.8
+    k8s.io/apimachinery v0.18.8
+    k8s.io/apiserver v0.18.8
+    k8s.io/client-go v0.18.8
     k8s.io/cluster-bootstrap v0.0.0
-    k8s.io/component-base v0.16.9
+    k8s.io/component-base v0.18.8
     k8s.io/klog v1.0.0
     k8s.io/klog/v2 v2.0.0
-    k8s.io/kubernetes v1.18.3
+    k8s.io/kubernetes v1.18.8
     k8s.io/utils v0.0.0-20200603063816-c1c6865ac451
     sigs.k8s.io/apiserver-network-proxy v0.0.15
-    sigs.k8s.io/yaml v1.2.0 // indirect
 )

 replace (
     github.com/prometheus/client_golang => github.com/prometheus/client_golang v0.9.2
     google.golang.org/grpc v1.27.0 => google.golang.org/grpc v1.26.0
-    k8s.io/api => k8s.io/api v0.16.9
-    k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.16.9
-    k8s.io/apimachinery => k8s.io/apimachinery v0.16.10-beta.0
-    k8s.io/apiserver => k8s.io/apiserver v0.16.9
-    k8s.io/cli-runtime => k8s.io/cli-runtime v0.16.9
-    k8s.io/client-go => k8s.io/client-go v0.16.9
-    k8s.io/cloud-provider => k8s.io/cloud-provider v0.16.9
-    k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.16.9
-    k8s.io/code-generator => k8s.io/code-generator v0.16.10-beta.0
-    k8s.io/component-base => k8s.io/component-base v0.16.9
-    k8s.io/cri-api => k8s.io/cri-api v0.16.13-rc.0
-    k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.16.9
-    k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.16.9
-    k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.16.9
-    k8s.io/kube-proxy => k8s.io/kube-proxy v0.16.9
-    k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.16.9
-    k8s.io/kubectl => k8s.io/kubectl v0.16.9
-    k8s.io/kubelet => k8s.io/kubelet v0.16.9
-    k8s.io/kubernetes => github.com/kubernetes/kubernetes v1.16.9
-    k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.16.9
-    k8s.io/metrics => k8s.io/metrics v0.16.9
-    k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.16.9
-    sigs.k8s.io/apiserver-network-proxy/konnectivity-client => sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15
+    k8s.io/api => k8s.io/api v0.18.8
+    k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.8
+    k8s.io/apimachinery => k8s.io/apimachinery v0.18.9-rc.0
+    k8s.io/apiserver => k8s.io/apiserver v0.18.8
+    k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.8
+    k8s.io/client-go => k8s.io/client-go v0.18.8
+    k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.8
+    k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.8
+    k8s.io/code-generator => k8s.io/code-generator v0.18.18-rc.0
+    k8s.io/component-base => k8s.io/component-base v0.18.8
+    k8s.io/cri-api => k8s.io/cri-api v0.18.18-rc.0
+    k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.8
+    k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.8
+    k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.8
+    k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.8
+    k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.8
+    k8s.io/kubectl => k8s.io/kubectl v0.18.8
+    k8s.io/kubelet => k8s.io/kubelet v0.18.8
+    k8s.io/kubernetes => github.com/kubernetes/kubernetes v1.18.8
+    k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.8
+    k8s.io/metrics => k8s.io/metrics v0.18.8
+    k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.8
+    sigs.k8s.io/apiserver-network-proxy => github.com/openyurtio/apiserver-network-proxy v1.18.8
+    sigs.k8s.io/apiserver-network-proxy/konnectivity-client => sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7
     sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.5.7
     sigs.k8s.io/structured-merge-diff => sigs.k8s.io/structured-merge-diff v1.0.2
 )
diff --git a/go.sum b/go.sum
index abf203c1a84..3f9158190d1 100644
--- a/go.sum
+++ b/go.sum
@@ -2,8 +2,7 @@ bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-github.com/Azure/azure-sdk-for-go v32.5.0+incompatible h1:Hn/DsObfmw0M7dMGS/c0MlVrJuGFzHzOpBWL89acR68=
-github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
@@ -27,8 +26,7 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
 github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=
 github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
 github.com/MakeNowJust/heredoc
v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= @@ -44,15 +42,20 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Rican7/retry v0.1.0/go.mod h1:FgOROf8P5bebcC1DS0PdOQiqGUridaZvikzUmkFW6gg= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/aliyun/alibaba-cloud-sdk-go v1.61.355 h1:EXv1UhH7JHOg/DsteHFmivc+lRAiTJBI744dJW8N6tw= github.com/aliyun/alibaba-cloud-sdk-go v1.61.355/go.mod h1:pUKYbK5JQ+1Dfxk80P0qxGqe5dkxDoabbZS7zOcouyA= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/auth0/go-jwt-middleware v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= -github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/bazelbuild/bazel-gazelle v0.18.2/go.mod h1:D0ehMSbS+vesFsLGiD6JXu3mVEzOlfUl8wNnq+x/9p0= github.com/bazelbuild/bazel-gazelle v0.19.1-0.20191105222053-70208cbdc798/go.mod h1:rPwzNHUqEzngx1iVBfO/2X2npKaT3tqPqqHW6rVsn/A= github.com/bazelbuild/buildtools v0.0.0-20190731111112-f720930ceb60/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= @@ -62,6 +65,7 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLM github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy 
v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bifurcation/mint v0.0.0-20180715133206-93c51c6ce115/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU= github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -72,43 +76,42 @@ github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QH github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= +github.com/checkpoint-restore/go-criu v0.0.0-20181120144056-17b0214f6c48/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho= github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= +github.com/cilium/ebpf v0.0.0-20191025125908-95b36a581eed/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/cfssl v0.0.0-20180726162950-56268a613adf/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codegangsta/negroni v1.0.0/go.mod h1:v0y3T5G7Y1UlFfyxFn/QLRU4a2EuNau2iZY63YTKWo0= -github.com/container-storage-interface/spec v1.1.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= +github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= github.com/containerd/console v0.0.0-20170925154832-84eeaae905fa/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/containerd v1.0.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/typeurl v0.0.0-20190228175220-2a93cfde8c20/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/coredns/corefile-migration v1.0.2/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= +github.com/coredns/corefile-migration v1.0.6/go.mod h1:OFwBp/Wc9dJt5cAZzHWMNhK1r5L0p0jDwIBc6j8NC8E= github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= -github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.17+incompatible h1:f/Z3EoDSx1yjaIjLQGo1diYUlQYSBrrAQ5vP8NjwXwo= -github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/rkt v1.30.0/go.mod h1:O634mlH6U7qk87poQifK6M2rsFNt+FyUTWNMnP1hF1U= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -124,13 +127,15 @@ github.com/docker/docker v17.12.0-ce-rc1.0.20200531234253-77e06fda0c94+incompati github.com/docker/docker v17.12.0-ce-rc1.0.20200531234253-77e06fda0c94+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/libnetwork v0.0.0-20180830151422-a9cd636e3789/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.12.0+incompatible h1:SIvoTSbsMEwuM3dzFirLwKc4BH6VXP5CNf+G1FfJVr4= @@ -138,19 +143,19 @@ github.com/emicklei/go-restful v2.12.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQm github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= -github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= @@ -160,7 +165,9 @@ github.com/go-acme/lego v2.5.0+incompatible h1:5fNN9yRQfv8ymH3DSsxla+4aYeQt2IgfZ github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod 
h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= @@ -168,6 +175,7 @@ github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70t github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= @@ -182,21 +190,27 @@ github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3Hfo github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg= github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod 
h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= @@ -205,7 +219,9 @@ github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tF github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= @@ -220,15 +236,15 @@ github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Il github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4= -github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -237,6 +253,7 @@ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4er github.com/golang/mock v1.0.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -278,8 +295,7 @@ github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA// github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/cadvisor v0.34.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/cadvisor v0.35.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= @@ -289,7 +305,6 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= @@ -303,6 +318,7 @@ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= @@ -312,19 +328,22 @@ github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51 github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0 
h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -342,20 +361,23 @@ github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mod h1:wK6yTYYcgjHE1Z1QtXACPDjcFJyBskHEdagmnq3vsP8= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/karrick/godirwalk v1.7.5/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -367,6 +389,7 @@ github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM52 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -374,8 +397,8 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kubernetes/kubernetes v1.16.9 h1:SNn5JAFCIFJcpq8urxnSMoGK87SAgrSPPmUmL/B7jcs= -github.com/kubernetes/kubernetes v1.16.9/go.mod h1:bpUsy1qP0W6EtkxrPluP02p2+wyVN+95lkjPKnLQZtc= +github.com/kubernetes/kubernetes v1.18.8 h1:wpG4RPPyuNRgQ/L8yEAHrsH1FKprqE8vPNLf92T9BGk= +github.com/kubernetes/kubernetes v1.18.8/go.mod h1:SU7bBi8ZNHRjqzNhY4U78gClS1O7Q7avCrfF5aSiDko= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= @@ -395,10 +418,14 @@ github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= 
github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -407,7 +434,7 @@ github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mindprince/gonvml v0.0.0-20171110221305-fee913ce8fb2/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= +github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -419,19 +446,19 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OH github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= -github.com/mrunalp/fileutils v0.0.0-20160930181131-4ee1cc9a8058/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E= @@ -439,9 +466,11 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96d github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.13.0 h1:M76yO2HkZASFjXL0HSoZJ1AYEmQxNJmY41Jx1zNUq1Y= github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= @@ -449,6 +478,7 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= @@ -456,9 +486,11 @@ github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.0-rc2.0.20190611121236-6cc515888830/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runtime-spec v1.0.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= +github.com/opencontainers/selinux v1.3.1-0.20190929122143-5215b1806f52/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs= +github.com/openyurtio/apiserver-network-proxy v1.18.8 
h1:xXqaP8DAOvCHD7DNIqtBOhuWxCnwULLc1PqOMoJ7UeI= +github.com/openyurtio/apiserver-network-proxy v1.18.8/go.mod h1:X5Au3jBNIgYL2uK0IHeNGnZqlUlVSCFQhi/npPgkKRg= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -468,7 +500,6 @@ github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= @@ -477,12 +508,19 @@ github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOi github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= @@ -501,6 +539,7 @@ github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/agouti 
v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= @@ -508,13 +547,16 @@ github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOms github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= @@ -546,13 +588,15 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/syndtr/gocapability v0.0.0-20160928074757-e7cb7fa329f4/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tarm/serial 
v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= @@ -562,54 +606,61 @@ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGr github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e h1:f1yevOHP+Suqk0rVc13fIkzcLULJbyQcXDba2klljD0= -github.com/vishvananda/netlink v0.0.0-20171020171820-b2de5d10e38e/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM= +github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936 h1:J9gO8RJCAFlln1jsvRba/CWVUnMHwObklfxxjErl1uk= github.com/vishvananda/netns v0.0.0-20171111001504-be1fbeda1936/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vmware/govmomi v0.20.1/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= +go.etcd.io/etcd 
v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190927031335-2835ba2e683f/go.mod h1:fYw7AShPAhGMdXqA9gRadk/CcMsvLlClpE5oBwnS3dM= golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190123085648-057139ce5d2b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190424203555-c05e17bb3b2d/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -626,7 +677,7 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181102091132-c10e9556a7bc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -638,16 +689,14 @@ golang.org/x/net v0.0.0-20190328230028-74de082e2cca/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -660,17 +709,19 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190122071731-054c452bb702/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -681,26 +732,24 @@ golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -715,6 +764,7 @@ golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190122202912-9c309ee22fab/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -728,36 +778,36 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873 h1:nfPFGzJkUDX6uBmpN/pSw7MbOAWegH5QDQuoXFHedLg= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -768,13 +818,13 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= 
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= @@ -794,10 +844,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -811,30 +859,29 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.16.9 h1:3vCx0WX9qcg1Hv4aQ/G1tiIKectGVuimvPVTJU4VOCA= -k8s.io/api v0.16.9/go.mod h1:Y7dZNHs1Xy0mSwSlzL9QShi6qkljnN41yR8oWCRTDe8= -k8s.io/apiextensions-apiserver v0.16.9 h1:CE+SWS6PM3MDJiyihW5hnDiqsJ/sjMaSMblqzH37J18= -k8s.io/apiextensions-apiserver v0.16.9/go.mod h1:j/+KedxOeRSPMkvLNyKMbIT3+saXdTO4jTBplTmXJR4= -k8s.io/apimachinery v0.16.10-beta.0 h1:l+qmzwWTMIBtFGlo5OpPYoZKCgGLtpAWvIa8Wcr9luU= -k8s.io/apimachinery v0.16.10-beta.0/go.mod h1:Xk2vD2TRRpuWYLQNM6lT9R7DSFZUYG03SarNkbGrnKE= -k8s.io/apiserver v0.16.9 h1:+gYGD2LFXI9twZpWFyZgh29YfSLyTO27IzgEF12MgJg= -k8s.io/apiserver v0.16.9/go.mod h1:JWzfDIpD8e9rvU+Gn6ew8MfQZq41USj0iwW5+ZLyTLM= -k8s.io/cli-runtime v0.16.9/go.mod h1:gVhdxu/z31/5nsr4yciGJrdODVhBH1mboFYzqMAlsJc= -k8s.io/client-go v0.16.9 h1:6Eh4lMDxFtDzBkqid1AOL3bQ/pPYrulx8l23DXw4mRU= -k8s.io/client-go v0.16.9/go.mod h1:ThjPlh7Kx+XoBFOCt775vx5J7atwY7F/zaFzTco5gL0= -k8s.io/cloud-provider 
v0.16.9 h1:blU70eSYwwKH1os3nIDRbQ4tHY5WMIbNdY23A32DU6E= -k8s.io/cloud-provider v0.16.9/go.mod h1:h5w+p2akfq206hhk+gtiUWAHNK093+FxTuSfIlOKoSo= -k8s.io/cluster-bootstrap v0.16.9 h1:3wRkZr3+iZkpBGg6nj31JvZoiH6c2SvoivNYO5mAKRI= -k8s.io/cluster-bootstrap v0.16.9/go.mod h1:Ou7X3KqHG/I/9dcZK/e4Z8mQMVhxajbQjXPQPB5EA2g= -k8s.io/code-generator v0.16.10-beta.0/go.mod h1:wFdrXdVi/UC+xIfLi+4l9elsTT/uEF61IfcN2wOLULQ= -k8s.io/component-base v0.16.9 h1:ChdRdMGDq9vTq5vJRaQ8VuEHLwhDJ+eAvfNghZqJcck= -k8s.io/component-base v0.16.9/go.mod h1:5iNKIRj8yEaKG+baEkfXgU9JiWpC1WAFGBZ3Xg9fDJk= -k8s.io/cri-api v0.16.13-rc.0 h1:M4xw9Z/auiF80M62ZegcNG8xOOg32Zwlw3bnFfJAnfE= -k8s.io/cri-api v0.16.13-rc.0/go.mod h1:W6aMMPN5fmxcRGaHnb6BEfoTeS82OsJcsUJyKf+EWYc= -k8s.io/csi-translation-lib v0.16.9 h1:TrTAZzxQdMiej6NCPPoRwf5dxs8iFOQ2mnO2jEE0rY0= -k8s.io/csi-translation-lib v0.16.9/go.mod h1:+y+WYfHErQ/gDn9UpPBqmtOYLrTpedu/vuMhLsiuWI8= +k8s.io/api v0.18.8 h1:aIKUzJPb96f3fKec2lxtY7acZC9gQNDLVhfSGpxBAC4= +k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= +k8s.io/apiextensions-apiserver v0.18.8/go.mod h1:7f4ySEkkvifIr4+BRrRWriKKIJjPyg9mb/p63dJKnlM= +k8s.io/apimachinery v0.18.9-rc.0 h1:RnhcqZsTFI3l4z1iXNJhP0PrC4Gnrw5WlSjRhoRYifs= +k8s.io/apimachinery v0.18.9-rc.0/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig= +k8s.io/apiserver v0.18.8 h1:Au4kMn8sb1zFdyKqc8iMHLsYLxRI6Y+iAhRNKKQtlBY= +k8s.io/apiserver v0.18.8/go.mod h1:12u5FuGql8Cc497ORNj79rhPdiXQC4bf53X/skR/1YM= +k8s.io/cli-runtime v0.18.8/go.mod h1:7EzWiDbS9PFd0hamHHVoCY4GrokSTPSL32MA4rzIu0M= +k8s.io/client-go v0.18.8 h1:SdbLpIxk5j5YbFr1b7fq8S7mDgDjYmUxSbszyoesoDM= +k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU= +k8s.io/cloud-provider v0.18.8 h1:XNCJIzKFtoXhn6cyyXe7JWde0KjK6o8vo2Dtat7hb6Q= +k8s.io/cloud-provider v0.18.8/go.mod h1:cn9AlzMPVIXA4HHLVbgGUigaQlZyHSZ7WAwDEFNrQSs= +k8s.io/cluster-bootstrap v0.18.8 h1:+gkx/sfGBtokxvRbVA5nVA8bPy1YvpDYRiGRqyEtSXc= +k8s.io/cluster-bootstrap v0.18.8/go.mod h1:guq0Uc+QwazHgpS1yAw5Z7yUlBCtGppbgWQkbN3lxIY= +k8s.io/code-generator v0.18.18-rc.0/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= +k8s.io/component-base v0.18.8 h1:BW5CORobxb6q5mb+YvdwQlyXXS6NVH5fDXWbU7tf2L8= +k8s.io/component-base v0.18.8/go.mod h1:00frPRDas29rx58pPCxNkhUfPbwajlyyvu8ruNgSErU= +k8s.io/cri-api v0.18.18-rc.0 h1:/ON2X6LsejWId5HntijA0AQ2NewlT+4bAtDzck8CSG4= +k8s.io/cri-api v0.18.18-rc.0/go.mod h1:OJtpjDvfsKoLGhvcc0qfygved0S0dGX56IJzPbqTG1s= +k8s.io/csi-translation-lib v0.18.8 h1:HdyTgN4+O0zPDsF3rDGVYNwuhsG16HLQvC7lKuIxBq4= +k8s.io/csi-translation-lib v0.18.8/go.mod h1:6cA6Btlzxy9s3QrS4BCZzQqclIWnTLr6Jx3H2ctAzY4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -842,20 +889,22 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/kube-aggregator 
v0.16.9/go.mod h1:Zki0k+m5GSXrMNpTPuaF5MTtuwMNte/JBQ2IDOmY75A= -k8s.io/kube-controller-manager v0.16.9/go.mod h1:PhcH/CYeaMn53OycVUHn9yvtz/n3C0wTF9Zpc/NvSsA= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= -k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-proxy v0.16.9/go.mod h1:UOKCVRn6vgVgjUhV0v/vFdxcv07aIeKH0JyZM9Tli6w= -k8s.io/kube-scheduler v0.16.9/go.mod h1:mDruQFpyAyhsCC0/vZBqGjwp0oyGhSPzkejf9aFH46Q= -k8s.io/kubectl v0.16.9 h1:DBgsfFGf+wQiZyz/Q4gJVxfuNQFR20f/IQ4gj+C4qjU= -k8s.io/kubectl v0.16.9/go.mod h1:FZ8ibvEMKjHC1yfi+vr8eBVX3VpoVOkrcdVJz5e6T3o= -k8s.io/kubelet v0.16.9/go.mod h1:KVj02L3uHVoEDC7buGK7WA/S8b42G8OFbvaYROws+0U= -k8s.io/legacy-cloud-providers v0.16.9/go.mod h1:BEiLL1gweb+0X4fn2HAQGIFBDOsSAYMcwUk4O9LWn5M= -k8s.io/metrics v0.16.9/go.mod h1:mIG8NlDrZsU1edgU35qlFKP7e4J8snLMXBh5lhR7aL0= +k8s.io/kube-aggregator v0.18.8/go.mod h1:CyLoGZB+io8eEwnn+6RbV7QWJQhj8a3TBH8ZM8sLbhI= +k8s.io/kube-controller-manager v0.18.8/go.mod h1:IYZteddXJFD1TVgAw8eRP3c9OOA2WtHdXdE8aH6gXnc= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-proxy v0.18.8/go.mod h1:u4E8OsUpUzfZ9CEFf9rdLsbYiusZr8utbtF4WQrX+qs= +k8s.io/kube-scheduler v0.18.8 h1:t08qdlU0UzBoKaOmNu0ripIHawR4emSJUTFdRZMg6Zk= +k8s.io/kube-scheduler v0.18.8/go.mod h1:OeliYiILv1XkSq0nmQjRewgt5NimKsTidZFEhfL5fqA= +k8s.io/kubectl v0.18.8 h1:qTkHCz21YmK0+S0oE6TtjtxmjeDP42gJcZJyRKsIenA= +k8s.io/kubectl v0.18.8/go.mod h1:PlEgIAjOMua4hDFTEkVf+W5M0asHUKfE4y7VDZkpLHM= +k8s.io/kubelet v0.18.8/go.mod h1:6z1jHCk0NPE6WshFStfqcgQ1bnD3tetcPmhC2915aio= +k8s.io/legacy-cloud-providers v0.18.8/go.mod h1:tgp4xYf6lvjrWnjQwTOPvWQE9IVqSBGPF4on0IyICQE= +k8s.io/metrics v0.18.8/go.mod h1:j7JzZdiyhLP2BsJm/Fzjs+j5Lb1Y7TySjhPWqBPwRXA= k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= -k8s.io/sample-apiserver v0.16.9/go.mod h1:FQx3+vFR9swB9s36sc9dC+IMEMh/OWqw+gODr45KKGE= -k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/sample-apiserver v0.18.8/go.mod h1:qXPfVwaZwM2owoSMNRRm9vw+HNJGLNsBpGckv1uxWy4= +k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200603063816-c1c6865ac451 h1:v8ud2Up6QK1lNOKFgiIVrZdMg7MpmSnvtrOieolJKoE= k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= @@ -866,15 +915,13 @@ modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy v0.0.15 h1:WAEMs4XNCgavxNfu3lYkWZJ0v+tOlXNB7tnUBD22vAg= -sigs.k8s.io/apiserver-network-proxy v0.0.15/go.mod 
h1:9/9TjgHy6ORJtVUvPVDGLcc6M9Ki2WAfE8xlighdSaw= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15 h1:4uqm9Mv+w2MmBYD+F4qf/v6tDFUdPOk29C095RbU5mY= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7 h1:uuHDyjllyzRyCIvvn0OBjiRB0SgBZGqHNYAmjR7fO50= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/structured-merge-diff v1.0.2 h1:WiMoyniAVAYm03w+ImfF9IE2G23GLR/SwDnQyaNZvPk= -sigs.k8s.io/structured-merge-diff v1.0.2/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/pkg/controller/nodelifecycle/metrics.go b/pkg/controller/nodelifecycle/metrics.go index baa78447ecb..94e064316cc 100644 --- a/pkg/controller/nodelifecycle/metrics.go +++ b/pkg/controller/nodelifecycle/metrics.go @@ -19,7 +19,8 @@ package nodelifecycle import ( "sync" - "github.com/prometheus/client_golang/prometheus" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" ) const ( @@ -31,35 +32,39 @@ const ( ) var ( - zoneHealth = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Subsystem: nodeControllerSubsystem, - Name: zoneHealthStatisticKey, - Help: "Gauge measuring percentage of healthy nodes per zone.", + zoneHealth = metrics.NewGaugeVec( + &metrics.GaugeOpts{ + Subsystem: nodeControllerSubsystem, + Name: zoneHealthStatisticKey, + Help: "Gauge measuring percentage of healthy nodes per zone.", + StabilityLevel: metrics.ALPHA, }, []string{"zone"}, ) - zoneSize = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Subsystem: nodeControllerSubsystem, - Name: zoneSizeKey, - Help: "Gauge measuring number of registered Nodes per zones.", + zoneSize = metrics.NewGaugeVec( + &metrics.GaugeOpts{ + Subsystem: nodeControllerSubsystem, + Name: zoneSizeKey, + Help: "Gauge measuring number of registered Nodes per zones.", + StabilityLevel: metrics.ALPHA, }, []string{"zone"}, ) - unhealthyNodes = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Subsystem: nodeControllerSubsystem, - Name: zoneNoUnhealthyNodesKey, - Help: "Gauge measuring number of not Ready Nodes per zones.", + unhealthyNodes = metrics.NewGaugeVec( + &metrics.GaugeOpts{ + Subsystem: nodeControllerSubsystem, + Name: zoneNoUnhealthyNodesKey, + Help: "Gauge measuring number of not Ready Nodes per zones.", + StabilityLevel: metrics.ALPHA, }, []string{"zone"}, ) - evictionsNumber = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Subsystem: nodeControllerSubsystem, - Name: evictionsNumberKey, - Help: "Number of Node evictions that happened since current instance of NodeController started.", + evictionsNumber = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: nodeControllerSubsystem, + 
Name: evictionsNumberKey, + Help: "Number of Node evictions that happened since current instance of NodeController started.", + StabilityLevel: metrics.ALPHA, }, []string{"zone"}, ) @@ -70,9 +75,9 @@ var registerMetrics sync.Once // Register the metrics that are to be monitored. func Register() { registerMetrics.Do(func() { - prometheus.MustRegister(zoneHealth) - prometheus.MustRegister(zoneSize) - prometheus.MustRegister(unhealthyNodes) - prometheus.MustRegister(evictionsNumber) + legacyregistry.MustRegister(zoneHealth) + legacyregistry.MustRegister(zoneSize) + legacyregistry.MustRegister(unhealthyNodes) + legacyregistry.MustRegister(evictionsNumber) }) } diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller.go b/pkg/controller/nodelifecycle/node_lifecycle_controller.go index 8b1b64a1191..a44420e7fd6 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller.go @@ -23,6 +23,7 @@ limitations under the License. package nodelifecycle import ( + "context" "fmt" "strings" "sync" @@ -32,7 +33,7 @@ import ( "github.com/openyurtio/openyurt/pkg/controller/nodelifecycle/scheduler" nodeutil "github.com/openyurtio/openyurt/pkg/controller/util/node" - coordv1beta1 "k8s.io/api/coordination/v1beta1" + coordv1 "k8s.io/api/coordination/v1" v1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -42,23 +43,22 @@ import ( "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" appsv1informers "k8s.io/client-go/informers/apps/v1" - coordinformers "k8s.io/client-go/informers/coordination/v1beta1" + coordinformers "k8s.io/client-go/informers/coordination/v1" coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" v1core "k8s.io/client-go/kubernetes/typed/core/v1" appsv1listers "k8s.io/client-go/listers/apps/v1" - coordlisters "k8s.io/client-go/listers/coordination/v1beta1" + coordlisters "k8s.io/client-go/listers/coordination/v1" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/workqueue" + "k8s.io/component-base/metrics/prometheus/ratelimiter" "k8s.io/kubernetes/pkg/controller" kubefeatures "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" - "k8s.io/kubernetes/pkg/util/metrics" utilnode "k8s.io/kubernetes/pkg/util/node" taintutils "k8s.io/kubernetes/pkg/util/taints" ) @@ -71,14 +71,14 @@ func init() { var ( // UnreachableTaintTemplate is the taint for when a node becomes unreachable. 
UnreachableTaintTemplate = &v1.Taint{ - Key: schedulerapi.TaintNodeUnreachable, + Key: v1.TaintNodeUnreachable, Effect: v1.TaintEffectNoExecute, } // NotReadyTaintTemplate is the taint for when a node is not ready for // executing pods NotReadyTaintTemplate = &v1.Taint{ - Key: schedulerapi.TaintNodeNotReady, + Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoExecute, } @@ -88,30 +88,30 @@ var ( // for certain NodeConditionType, there are multiple {ConditionStatus,TaintKey} pairs nodeConditionToTaintKeyStatusMap = map[v1.NodeConditionType]map[v1.ConditionStatus]string{ v1.NodeReady: { - v1.ConditionFalse: schedulerapi.TaintNodeNotReady, - v1.ConditionUnknown: schedulerapi.TaintNodeUnreachable, + v1.ConditionFalse: v1.TaintNodeNotReady, + v1.ConditionUnknown: v1.TaintNodeUnreachable, }, v1.NodeMemoryPressure: { - v1.ConditionTrue: schedulerapi.TaintNodeMemoryPressure, + v1.ConditionTrue: v1.TaintNodeMemoryPressure, }, v1.NodeDiskPressure: { - v1.ConditionTrue: schedulerapi.TaintNodeDiskPressure, + v1.ConditionTrue: v1.TaintNodeDiskPressure, }, v1.NodeNetworkUnavailable: { - v1.ConditionTrue: schedulerapi.TaintNodeNetworkUnavailable, + v1.ConditionTrue: v1.TaintNodeNetworkUnavailable, }, v1.NodePIDPressure: { - v1.ConditionTrue: schedulerapi.TaintNodePIDPressure, + v1.ConditionTrue: v1.TaintNodePIDPressure, }, } taintKeyToNodeConditionMap = map[string]v1.NodeConditionType{ - schedulerapi.TaintNodeNotReady: v1.NodeReady, - schedulerapi.TaintNodeUnreachable: v1.NodeReady, - schedulerapi.TaintNodeNetworkUnavailable: v1.NodeNetworkUnavailable, - schedulerapi.TaintNodeMemoryPressure: v1.NodeMemoryPressure, - schedulerapi.TaintNodeDiskPressure: v1.NodeDiskPressure, - schedulerapi.TaintNodePIDPressure: v1.NodePIDPressure, + v1.TaintNodeNotReady: v1.NodeReady, + v1.TaintNodeUnreachable: v1.NodeReady, + v1.TaintNodeNetworkUnavailable: v1.NodeNetworkUnavailable, + v1.TaintNodeMemoryPressure: v1.NodeMemoryPressure, + v1.TaintNodeDiskPressure: v1.NodeDiskPressure, + v1.TaintNodePIDPressure: v1.NodePIDPressure, } ) @@ -130,7 +130,7 @@ const ( retrySleepTime = 20 * time.Millisecond nodeNameKeyIndex = "spec.nodeName" // podUpdateWorkerSizes assumes that in most cases pod will be handled by monitorNodeHealth pass. - // Pod update workes will only handle lagging cache pods. 4 workes should be enough. + // Pod update workers will only handle lagging cache pods. 4 workers should be enough. podUpdateWorkerSize = 4 ) @@ -169,7 +169,7 @@ type nodeHealthData struct { probeTimestamp metav1.Time readyTransitionTimestamp metav1.Time status *v1.NodeStatus - lease *coordv1beta1.Lease + lease *coordv1.Lease } func (n *nodeHealthData) deepCopy() *nodeHealthData { @@ -291,7 +291,6 @@ type Controller struct { nodeEvictionMap *nodeEvictionMap // workers that evicts pods from unresponsive nodes. zonePodEvictor map[string]*scheduler.RateLimitedTimedQueue - // workers that are responsible for tainting nodes. zoneNoExecuteTainter map[string]*scheduler.RateLimitedTimedQueue @@ -353,14 +352,6 @@ type Controller struct { // tainted nodes, if they're not tolerated. runTaintManager bool - // if set to true Controller will taint Nodes with 'TaintNodeNotReady' and 'TaintNodeUnreachable' - // taints instead of evicting Pods itself. - useTaintBasedEvictions bool - - // if set to true, NodeController will taint Nodes based on its condition for 'NetworkUnavailable', - // 'MemoryPressure', 'PIDPressure' and 'DiskPressure'. 
- taintNodeByCondition bool - nodeUpdateQueue workqueue.Interface podUpdateQueue workqueue.RateLimitingInterface } @@ -381,8 +372,7 @@ func NewNodeLifecycleController( largeClusterThreshold int32, unhealthyZoneThreshold float32, runTaintManager bool, - useTaintBasedEvictions bool, - taintNodeByCondition bool) (*Controller, error) { +) (*Controller, error) { if kubeClient == nil { klog.Fatalf("kubeClient is nil when starting Controller") @@ -399,7 +389,7 @@ func NewNodeLifecycleController( }) if kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil { - metrics.RegisterMetricAndTrackRateLimiterUsage("node_lifecycle_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) + ratelimiter.RegisterMetricAndTrackRateLimiterUsage("node_lifecycle_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()) } nc := &Controller{ @@ -422,14 +412,9 @@ func NewNodeLifecycleController( largeClusterThreshold: largeClusterThreshold, unhealthyZoneThreshold: unhealthyZoneThreshold, runTaintManager: runTaintManager, - useTaintBasedEvictions: useTaintBasedEvictions && runTaintManager, - taintNodeByCondition: taintNodeByCondition, nodeUpdateQueue: workqueue.NewNamed("node_lifecycle_controller"), podUpdateQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "node_lifecycle_controller_pods"), } - if useTaintBasedEvictions { - klog.Infof("Controller is using taint based evictions.") - } nc.enterPartialDisruptionFunc = nc.ReducedQPSFunc nc.enterFullDisruptionFunc = nc.HealthyQPSFunc @@ -485,6 +470,7 @@ func NewNodeLifecycleController( return []string{pod.Spec.NodeName}, nil }, }) + podIndexer := podInformer.Informer().GetIndexer() nc.getPodsAssignedToNode = func(nodeName string) ([]*v1.Pod, error) { objs, err := podIndexer.ByIndex(nodeNameKeyIndex, nodeName) @@ -542,17 +528,8 @@ func NewNodeLifecycleController( }), }) - if nc.taintNodeByCondition { - klog.Infof("Controller will taint node by condition.") - } - nc.leaseLister = leaseInformer.Lister() - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NodeLease) { - nc.leaseInformerSynced = leaseInformer.Informer().HasSynced - } else { - // Always indicate that lease is synced to prevent syncing lease. - nc.leaseInformerSynced = func() bool { return true } - } + nc.leaseInformerSynced = leaseInformer.Informer().HasSynced nc.nodeLister = nodeInformer.Lister() nc.nodeInformerSynced = nodeInformer.Informer().HasSynced @@ -595,7 +572,7 @@ func (nc *Controller) Run(stopCh <-chan struct{}) { go wait.Until(nc.doPodProcessingWorker, time.Second, stopCh) } - if nc.useTaintBasedEvictions { + if nc.runTaintManager { // Handling taint based evictions. Because we don't want a dedicated logic in TaintManager for NC-originated // taints and we normally don't rate limit evictions caused by taints, we need to rate limit adding taints. 
go wait.Until(nc.doNoExecuteTaintingPass, scheduler.NodeEvictionPeriod, stopCh) @@ -625,11 +602,9 @@ func (nc *Controller) doNodeProcessingPassWorker() { return } nodeName := obj.(string) - if nc.taintNodeByCondition { - if err := nc.doNoScheduleTaintingPass(nodeName); err != nil { - klog.Errorf("Failed to taint NoSchedule on node <%s>, requeue it: %v", nodeName, err) - // TODO(k82cn): Add nodeName back to the queue - } + if err := nc.doNoScheduleTaintingPass(nodeName); err != nil { + klog.Errorf("Failed to taint NoSchedule on node <%s>, requeue it: %v", nodeName, err) + // TODO(k82cn): Add nodeName back to the queue } // TODO: re-evaluate whether there are any labels that need to be // reconcile in 1.19. Remove this function if it's no longer necessary. @@ -666,7 +641,7 @@ func (nc *Controller) doNoScheduleTaintingPass(nodeName string) error { if node.Spec.Unschedulable { // If unschedulable, append related taint. taints = append(taints, v1.Taint{ - Key: schedulerapi.TaintNodeUnschedulable, + Key: v1.TaintNodeUnschedulable, Effect: v1.TaintEffectNoSchedule, }) } @@ -678,7 +653,7 @@ func (nc *Controller) doNoScheduleTaintingPass(nodeName string) error { return false } // Find unschedulable taint of node. - if t.Key == schedulerapi.TaintNodeUnschedulable { + if t.Key == v1.TaintNodeUnschedulable { return true } // Find node condition taints of node. @@ -785,9 +760,7 @@ func (nc *Controller) doEvictionPass() { // monitorNodeHealth verifies node health are constantly updated by kubelet, and // if not, post "NodeReady==ConditionUnknown". -// For nodes who are not ready or not reachable for a long period of time. -// This function will taint them if TaintBasedEvictions feature was enabled. -// Otherwise, it would evict it directly. +// This function will taint nodes that are not ready or not reachable for a long period of time. func (nc *Controller) monitorNodeHealth() error { // We are listing nodes from local cache as we can tolerate some small delays // comparing to state from etcd and there is eventual consistency anyway. @@ -806,7 +779,7 @@ func (nc *Controller) monitorNodeHealth() error { nodeutil.RecordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in Controller", added[i].Name)) nc.knownNodeSet[added[i].Name] = added[i] nc.addPodEvictorForNewZone(added[i]) - if nc.useTaintBasedEvictions { + if nc.runTaintManager { nc.markNodeAsReachable(added[i]) } else { nc.cancelPodEviction(added[i]) @@ -831,7 +804,7 @@ func (nc *Controller) monitorNodeHealth() error { return true, nil } name := node.Name - node, err = nc.kubeClient.CoreV1().Nodes().Get(name, metav1.GetOptions{}) + node, err = nc.kubeClient.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { klog.Errorf("Failed while getting a Node to retry updating node health.
Probably Node %s was deleted.", name) return false, err @@ -860,7 +833,7 @@ func (nc *Controller) monitorNodeHealth() error { } continue } - if nc.useTaintBasedEvictions { + if nc.runTaintManager { nc.processTaintBaseEviction(node, &observedReadyCondition) } else { if err := nc.processNoTaintBaseEviction(node, &observedReadyCondition, gracePeriod, pods); err != nil { @@ -911,7 +884,7 @@ func (nc *Controller) processTaintBaseEviction(node *v1.Node, observedReadyCondi if taintutils.TaintExists(node.Spec.Taints, NotReadyTaintTemplate) { taintToAdd := *UnreachableTaintTemplate if !nodeutil.SwapNodeControllerTaint(nc.kubeClient, []*v1.Taint{&taintToAdd}, []*v1.Taint{NotReadyTaintTemplate}, node) { - klog.Errorf("Failed to instantly swap UnreachableTaint to NotReadyTaint. Will try again in the next cycle.") + klog.Errorf("Failed to instantly swap NotReadyTaint to UnreachableTaint. Will try again in the next cycle.") } } else if nc.markNodeForTainting(node) { klog.V(2).Infof("Node %v is unresponsive as of %v. Adding it to the Taint queue.", @@ -1066,7 +1039,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node // - currently only correct Ready State transition outside of Node Controller is marking it ready by Kubelet, we don't check // if that's the case, but it does not seem necessary. var savedCondition *v1.NodeCondition - var savedLease *coordv1beta1.Lease + var savedLease *coordv1.Lease if nodeHealth != nil { _, savedCondition = nodeutil.GetNodeCondition(nodeHealth.status, v1.NodeReady) savedLease = nodeHealth.lease @@ -1115,17 +1088,14 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node readyTransitionTimestamp: transitionTime, } } - var observedLease *coordv1beta1.Lease - if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.NodeLease) { - // Always update the probe time if node lease is renewed. - // Note: If kubelet never posted the node status, but continues renewing the - // heartbeat leases, the node controller will assume the node is healthy and - // take no action. - observedLease, _ = nc.leaseLister.Leases(v1.NamespaceNodeLease).Get(node.Name) - if observedLease != nil && (savedLease == nil || savedLease.Spec.RenewTime.Before(observedLease.Spec.RenewTime)) { - nodeHealth.lease = observedLease - nodeHealth.probeTimestamp = nc.now() - } + // Always update the probe time if node lease is renewed. + // Note: If kubelet never posted the node status, but continues renewing the + // heartbeat leases, the node controller will assume the node is healthy and + // take no action. + observedLease, _ := nc.leaseLister.Leases(v1.NamespaceNodeLease).Get(node.Name) + if observedLease != nil && (savedLease == nil || savedLease.Spec.RenewTime.Before(observedLease.Spec.RenewTime)) { + nodeHealth.lease = observedLease + nodeHealth.probeTimestamp = nc.now() } if nc.now().After(nodeHealth.probeTimestamp.Add(gracePeriod)) { @@ -1155,7 +1125,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node LastTransitionTime: nowTimestamp, }) } else { - klog.V(4).Infof("node %v hasn't been updated for %+v. Last %v is: %+v", + klog.V(2).Infof("node %v hasn't been updated for %+v. 
Last %v is: %+v", node.Name, nc.now().Time.Sub(nodeHealth.probeTimestamp.Time), nodeConditionType, currentCondition) if currentCondition.Status != v1.ConditionUnknown { currentCondition.Status = v1.ConditionUnknown @@ -1169,7 +1139,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node _, currentReadyCondition = nodeutil.GetNodeCondition(&node.Status, v1.NodeReady) if !apiequality.Semantic.DeepEqual(currentReadyCondition, &observedReadyCondition) { - if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(node); err != nil { + if _, err := nc.kubeClient.CoreV1().Nodes().UpdateStatus(context.TODO(), node, metav1.UpdateOptions{}); err != nil { klog.Errorf("Error updating node %s: %v", node.Name, err) return gracePeriod, observedReadyCondition, currentReadyCondition, err } @@ -1229,7 +1199,7 @@ func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.Nod if allAreFullyDisrupted { klog.V(0).Info("Controller detected that all Nodes are not-Ready. Entering master disruption mode.") for i := range nodes { - if nc.useTaintBasedEvictions { + if nc.runTaintManager { _, err := nc.markNodeAsReachable(nodes[i]) if err != nil { klog.Errorf("Failed to remove taints from Node %v", nodes[i].Name) @@ -1240,7 +1210,7 @@ func (nc *Controller) handleDisruption(zoneToNodeConditions map[string][]*v1.Nod } // We stop all evictions. for k := range nc.zoneStates { - if nc.useTaintBasedEvictions { + if nc.runTaintManager { nc.zoneNoExecuteTainter[k].SwapLimiter(0) } else { nc.zonePodEvictor[k].SwapLimiter(0) @@ -1352,7 +1322,7 @@ func (nc *Controller) processPod(podItem podUpdateItem) { pods := []*v1.Pod{pod} // In taint-based eviction mode, only node updates are processed by NodeLifecycleController. // Pods are processed by TaintManager. 
- if !nc.useTaintBasedEvictions { + if !nc.runTaintManager { if err := nc.processNoTaintBaseEviction(node, currentReadyCondition, nc.nodeMonitorGracePeriod, pods); err != nil { klog.Warningf("Unable to process pod %+v eviction from node %v: %v.", podItem, nodeName, err) nc.podUpdateQueue.AddRateLimited(podItem) @@ -1371,13 +1341,13 @@ func (nc *Controller) processPod(podItem podUpdateItem) { func (nc *Controller) setLimiterInZone(zone string, zoneSize int, state ZoneState) { switch state { case stateNormal: - if nc.useTaintBasedEvictions { + if nc.runTaintManager { nc.zoneNoExecuteTainter[zone].SwapLimiter(nc.evictionLimiterQPS) } else { nc.zonePodEvictor[zone].SwapLimiter(nc.evictionLimiterQPS) } case statePartialDisruption: - if nc.useTaintBasedEvictions { + if nc.runTaintManager { nc.zoneNoExecuteTainter[zone].SwapLimiter( nc.enterPartialDisruptionFunc(zoneSize)) } else { @@ -1385,7 +1355,7 @@ func (nc *Controller) setLimiterInZone(zone string, zoneSize int, state ZoneStat nc.enterPartialDisruptionFunc(zoneSize)) } case stateFullDisruption: - if nc.useTaintBasedEvictions { + if nc.runTaintManager { nc.zoneNoExecuteTainter[zone].SwapLimiter( nc.enterFullDisruptionFunc(zoneSize)) } else { @@ -1451,7 +1421,7 @@ func (nc *Controller) addPodEvictorForNewZone(node *v1.Node) { zone := utilnode.GetZoneKey(node) if _, found := nc.zoneStates[zone]; !found { nc.zoneStates[zone] = stateInitial - if !nc.useTaintBasedEvictions { + if !nc.runTaintManager { nc.zonePodEvictor[zone] = scheduler.NewRateLimitedTimedQueue( flowcontrol.NewTokenBucketRateLimiter(nc.evictionLimiterQPS, scheduler.EvictionRateLimiterBurst)) diff --git a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go index 950baa09827..a7151b6e4c8 100644 --- a/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go +++ b/pkg/controller/nodelifecycle/node_lifecycle_controller_test.go @@ -18,6 +18,7 @@ limitations under the License. 
package nodelifecycle import ( + "context" "fmt" "strings" "testing" @@ -25,7 +26,7 @@ import ( "github.com/openyurtio/openyurt/pkg/controller/nodelifecycle/scheduler" apps "k8s.io/api/apps/v1" - coordv1beta1 "k8s.io/api/coordination/v1beta1" + coordv1 "k8s.io/api/coordination/v1" v1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/resource" @@ -37,7 +38,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" appsinformers "k8s.io/client-go/informers/apps/v1" - coordinformers "k8s.io/client-go/informers/coordination/v1beta1" + coordinformers "k8s.io/client-go/informers/coordination/v1" coreinformers "k8s.io/client-go/informers/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" @@ -48,7 +49,6 @@ import ( nodeutil "k8s.io/kubernetes/pkg/controller/util/node" "k8s.io/kubernetes/pkg/features" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" - schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/pkg/util/node" taintutils "k8s.io/kubernetes/pkg/util/taints" "k8s.io/utils/pointer" @@ -68,7 +68,7 @@ func alwaysReady() bool { return true } func fakeGetPodsAssignedToNode(c *fake.Clientset) func(string) ([]*v1.Pod, error) { return func(nodeName string) ([]*v1.Pod, error) { selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}) - pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{ + pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ FieldSelector: selector.String(), LabelSelector: labels.Everything().String(), }) @@ -113,20 +113,20 @@ func (nc *nodeLifecycleController) doEviction(fakeNodeHandler *testutil.FakeNode return false } -func createNodeLease(nodeName string, renewTime metav1.MicroTime) *coordv1beta1.Lease { - return &coordv1beta1.Lease{ +func createNodeLease(nodeName string, renewTime metav1.MicroTime) *coordv1.Lease { + return &coordv1.Lease{ ObjectMeta: metav1.ObjectMeta{ Name: nodeName, Namespace: v1.NamespaceNodeLease, }, - Spec: coordv1beta1.LeaseSpec{ + Spec: coordv1.LeaseSpec{ HolderIdentity: pointer.StringPtr(nodeName), RenewTime: &renewTime, }, } } -func (nc *nodeLifecycleController) syncLeaseStore(lease *coordv1beta1.Lease) error { +func (nc *nodeLifecycleController) syncLeaseStore(lease *coordv1.Lease) error { if lease == nil { return nil } @@ -136,7 +136,7 @@ func (nc *nodeLifecycleController) syncLeaseStore(lease *coordv1beta1.Lease) err } func (nc *nodeLifecycleController) syncNodeStore(fakeNodeHandler *testutil.FakeNodeHandler) error { - nodes, err := fakeNodeHandler.List(metav1.ListOptions{}) + nodes, err := fakeNodeHandler.List(context.TODO(), metav1.ListOptions{}) if err != nil { return err } @@ -162,7 +162,7 @@ func newNodeLifecycleControllerFromClient( factory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) - leaseInformer := factory.Coordination().V1beta1().Leases() + leaseInformer := factory.Coordination().V1().Leases() nodeInformer := factory.Core().V1().Nodes() daemonSetInformer := factory.Apps().V1().DaemonSets() @@ -181,8 +181,6 @@ func newNodeLifecycleControllerFromClient( largeClusterThreshold, unhealthyZoneThreshold, useTaints, - useTaints, - useTaints, ) if err != nil { return nil, err @@ -200,8 +198,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC) evictionTimeout := 10 * time.Minute labels := map[string]string{ - v1.LabelZoneRegion: "region1", - 
v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", } // Because of the logic that prevents NC from evicting anything when all Nodes are NotReady @@ -237,8 +237,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: fakeNow, Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, }, @@ -247,8 +249,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -317,8 +321,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -337,8 +343,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -381,8 +389,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -401,8 +411,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -472,8 +484,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -492,8 +506,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: 
"region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -536,8 +552,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -556,8 +574,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -600,8 +620,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -620,8 +642,10 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -710,7 +734,7 @@ func TestMonitorNodeHealthEvictPods(t *testing.T) { return true, 0 }) } else { - t.Fatalf("Zone %v was unitialized!", zone) + t.Fatalf("Zone %v was uninitialized!", zone) } } @@ -765,8 +789,10 @@ func TestPodStatusChange(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -933,8 +959,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -953,8 +981,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -988,8 +1018,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 
1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1008,8 +1040,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region2", - v1.LabelZoneFailureDomain: "zone2", + v1.LabelZoneRegionStable: "region2", + v1.LabelZoneFailureDomainStable: "zone2", + v1.LabelZoneRegion: "region2", + v1.LabelZoneFailureDomain: "zone2", }, }, Status: v1.NodeStatus{ @@ -1050,8 +1084,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1070,8 +1106,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone2", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone2", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone2", }, }, Status: v1.NodeStatus{ @@ -1111,8 +1149,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1131,8 +1171,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node-master", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1170,8 +1212,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1190,8 +1234,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone2", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone2", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone2", }, }, Status: v1.NodeStatus{ @@ -1232,8 +1278,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t 
*testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1252,8 +1300,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1272,8 +1322,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node2", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1292,8 +1344,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node3", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1312,8 +1366,10 @@ func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) { Name: "node4", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -1692,17 +1748,15 @@ func TestMonitorNodeHealthUpdateStatus(t *testing.T) { } func TestMonitorNodeHealthUpdateNodeAndPodStatusWithLease(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)() - nodeCreationTime := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC) fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC) testcases := []struct { description string fakeNodeHandler *testutil.FakeNodeHandler - lease *coordv1beta1.Lease + lease *coordv1.Lease timeToPass time.Duration newNodeStatus v1.NodeStatus - newLease *coordv1beta1.Lease + newLease *coordv1.Lease expectedRequestCount int expectedNodes []*v1.Node expectedPodStatusUpdate bool @@ -2587,8 +2641,10 @@ func TestApplyNoExecuteTaints(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2609,8 +2665,10 @@ func TestApplyNoExecuteTaints(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - 
v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2630,8 +2688,10 @@ func TestApplyNoExecuteTaints(t *testing.T) { Name: "node2", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2680,7 +2740,7 @@ func TestApplyNoExecuteTaints(t *testing.T) { t.Errorf("unexpected error: %v", err) } nodeController.doNoExecuteTaintingPass() - node0, err := fakeNodeHandler.Get("node0", metav1.GetOptions{}) + node0, err := fakeNodeHandler.Get(context.TODO(), "node0", metav1.GetOptions{}) if err != nil { t.Errorf("Can't get current node0...") return @@ -2688,7 +2748,7 @@ func TestApplyNoExecuteTaints(t *testing.T) { if !taintutils.TaintExists(node0.Spec.Taints, UnreachableTaintTemplate) { t.Errorf("Can't find taint %v in %v", originalTaint, node0.Spec.Taints) } - node2, err := fakeNodeHandler.Get("node2", metav1.GetOptions{}) + node2, err := fakeNodeHandler.Get(context.TODO(), "node2", metav1.GetOptions{}) if err != nil { t.Errorf("Can't get current node2...") return @@ -2699,7 +2759,7 @@ func TestApplyNoExecuteTaints(t *testing.T) { // Make node3 healthy again. node2.Status = healthyNodeNewStatus - _, err = fakeNodeHandler.UpdateStatus(node2) + _, err = fakeNodeHandler.UpdateStatus(context.TODO(), node2, metav1.UpdateOptions{}) if err != nil { t.Errorf(err.Error()) return @@ -2712,7 +2772,7 @@ func TestApplyNoExecuteTaints(t *testing.T) { } nodeController.doNoExecuteTaintingPass() - node2, err = fakeNodeHandler.Get("node2", metav1.GetOptions{}) + node2, err = fakeNodeHandler.Get(context.TODO(), "node2", metav1.GetOptions{}) if err != nil { t.Errorf("Can't get current node2...") return @@ -2734,8 +2794,10 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2757,8 +2819,10 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) { Name: "node1", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2822,12 +2886,12 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) { } nodeController.doNoExecuteTaintingPass() - node0, err := fakeNodeHandler.Get("node0", metav1.GetOptions{}) + node0, err := fakeNodeHandler.Get(context.TODO(), "node0", metav1.GetOptions{}) if err != nil { t.Errorf("Can't get current node0...") return } - node1, err := fakeNodeHandler.Get("node1", metav1.GetOptions{}) + node1, err := fakeNodeHandler.Get(context.TODO(), "node1", metav1.GetOptions{}) if err != nil { t.Errorf("Can't get current node1...") return @@ -2841,12 +2905,12 @@ func TestSwapUnreachableNotReadyTaints(t 
*testing.T) { node0.Status = newNodeStatus node1.Status = healthyNodeNewStatus - _, err = fakeNodeHandler.UpdateStatus(node0) + _, err = fakeNodeHandler.UpdateStatus(context.TODO(), node0, metav1.UpdateOptions{}) if err != nil { t.Errorf(err.Error()) return } - _, err = fakeNodeHandler.UpdateStatus(node1) + _, err = fakeNodeHandler.UpdateStatus(context.TODO(), node1, metav1.UpdateOptions{}) if err != nil { t.Errorf(err.Error()) return @@ -2860,7 +2924,7 @@ func TestSwapUnreachableNotReadyTaints(t *testing.T) { } nodeController.doNoExecuteTaintingPass() - node0, err = fakeNodeHandler.Get("node0", metav1.GetOptions{}) + node0, err = fakeNodeHandler.Get(context.TODO(), "node0", metav1.GetOptions{}) if err != nil { t.Errorf("Can't get current node0...") return @@ -2883,8 +2947,10 @@ func TestTaintsNodeByCondition(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2918,15 +2984,15 @@ func TestTaintsNodeByCondition(t *testing.T) { nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(fakeNodeHandler.Clientset) networkUnavailableTaint := &v1.Taint{ - Key: schedulerapi.TaintNodeNetworkUnavailable, + Key: v1.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule, } notReadyTaint := &v1.Taint{ - Key: schedulerapi.TaintNodeNotReady, + Key: v1.TaintNodeNotReady, Effect: v1.TaintEffectNoSchedule, } unreachableTaint := &v1.Taint{ - Key: schedulerapi.TaintNodeUnreachable, + Key: v1.TaintNodeUnreachable, Effect: v1.TaintEffectNoSchedule, } @@ -2942,8 +3008,10 @@ func TestTaintsNodeByCondition(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -2972,8 +3040,10 @@ func TestTaintsNodeByCondition(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -3002,8 +3072,10 @@ func TestTaintsNodeByCondition(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -3026,8 +3098,10 @@ func TestTaintsNodeByCondition(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -3046,7 +3120,7 @@ func 
TestTaintsNodeByCondition(t *testing.T) { } for _, test := range tests { - fakeNodeHandler.Update(test.Node) + fakeNodeHandler.Update(context.TODO(), test.Node, metav1.UpdateOptions{}) if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil { t.Errorf("unexpected error: %v", err) } @@ -3148,8 +3222,10 @@ func TestReconcileNodeLabels(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", - v1.LabelZoneFailureDomain: "zone1", + v1.LabelZoneRegionStable: "region1", + v1.LabelZoneFailureDomainStable: "zone1", + v1.LabelZoneRegion: "region1", + v1.LabelZoneFailureDomain: "zone1", }, }, Status: v1.NodeStatus{ @@ -3204,12 +3280,12 @@ func TestReconcileNodeLabels(t *testing.T) { Name: "node0", CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), Labels: map[string]string{ - v1.LabelZoneRegion: "region1", + v1.LabelZoneRegionStable: "region1", }, }, }, ExpectedLabels: map[string]string{ - v1.LabelZoneRegion: "region1", + v1.LabelZoneRegionStable: "region1", }, }, { @@ -3255,7 +3331,7 @@ func TestReconcileNodeLabels(t *testing.T) { } for _, test := range tests { - fakeNodeHandler.Update(test.Node) + fakeNodeHandler.Update(context.TODO(), test.Node, metav1.UpdateOptions{}) if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil { t.Fatalf("unexpected error: %v", err) } @@ -3279,7 +3355,6 @@ func TestReconcileNodeLabels(t *testing.T) { if actualValue != expectedValue { t.Errorf("%s: label %q: expected value %q, got value %q", test.Name, key, expectedValue, actualValue) } - } } } diff --git a/pkg/controller/nodelifecycle/scheduler/rate_limited_queue.go b/pkg/controller/nodelifecycle/scheduler/rate_limited_queue.go index cd04ce13992..03a1fcb889f 100644 --- a/pkg/controller/nodelifecycle/scheduler/rate_limited_queue.go +++ b/pkg/controller/nodelifecycle/scheduler/rate_limited_queue.go @@ -300,7 +300,7 @@ func (q *RateLimitedTimedQueue) SwapLimiter(newQPS float32) { // - number of used tokens // - number of available tokens // - something else - if q.limiter.TryAccept() == false { + if !q.limiter.TryAccept() { newLimiter.TryAccept() } } diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager.go b/pkg/controller/nodelifecycle/scheduler/taint_manager.go index 04cef55ee64..3c7b7e2e27a 100644 --- a/pkg/controller/nodelifecycle/scheduler/taint_manager.go +++ b/pkg/controller/nodelifecycle/scheduler/taint_manager.go @@ -18,6 +18,7 @@ limitations under the License. package scheduler import ( + "context" "fmt" "hash/fnv" "io" @@ -44,7 +45,7 @@ import ( const ( // TODO (k82cn): Figure out a reasonable number of workers/channels and propagate - // the number of workers up making it a paramater of Run() function. + // the number of workers up making it a parameter of Run() function. // NodeUpdateChannelSize defines the size of channel for node update events.
NodeUpdateChannelSize = 10 @@ -110,7 +111,7 @@ func deletePodHandler(c clientset.Interface, emitEventFunc func(types.Namespaced } var err error for i := 0; i < retries; i++ { - err = c.CoreV1().Pods(ns).Delete(name, &metav1.DeleteOptions{}) + err = c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err == nil { break } diff --git a/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go b/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go index b1be513c00b..30f0ba31793 100644 --- a/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go +++ b/pkg/controller/nodelifecycle/scheduler/taint_manager_test.go @@ -17,6 +17,7 @@ limitations under the License. package scheduler import ( + "context" "fmt" "sort" "sync" @@ -37,32 +38,31 @@ var timeForControllerToProgress = 500 * time.Millisecond func getPodFromClientset(clientset *fake.Clientset) GetPodFunc { return func(name, namespace string) (*v1.Pod, error) { - return clientset.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + return clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } } func getPodsAssignedToNode(c *fake.Clientset) GetPodsByNodeNameFunc { return func(nodeName string) ([]*v1.Pod, error) { selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName}) - pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{ + pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ FieldSelector: selector.String(), LabelSelector: labels.Everything().String(), }) if err != nil { return []*v1.Pod{}, fmt.Errorf("failed to get Pods assigned to node %v", nodeName) } - items := make([]*v1.Pod, len(pods.Items)) + rPods := make([]*v1.Pod, len(pods.Items)) for i := range pods.Items { - items[i] = &pods.Items[i] + rPods[i] = &pods.Items[i] } - - return items, nil + return rPods, nil } } func getNodeFromClientset(clientset *fake.Clientset) GetNodeFunc { return func(name string) (*v1.Node, error) { - return clientset.CoreV1().Nodes().Get(name, metav1.GetOptions{}) + return clientset.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) } } @@ -223,7 +223,7 @@ func TestCreatePod(t *testing.T) { } } if podDeleted != item.expectDelete { - t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted) + t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted) } close(stopCh) } @@ -320,7 +320,7 @@ func TestUpdatePod(t *testing.T) { } } if podDeleted != item.expectDelete { - t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted) + t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted) } close(stopCh) } @@ -376,7 +376,7 @@ func TestCreateNode(t *testing.T) { } } if podDeleted != item.expectDelete { - t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted) + t.Errorf("%v: Unexpected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted) } close(stopCh) } @@ -500,7 +500,7 @@ func TestUpdateNode(t *testing.T) { } } if podDeleted != item.expectDelete { - t.Errorf("%v: Unexepected test result. Expected delete %v, got %v", item.description, item.expectDelete, podDeleted) + t.Errorf("%v: Unexpected test result. 
Expected delete %v, got %v", item.description, item.expectDelete, podDeleted) } close(stopCh) } @@ -622,6 +622,7 @@ func TestUpdateNodeWithMultiplePods(t *testing.T) { func TestGetMinTolerationTime(t *testing.T) { one := int64(1) + two := int64(2) oneSec := 1 * time.Second tests := []struct { @@ -632,6 +633,26 @@ func TestGetMinTolerationTime(t *testing.T) { tolerations: []v1.Toleration{}, expected: 0, }, + { + tolerations: []v1.Toleration{ + { + TolerationSeconds: nil, + }, + }, + expected: -1, + }, + { + tolerations: []v1.Toleration{ + { + TolerationSeconds: &one, + }, + { + TolerationSeconds: &two, + }, + }, + expected: oneSec, + }, + { tolerations: []v1.Toleration{ { @@ -667,7 +688,7 @@ func TestGetMinTolerationTime(t *testing.T) { // TestEventualConsistency verifies if getPodsAssignedToNode returns incomplete data // (e.g. due to watch latency), it will reconcile the remaining pods eventually. // This scenario is partially covered by TestUpdatePods, but given this is an important -// property of TaitManager, it's better to have explicit test for this. +// property of TaintManager, it's better to have explicit test for this. func TestEventualConsistency(t *testing.T) { testCases := []struct { description string diff --git a/pkg/controller/nodelifecycle/scheduler/timed_workers.go b/pkg/controller/nodelifecycle/scheduler/timed_workers.go index d995fb22a36..36bb985130e 100644 --- a/pkg/controller/nodelifecycle/scheduler/timed_workers.go +++ b/pkg/controller/nodelifecycle/scheduler/timed_workers.go @@ -137,7 +137,7 @@ func (q *TimedWorkerQueue) CancelWork(key string) bool { } // GetWorkerUnsafe returns a TimedWorker corresponding to the given key. -// Unsafe method - workers have attached goroutines which can fire afater this function is called. +// Unsafe method - workers have attached goroutines which can fire after this function is called. func (q *TimedWorkerQueue) GetWorkerUnsafe(key string) *TimedWorker { q.Lock() defer q.Unlock() diff --git a/pkg/controller/nodelifecycle/scheduler/timed_workers_test.go b/pkg/controller/nodelifecycle/scheduler/timed_workers_test.go index 9489fd18437..0de8a9be5e6 100644 --- a/pkg/controller/nodelifecycle/scheduler/timed_workers_test.go +++ b/pkg/controller/nodelifecycle/scheduler/timed_workers_test.go @@ -47,7 +47,7 @@ func TestExecute(t *testing.T) { wg.Wait() lastVal := atomic.LoadInt32(&testVal) if lastVal != 5 { - t.Errorf("Espected testVal = 5, got %v", lastVal) + t.Errorf("Expected testVal = 5, got %v", lastVal) } } @@ -75,7 +75,7 @@ func TestExecuteDelayed(t *testing.T) { wg.Wait() lastVal := atomic.LoadInt32(&testVal) if lastVal != 5 { - t.Errorf("Espected testVal = 5, got %v", lastVal) + t.Errorf("Expected testVal = 5, got %v", lastVal) } } @@ -105,7 +105,7 @@ func TestCancel(t *testing.T) { wg.Wait() lastVal := atomic.LoadInt32(&testVal) if lastVal != 3 { - t.Errorf("Espected testVal = 3, got %v", lastVal) + t.Errorf("Expected testVal = 3, got %v", lastVal) } } @@ -136,6 +136,6 @@ func TestCancelAndReadd(t *testing.T) { wg.Wait() lastVal := atomic.LoadInt32(&testVal) if lastVal != 4 { - t.Errorf("Espected testVal = 4, got %v", lastVal) + t.Errorf("Expected testVal = 4, got %v", lastVal) } } diff --git a/pkg/controller/util/node/controller_utils.go b/pkg/controller/util/node/controller_utils.go index 7b58b261afb..0c127269060 100644 --- a/pkg/controller/util/node/controller_utils.go +++ b/pkg/controller/util/node/controller_utils.go @@ -18,20 +18,20 @@ limitations under the License. 
package node import ( + "context" "fmt" "strings" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - - v1 "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" appsv1listers "k8s.io/client-go/listers/apps/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" utilpod "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/kubelet/util/format" @@ -85,7 +85,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record. klog.V(2).Infof("Starting deletion of pod %v/%v", pod.Namespace, pod.Name) recorder.Eventf(pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName) - if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { + if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.Background(), pod.Name, metav1.DeleteOptions{}); err != nil { if apierrors.IsNotFound(err) { // NotFound error means that pod was already deleted. // There is nothing left to do with this pod. @@ -115,7 +115,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa var updatedPod *v1.Pod var err error - if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod); err != nil { + if updatedPod, err = kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.Background(), pod, metav1.UpdateOptions{}); err != nil { return nil, err } return updatedPod, nil @@ -147,7 +147,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s break } klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name) - _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod) + _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.Background(), pod, metav1.UpdateOptions{}) if err != nil { if apierrors.IsNotFound(err) { // NotFound error means that pod was already deleted. diff --git a/pkg/yurtctl/cmd/clusterinfo/clusterinfo.go b/pkg/yurtctl/cmd/clusterinfo/clusterinfo.go index 3817866e1ad..e89859fcf09 100644 --- a/pkg/yurtctl/cmd/clusterinfo/clusterinfo.go +++ b/pkg/yurtctl/cmd/clusterinfo/clusterinfo.go @@ -17,6 +17,7 @@ limitations under the License. package clusterinfo import ( + "context" "fmt" "io" "os" @@ -87,7 +88,7 @@ func (o *ClusterInfoOptions) Validate() error { func (o *ClusterInfoOptions) Run() (err error) { key := projectinfo.GetEdgeWorkerLabelKey() - Nodes, err := o.clientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + Nodes, err := o.clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) if err != nil { return } diff --git a/pkg/yurtctl/cmd/convert/convert.go b/pkg/yurtctl/cmd/convert/convert.go index 6803418c404..aee65218120 100644 --- a/pkg/yurtctl/cmd/convert/convert.go +++ b/pkg/yurtctl/cmd/convert/convert.go @@ -17,6 +17,7 @@ limitations under the License. package convert import ( + "context" "fmt" "strings" "time" @@ -249,7 +250,7 @@ func (co *ConvertOptions) RunConvert() (err error) { klog.V(4).Info("the server version is valid") // 1.1. 
check the state of worker nodes - nodeLst, err := co.clientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeLst, err := co.clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) if err != nil { return } @@ -310,7 +311,7 @@ func (co *ConvertOptions) RunConvert() (err error) { } // 4. delete the system:controller:node-controller clusterrolebinding to disable node-controller - if err = co.clientSet.RbacV1().ClusterRoleBindings().Delete("system:controller:node-controller", &metav1.DeleteOptions{ + if err = co.clientSet.RbacV1().ClusterRoleBindings().Delete(context.Background(), "system:controller:node-controller", metav1.DeleteOptions{ PropagationPolicy: &kubeutil.PropagationPolicy, }); err != nil && !apierrors.IsNotFound(err) { klog.Errorf("fail to delete clusterrolebinding system:controller:node-controller: %v", err) @@ -444,7 +445,7 @@ func deployYurttunnelAgent( // prepareClusterInfoConfigMap will create cluster-info configmap in kube-public namespace if it does not exist func prepareClusterInfoConfigMap(client *kubernetes.Clientset, file string) error { - info, err := client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) + info, err := client.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.Background(), bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil && apierrors.IsNotFound(err) { // Create the cluster-info ConfigMap with the associated RBAC rules if err := clusterinfophase.CreateBootstrapConfigMapIfNotExists(client, file); err != nil { diff --git a/pkg/yurtctl/cmd/convert/edgenode.go b/pkg/yurtctl/cmd/convert/edgenode.go index 845b1f8bd94..83386bedc0f 100644 --- a/pkg/yurtctl/cmd/convert/edgenode.go +++ b/pkg/yurtctl/cmd/convert/edgenode.go @@ -17,6 +17,7 @@ limitations under the License. package convert import ( + "context" "fmt" "io/ioutil" "net/http" @@ -187,7 +188,7 @@ func (c *ConvertEdgeNodeOptions) RunConvertEdgeNode() (err error) { } if len(c.EdgeNodes) > 1 || len(c.EdgeNodes) == 1 && c.EdgeNodes[0] != nodeName { // 2 remote edgenode convert - nodeLst, err := c.clientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeLst, err := c.clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) if err != nil { return err } @@ -243,7 +244,7 @@ func (c *ConvertEdgeNodeOptions) RunConvertEdgeNode() (err error) { } } else { // 3. local edgenode convert - node, err := c.clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := c.clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) if err != nil { return err } diff --git a/pkg/yurtctl/cmd/markautonomous/markautonomous.go b/pkg/yurtctl/cmd/markautonomous/markautonomous.go index ef6a1befb5c..8e45804946f 100644 --- a/pkg/yurtctl/cmd/markautonomous/markautonomous.go +++ b/pkg/yurtctl/cmd/markautonomous/markautonomous.go @@ -17,6 +17,7 @@ limitations under the License. package markautonomous import ( + "context" "fmt" "strings" @@ -109,7 +110,7 @@ func (mao *MarkAutonomousOptions) RunMarkAutonomous() (err error) { // make all edge nodes autonomous labelSelector := fmt.Sprintf("%s=true", projectinfo.GetEdgeWorkerLabelKey()) edgeNodeList, err = mao.CoreV1().Nodes(). 
- List(metav1.ListOptions{LabelSelector: labelSelector}) + List(context.Background(), metav1.ListOptions{LabelSelector: labelSelector}) if err != nil { return } @@ -124,7 +125,7 @@ func (mao *MarkAutonomousOptions) RunMarkAutonomous() (err error) { // make only the specified edge nodes autonomous for _, nodeName := range mao.AutonomousNodes { var node *v1.Node - node, err = mao.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err = mao.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) if err != nil { return } diff --git a/pkg/yurtctl/cmd/revert/edgenode.go b/pkg/yurtctl/cmd/revert/edgenode.go index 2bb6c65e3ac..4fce10f508f 100644 --- a/pkg/yurtctl/cmd/revert/edgenode.go +++ b/pkg/yurtctl/cmd/revert/edgenode.go @@ -17,6 +17,7 @@ limitations under the License. package revert import ( + "context" "fmt" "os" "os/exec" @@ -149,7 +150,7 @@ func (r *RevertEdgeNodeOptions) RunRevertEdgeNode() (err error) { } if len(r.EdgeNodes) > 1 || len(r.EdgeNodes) == 1 && r.EdgeNodes[0] != nodeName { // 2. remote edgenode revert - nodeLst, err := r.clientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeLst, err := r.clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) if err != nil { return err } @@ -194,7 +195,7 @@ func (r *RevertEdgeNodeOptions) RunRevertEdgeNode() (err error) { } } else { // 3. local edgenode revert - node, err := r.clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) + node, err := r.clientSet.CoreV1().Nodes().Get(context.Background(), nodeName, metav1.GetOptions{}) if err != nil { return err } @@ -215,7 +216,7 @@ func (r *RevertEdgeNodeOptions) RunRevertEdgeNode() (err error) { } delete(node.Labels, projectinfo.GetEdgeWorkerLabelKey()) - if _, err = r.clientSet.CoreV1().Nodes().Update(node); err != nil { + if _, err = r.clientSet.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{}); err != nil { return err } } else { diff --git a/pkg/yurtctl/cmd/revert/revert.go b/pkg/yurtctl/cmd/revert/revert.go index 8b1b412a786..2e7292a87f9 100644 --- a/pkg/yurtctl/cmd/revert/revert.go +++ b/pkg/yurtctl/cmd/revert/revert.go @@ -17,6 +17,7 @@ limitations under the License. package revert import ( + "context" "fmt" "github.com/spf13/cobra" @@ -125,7 +126,7 @@ func (ro *RevertOptions) RunRevert() (err error) { klog.V(4).Info("the server version is valid") // 1.1. check the state of worker nodes - nodeLst, err := ro.clientSet.CoreV1().Nodes().List(metav1.ListOptions{}) + nodeLst, err := ro.clientSet.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) if err != nil { return } @@ -153,7 +154,7 @@ func (ro *RevertOptions) RunRevert() (err error) { if ok && isEdgeNode == "false" { // remove the label for both the cloud node delete(node.Labels, projectinfo.GetEdgeWorkerLabelKey()) - if _, err = ro.clientSet.CoreV1().Nodes().Update(&node); err != nil { + if _, err = ro.clientSet.CoreV1().Nodes().Update(context.Background(), &node, metav1.UpdateOptions{}); err != nil { return } } @@ -162,7 +163,7 @@ func (ro *RevertOptions) RunRevert() (err error) { // 3. remove the yurt controller manager if err = ro.clientSet.AppsV1().Deployments("kube-system"). 
- Delete("yurt-controller-manager", &metav1.DeleteOptions{ + Delete(context.Background(), "yurt-controller-manager", metav1.DeleteOptions{ PropagationPolicy: &kubeutil.PropagationPolicy, }); err != nil && !apierrors.IsNotFound(err) { klog.Errorf("fail to remove yurt controller manager: %s", err) @@ -172,7 +173,7 @@ func (ro *RevertOptions) RunRevert() (err error) { // 3.1 remove the serviceaccount for yurt-controller-manager if err = ro.clientSet.CoreV1().ServiceAccounts("kube-system"). - Delete("yurt-controller-manager", &metav1.DeleteOptions{ + Delete(context.Background(), "yurt-controller-manager", metav1.DeleteOptions{ PropagationPolicy: &kubeutil.PropagationPolicy, }); err != nil && !apierrors.IsNotFound(err) { klog.Errorf("fail to remove serviceaccount for yurt controller manager: %s", err) @@ -182,7 +183,7 @@ func (ro *RevertOptions) RunRevert() (err error) { // 3.2 remove the clusterrole for yurt-controller-manager if err = ro.clientSet.RbacV1().ClusterRoles(). - Delete("yurt-controller-manager", &metav1.DeleteOptions{ + Delete(context.Background(), "yurt-controller-manager", metav1.DeleteOptions{ PropagationPolicy: &kubeutil.PropagationPolicy, }); err != nil && !apierrors.IsNotFound(err) { klog.Errorf("fail to remove clusterrole for yurt controller manager: %s", err) @@ -192,7 +193,7 @@ func (ro *RevertOptions) RunRevert() (err error) { // 3.3 remove the clusterrolebinding for yurt-controller-manager if err = ro.clientSet.RbacV1().ClusterRoleBindings(). - Delete("yurt-controller-manager", &metav1.DeleteOptions{ + Delete(context.Background(), "yurt-controller-manager", metav1.DeleteOptions{ PropagationPolicy: &kubeutil.PropagationPolicy, }); err != nil && !apierrors.IsNotFound(err) { klog.Errorf("fail to remove clusterrolebinding for yurt controller manager: %s", err) @@ -230,7 +231,8 @@ func (ro *RevertOptions) RunRevert() (err error) { }, }, } - if _, err = ro.clientSet.RbacV1().ClusterRoleBindings().Create(ncClusterrolebinding); err != nil && !apierrors.IsAlreadyExists(err) { + if _, err = ro.clientSet.RbacV1().ClusterRoleBindings().Create(context.Background(), ncClusterrolebinding, + metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) { klog.Errorf("fail to create clusterrolebinding system:controller:node-controller: %v", err) return } @@ -256,8 +258,8 @@ func removeYurtTunnelServer(client *kubernetes.Clientset) error { // 1. remove the DaemonSet if err := client.AppsV1(). Deployments(constants.YurttunnelNamespace). - Delete(constants.YurttunnelServerComponentName, - &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + Delete(context.Background(), constants.YurttunnelServerComponentName, + metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("fail to delete the daemonset/%s: %s", constants.YurttunnelServerComponentName, err) } @@ -265,8 +267,8 @@ func removeYurtTunnelServer(client *kubernetes.Clientset) error { // 2.1 remove the Service if err := client.CoreV1().Services(constants.YurttunnelNamespace). 
- Delete(constants.YurttunnelServerSvcName, - &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + Delete(context.Background(), constants.YurttunnelServerSvcName, + metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("fail to delete the service/%s: %s", constants.YurttunnelServerSvcName, err) } @@ -274,8 +276,8 @@ func removeYurtTunnelServer(client *kubernetes.Clientset) error { // 2.2 remove the internal Service(type=ClusterIP) if err := client.CoreV1().Services(constants.YurttunnelNamespace). - Delete(constants.YurttunnelServerInternalSvcName, - &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + Delete(context.Background(), constants.YurttunnelServerInternalSvcName, + metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("fail to delete the service/%s: %s", constants.YurttunnelServerInternalSvcName, err) } @@ -283,8 +285,8 @@ func removeYurtTunnelServer(client *kubernetes.Clientset) error { // 3. remove the ClusterRoleBinding if err := client.RbacV1().ClusterRoleBindings(). - Delete(constants.YurttunnelServerComponentName, - &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + Delete(context.Background(), constants.YurttunnelServerComponentName, + metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("fail to delete the clusterrolebinding/%s: %s", constants.YurttunnelServerComponentName, err) } @@ -292,8 +294,8 @@ func removeYurtTunnelServer(client *kubernetes.Clientset) error { // 4. remove the SerivceAccount if err := client.CoreV1().ServiceAccounts(constants.YurttunnelNamespace). - Delete(constants.YurttunnelServerComponentName, - &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + Delete(context.Background(), constants.YurttunnelServerComponentName, + metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("fail to delete the serviceaccount/%s: %s", constants.YurttunnelServerComponentName, err) } @@ -301,16 +303,16 @@ func removeYurtTunnelServer(client *kubernetes.Clientset) error { // 5. remove the ClusterRole if err := client.RbacV1().ClusterRoles(). - Delete(constants.YurttunnelServerComponentName, - &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + Delete(context.Background(), constants.YurttunnelServerComponentName, + metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("fail to delete the clusterrole/%s: %s", constants.YurttunnelServerComponentName, err) } // 6. remove the ConfigMap if err := client.CoreV1().ConfigMaps(constants.YurttunnelNamespace). - Delete(constants.YurttunnelServerCmName, - &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + Delete(context.Background(), constants.YurttunnelServerCmName, + metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("fail to delete the configmap/%s: %s", constants.YurttunnelServerCmName, err) } @@ -322,8 +324,8 @@ func removeYurtTunnelAgent(client *kubernetes.Clientset) error { // 1. remove the DaemonSet if err := client.AppsV1(). DaemonSets(constants.YurttunnelNamespace). 
- Delete(constants.YurttunnelAgentComponentName, - &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + Delete(context.Background(), constants.YurttunnelAgentComponentName, + metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("fail to delete the daemonset/%s: %s", constants.YurttunnelAgentComponentName, err) } @@ -331,8 +333,8 @@ func removeYurtTunnelAgent(client *kubernetes.Clientset) error { // 2. remove the ClusterRoleBinding if err := client.RbacV1().ClusterRoleBindings(). - Delete(constants.YurttunnelAgentComponentName, - &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + Delete(context.Background(), constants.YurttunnelAgentComponentName, + metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("fail to delete the clusterrolebinding/%s: %s", constants.YurttunnelAgentComponentName, err) } @@ -340,8 +342,8 @@ func removeYurtTunnelAgent(client *kubernetes.Clientset) error { // 3. remove the ClusterRole if err := client.RbacV1().ClusterRoles(). - Delete(constants.YurttunnelAgentComponentName, - &metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + Delete(context.Background(), constants.YurttunnelAgentComponentName, + metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("fail to delete the clusterrole/%s: %s", constants.YurttunnelAgentComponentName, err) } diff --git a/pkg/yurtctl/lock/lock.go b/pkg/yurtctl/lock/lock.go index 8311192586f..4f1ebce3b56 100644 --- a/pkg/yurtctl/lock/lock.go +++ b/pkg/yurtctl/lock/lock.go @@ -17,6 +17,7 @@ limitations under the License. package lock import ( + "context" "errors" "strconv" "time" @@ -45,7 +46,7 @@ var ( // AcquireLock tries to acquire the lock lock configmap/yurtctl-lock func AcquireLock(cli *kubernetes.Clientset) error { lockCm, err := cli.CoreV1().ConfigMaps("kube-system"). - Get(constants.YurtctlLockConfigMapName, metav1.GetOptions{}) + Get(context.Background(), constants.YurtctlLockConfigMapName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { // the lock is not exist, create one @@ -61,7 +62,7 @@ func AcquireLock(cli *kubernetes.Clientset) error { }, } if _, err := cli.CoreV1().ConfigMaps("kube-system"). - Create(cm); err != nil { + Create(context.Background(), cm, metav1.CreateOptions{}); err != nil { klog.Error("the lock configmap/yurtctl-lock is not found, " + "but fail to create a new one") return ErrAcquireLock @@ -113,7 +114,7 @@ func acquireLockAndUpdateCm(cli kubernetes.Interface, lockCm *v1.ConfigMap) erro lockCm.Annotations[AnnotationIsLocked] = "true" lockCm.Annotations[AnnotationAcquireTime] = strconv.FormatInt(time.Now().Unix(), 10) if _, err := cli.CoreV1().ConfigMaps("kube-system"). - Update(lockCm); err != nil { + Update(context.Background(), lockCm, metav1.UpdateOptions{}); err != nil { if apierrors.IsResourceExpired(err) { klog.Error("the lock is held by others") return ErrAcquireLock @@ -127,7 +128,7 @@ func acquireLockAndUpdateCm(cli kubernetes.Interface, lockCm *v1.ConfigMap) erro // ReleaseLock releases the lock configmap/yurtctl-lock func ReleaseLock(cli *kubernetes.Clientset) error { lockCm, err := cli.CoreV1().ConfigMaps("kube-system"). 
- Get(constants.YurtctlLockConfigMapName, metav1.GetOptions{}) + Get(context.Background(), constants.YurtctlLockConfigMapName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { klog.Error("lock is not found when try to release, " + @@ -150,7 +151,7 @@ func ReleaseLock(cli *kubernetes.Clientset) error { lockCm.Annotations[AnnotationIsLocked] = "false" delete(lockCm.Annotations, AnnotationAcquireTime) - _, err = cli.CoreV1().ConfigMaps("kube-system").Update(lockCm) + _, err = cli.CoreV1().ConfigMaps("kube-system").Update(context.Background(), lockCm, metav1.UpdateOptions{}) if err != nil { if apierrors.IsResourceExpired(err) { klog.Error("lock has been touched by others during release, " + diff --git a/pkg/yurtctl/util/kubernetes/util.go b/pkg/yurtctl/util/kubernetes/util.go index e2c75d318b0..504b40e8707 100644 --- a/pkg/yurtctl/util/kubernetes/util.go +++ b/pkg/yurtctl/util/kubernetes/util.go @@ -17,6 +17,7 @@ limitations under the License. package kubernetes import ( + "context" "errors" "fmt" "os" @@ -85,7 +86,7 @@ func CreateServiceAccountFromYaml(cliSet *kubernetes.Clientset, ns, saTmpl strin if !ok { return fmt.Errorf("fail to assert serviceaccount: %v", err) } - _, err = cliSet.CoreV1().ServiceAccounts(ns).Create(sa) + _, err = cliSet.CoreV1().ServiceAccounts(ns).Create(context.Background(), sa, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("fail to create the serviceaccount/%s: %v", sa.Name, err) } @@ -103,7 +104,7 @@ func CreateClusterRoleFromYaml(cliSet *kubernetes.Clientset, crTmpl string) erro if !ok { return fmt.Errorf("fail to assert clusterrole: %v", err) } - _, err = cliSet.RbacV1().ClusterRoles().Create(cr) + _, err = cliSet.RbacV1().ClusterRoles().Create(context.Background(), cr, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("fail to create the clusterrole/%s: %v", cr.Name, err) } @@ -121,7 +122,7 @@ func CreateClusterRoleBindingFromYaml(cliSet *kubernetes.Clientset, crbTmpl stri if !ok { return fmt.Errorf("fail to assert clusterrolebinding: %v", err) } - _, err = cliSet.RbacV1().ClusterRoleBindings().Create(crb) + _, err = cliSet.RbacV1().ClusterRoleBindings().Create(context.Background(), crb, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("fail to create the clusterrolebinding/%s: %v", crb.Name, err) } @@ -139,7 +140,7 @@ func CreateConfigMapFromYaml(cliSet *kubernetes.Clientset, ns, cmTmpl string) er if !ok { return fmt.Errorf("fail to assert configmap: %v", err) } - _, err = cliSet.CoreV1().ConfigMaps(ns).Create(cm) + _, err = cliSet.CoreV1().ConfigMaps(ns).Create(context.Background(), cm, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("fail to create the configmap/%s: %v", cm.Name, err) } @@ -148,8 +149,8 @@ func CreateConfigMapFromYaml(cliSet *kubernetes.Clientset, ns, cmTmpl string) er } // CreateDeployFromYaml creates the Deployment from the yaml template. 
-func CreateDeployFromYaml(cliSet *kubernetes.Clientset, ns, dplyTmpl string, context interface{}) error { - ycmdp, err := tmplutil.SubsituteTemplate(dplyTmpl, context) +func CreateDeployFromYaml(cliSet *kubernetes.Clientset, ns, dplyTmpl string, ctx interface{}) error { + ycmdp, err := tmplutil.SubsituteTemplate(dplyTmpl, ctx) if err != nil { return err } @@ -161,7 +162,7 @@ func CreateDeployFromYaml(cliSet *kubernetes.Clientset, ns, dplyTmpl string, con if !ok { return errors.New("fail to assert Deployment") } - if _, err = cliSet.AppsV1().Deployments(ns).Create(dply); err != nil { + if _, err = cliSet.AppsV1().Deployments(ns).Create(context.Background(), dply, metav1.CreateOptions{}); err != nil { return err } klog.V(4).Infof("the deployment/%s is deployed", dply.Name) @@ -169,8 +170,8 @@ func CreateDeployFromYaml(cliSet *kubernetes.Clientset, ns, dplyTmpl string, con } // CreateDaemonSetFromYaml creates the DaemonSet from the yaml template. -func CreateDaemonSetFromYaml(cliSet *kubernetes.Clientset, dsTmpl string, context interface{}) error { - ytadstmp, err := tmplutil.SubsituteTemplate(dsTmpl, context) +func CreateDaemonSetFromYaml(cliSet *kubernetes.Clientset, dsTmpl string, ctx interface{}) error { + ytadstmp, err := tmplutil.SubsituteTemplate(dsTmpl, ctx) if err != nil { return err } @@ -182,7 +183,7 @@ func CreateDaemonSetFromYaml(cliSet *kubernetes.Clientset, dsTmpl string, contex if !ok { return fmt.Errorf("fail to assert daemonset: %v", err) } - _, err = cliSet.AppsV1().DaemonSets("kube-system").Create(ds) + _, err = cliSet.AppsV1().DaemonSets("kube-system").Create(context.Background(), ds, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("fail to create the daemonset/%s: %v", ds.Name, err) } @@ -200,7 +201,7 @@ func CreateServiceFromYaml(cliSet *kubernetes.Clientset, svcTmpl string) error { if !ok { return fmt.Errorf("fail to assert service: %v", err) } - _, err = cliSet.CoreV1().Services("kube-system").Create(svc) + _, err = cliSet.CoreV1().Services("kube-system").Create(context.Background(), svc, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("fail to create the service/%s: %s", svc.Name, err) } @@ -221,7 +222,7 @@ func YamlToObject(yamlContent []byte) (runtime.Object, error) { // LabelNode add a new label (=) to the given node func LabelNode(cliSet *kubernetes.Clientset, node *v1.Node, key, val string) (*v1.Node, error) { node.Labels[key] = val - newNode, err := cliSet.CoreV1().Nodes().Update(node) + newNode, err := cliSet.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{}) if err != nil { return nil, err } @@ -231,7 +232,7 @@ func LabelNode(cliSet *kubernetes.Clientset, node *v1.Node, key, val string) (*v // AnnotateNode add a new annotation (=) to the given node func AnnotateNode(cliSet *kubernetes.Clientset, node *v1.Node, key, val string) (*v1.Node, error) { node.Annotations[key] = val - newNode, err := cliSet.CoreV1().Nodes().Update(node) + newNode, err := cliSet.CoreV1().Nodes().Update(context.Background(), node, metav1.UpdateOptions{}) if err != nil { return nil, err } @@ -240,7 +241,7 @@ func AnnotateNode(cliSet *kubernetes.Clientset, node *v1.Node, key, val string) // RunJobAndCleanup runs the job, wait for it to be complete, and delete it func RunJobAndCleanup(cliSet *kubernetes.Clientset, job *batchv1.Job, timeout, period time.Duration) error { - job, err := cliSet.BatchV1().Jobs(job.GetNamespace()).Create(job) + job, err := cliSet.BatchV1().Jobs(job.GetNamespace()).Create(context.Background(), job, 
metav1.CreateOptions{}) if err != nil { return err } @@ -251,7 +252,7 @@ func RunJobAndCleanup(cliSet *kubernetes.Clientset, job *batchv1.Job, timeout, p return errors.New("wait for job to be complete timeout") case <-time.After(period): job, err := cliSet.BatchV1().Jobs(job.GetNamespace()). - Get(job.GetName(), metav1.GetOptions{}) + Get(context.Background(), job.GetName(), metav1.GetOptions{}) if err != nil { klog.Errorf("fail to get job(%s) when waiting for it to be succeeded: %s", job.GetName(), err) @@ -259,7 +260,7 @@ func RunJobAndCleanup(cliSet *kubernetes.Clientset, job *batchv1.Job, timeout, p } if job.Status.Succeeded == *job.Spec.Completions { if err := cliSet.BatchV1().Jobs(job.GetNamespace()). - Delete(job.GetName(), &metav1.DeleteOptions{ + Delete(context.Background(), job.GetName(), metav1.DeleteOptions{ PropagationPolicy: &PropagationPolicy, }); err != nil { klog.Errorf("fail to delete succeeded servant job(%s): %s", @@ -392,7 +393,7 @@ func GetOrCreateJoinTokenString(cliSet *kubernetes.Clientset) (string, error) { FieldSelector: tokenSelector.String(), } klog.V(1).Infoln("[token] retrieving list of bootstrap tokens") - secrets, err := cliSet.CoreV1().Secrets(metav1.NamespaceSystem).List(listOptions) + secrets, err := cliSet.CoreV1().Secrets(metav1.NamespaceSystem).List(context.Background(), listOptions) if err != nil { return "", fmt.Errorf("%v%s", err, "failed to list bootstrap tokens") } diff --git a/pkg/yurthub/cachemanager/cache_agent.go b/pkg/yurthub/cachemanager/cache_agent.go index 1d1317570cb..751d6574c60 100644 --- a/pkg/yurthub/cachemanager/cache_agent.go +++ b/pkg/yurthub/cachemanager/cache_agent.go @@ -19,6 +19,8 @@ package cachemanager import ( "strings" + "github.com/openyurtio/openyurt/pkg/projectinfo" + "k8s.io/klog" ) @@ -28,7 +30,7 @@ var ( "kube-proxy", "flanneld", "coredns", - "edge-tunnel-agent", + projectinfo.GetAgentName(), } cacheAgentsKey = "_internal/cache-manager/cache-agent.conf" sepForAgent = "," diff --git a/pkg/yurthub/cachemanager/cache_manager.go b/pkg/yurthub/cachemanager/cache_manager.go index 0ee44d9655d..c2b8c4b195b 100644 --- a/pkg/yurthub/cachemanager/cache_manager.go +++ b/pkg/yurthub/cachemanager/cache_manager.go @@ -32,6 +32,7 @@ import ( "github.com/openyurtio/openyurt/pkg/yurthub/storage" "github.com/openyurtio/openyurt/pkg/yurthub/util" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -46,7 +47,7 @@ import ( // CacheManager is an adaptor to cache runtime object data into backend storage type CacheManager interface { - CacheResponse(ctx context.Context, prc io.ReadCloser, stopCh <-chan struct{}) error + CacheResponse(req *http.Request, prc io.ReadCloser, stopCh <-chan struct{}) error QueryCache(req *http.Request) (runtime.Object, error) UpdateCacheAgents(agents []string) error ListCacheAgents() []string @@ -82,7 +83,8 @@ func NewCacheManager( } // CacheResponse cache response of request into backend storage -func (cm *cacheManager) CacheResponse(ctx context.Context, prc io.ReadCloser, stopCh <-chan struct{}) error { +func (cm *cacheManager) CacheResponse(req *http.Request, prc io.ReadCloser, stopCh <-chan struct{}) error { + ctx := req.Context() info, _ := apirequest.RequestInfoFrom(ctx) if isWatch(ctx) { return cm.saveWatchObject(ctx, info, prc, stopCh) @@ -235,11 +237,16 @@ func (cm *cacheManager) saveWatchObject(ctx context.Context, info *apirequest.Re addObjCnt := 0 comp, _ := util.ClientComponentFrom(ctx) - 
reqContentType, _ := util.ReqContentTypeFrom(ctx) + respContentType, _ := util.RespContentTypeFrom(ctx) + s := cm.serializerManager.CreateSerializer(respContentType, info.APIGroup, info.APIVersion, info.Resource) + if s == nil { + klog.Errorf("failed to create serializer in saveWatchObject, %s", util.ReqInfoString(info)) + return fmt.Errorf("failed to create serializer in saveWatchObject, %s", util.ReqInfoString(info)) + } accessor := meta.NewAccessor() - d, err := serializer.CreateWatchDecoder(reqContentType, info.APIGroup, info.APIVersion, info.Resource, r) + d, err := s.WatchDecoder(r) if err != nil { klog.Errorf("saveWatchObject ended with error, %v", err) return err @@ -310,15 +317,14 @@ func (cm *cacheManager) saveWatchObject(ctx context.Context, info *apirequest.Re } func (cm *cacheManager) saveListObject(ctx context.Context, info *apirequest.RequestInfo, b []byte) error { - reqContentType, _ := util.ReqContentTypeFrom(ctx) respContentType, _ := util.RespContentTypeFrom(ctx) - serializers, err := cm.serializerManager.CreateSerializers(reqContentType, info.APIGroup, info.APIVersion, info.Resource) - if err != nil { - klog.Errorf("failed to create serializers in saveListObject, %v", err) - return err + s := cm.serializerManager.CreateSerializer(respContentType, info.APIGroup, info.APIVersion, info.Resource) + if s == nil { + klog.Errorf("failed to create serializer in saveListObject, %s", util.ReqInfoString(info)) + return fmt.Errorf("failed to create serializer in saveListObject, %s", util.ReqInfoString(info)) } - list, err := serializer.DecodeResp(serializers, b, reqContentType, respContentType) + list, err := s.Decode(b) if err != nil || list == nil { klog.Errorf("failed to decode response in saveOneObject %v", err) return err @@ -352,11 +358,8 @@ func (cm *cacheManager) saveListObject(ctx context.Context, info *apirequest.Req // list returns no objects key, _ := util.KeyFunc(comp, info.Resource, info.Namespace, "") return cm.storage.Create(key, nil) - } else if info.Name != "" { + } else if info.Name != "" && len(items) == 1 { // list with fieldSelector=metadata.name=xxx - if len(items) != 1 { - return fmt.Errorf("%s with fieldSelector=metadata.name=%s, but return more than one objects: %d", util.ReqInfoString(info), info.Name, len(items)) - } accessor.SetKind(items[0], kind) accessor.SetAPIVersion(items[0], apiVersion) name, _ := accessor.Name(items[0]) @@ -373,7 +376,7 @@ func (cm *cacheManager) saveListObject(ctx context.Context, info *apirequest.Req return err } else { - // list all of objects or fieldselector/labelselector + // list all objects or with fieldselector/labelselector rootKey, _ := util.KeyFunc(comp, info.Resource, info.Namespace, info.Name) objs := make(map[string]runtime.Object) for i := range items { @@ -395,19 +398,17 @@ func (cm *cacheManager) saveListObject(ctx context.Context, info *apirequest.Req func (cm *cacheManager) saveOneObject(ctx context.Context, info *apirequest.RequestInfo, b []byte) error { comp, _ := util.ClientComponentFrom(ctx) - reqContentType, _ := util.ReqContentTypeFrom(ctx) respContentType, _ := util.RespContentTypeFrom(ctx) - serializers, err := cm.serializerManager.CreateSerializers(reqContentType, info.APIGroup, info.APIVersion, info.Resource) - if err != nil { - klog.Errorf("failed to create serializers in saveOneObject: %s, %v", util.ReqInfoString(info), err) - return err + s := cm.serializerManager.CreateSerializer(respContentType, info.APIGroup, info.APIVersion, info.Resource) + if s == nil { + klog.Errorf("failed to create 
serializer in saveOneObject, %s", util.ReqInfoString(info)) + return fmt.Errorf("failed to create serializer in saveOneObject, %s", util.ReqInfoString(info)) } - accessor := meta.NewAccessor() - obj, err := serializer.DecodeResp(serializers, b, reqContentType, respContentType) + obj, err := s.Decode(b) if err != nil { - klog.Errorf("failed to decode response in saveOneObject(reqContentType:%s, respContentType:%s): %s, %v", reqContentType, respContentType, util.ReqInfoString(info), err) + klog.Errorf("failed to decode response in saveOneObject(respContentType:%s): %s, %v", respContentType, util.ReqInfoString(info), err) return err } else if obj == nil { klog.Info("failed to decode nil object. skip cache") @@ -418,6 +419,7 @@ func (cm *cacheManager) saveOneObject(ctx context.Context, info *apirequest.Requ } var name string + accessor := meta.NewAccessor() if isCreate(ctx) { name, _ = accessor.Name(obj) } else { @@ -446,10 +448,15 @@ func (cm *cacheManager) saveOneObject(ctx context.Context, info *apirequest.Requ } func (cm *cacheManager) saveOneObjectWithValidation(key string, obj runtime.Object) error { + accessor := meta.NewAccessor() + if isNotAssignedPod(obj) { + ns, _ := accessor.Namespace(obj) + name, _ := accessor.Name(obj) + return fmt.Errorf("pod(%s/%s) is not assigned to a node, skip caching it", ns, name) + } + oldObj, err := cm.storage.Get(key) if err == nil && oldObj != nil { - accessor := meta.NewAccessor() - oldRv, err := accessor.ResourceVersion(oldObj) if err != nil { klog.Errorf("failed to get old object resource version for %s, %v", key, err) @@ -479,6 +486,21 @@ func (cm *cacheManager) saveOneObjectWithValidation(key string, obj runtime.Obje } } +// isNotAssignedPod checks whether the pod is assigned to a node or not. +// when a pod of a statefulSet is deleted, kubelet may get the pod unassigned. +func isNotAssignedPod(obj runtime.Object) bool { + pod, ok := obj.(*v1.Pod) + if !ok { + return false + } + + if pod.Spec.NodeName == "" { + return true + } + + return false +} + func isList(ctx context.Context) bool { if info, ok := apirequest.RequestInfoFrom(ctx); ok { return info.Verb == "list" @@ -558,6 +580,7 @@ func (cm *cacheManager) CanCacheFor(req *http.Request) bool { // because func queryListObject() will get all pods for both requests instead of // getting pods by request selector. so cache manager can not support same path list // requests that has different selector. + klog.Warningf("list requests that have the same path but with different selector, skip cache for %s", util.ReqString(req)) return false } } else { @@ -568,9 +591,8 @@ func (cm *cacheManager) CanCacheFor(req *http.Request) bool { // getting pods by request selector. so cache manager can not support getting same resource // list requests that has different path. 
for k := range cm.listSelectorCollector { - if len(k) > len(key) && strings.Contains(k, key) { - return false - } else if len(k) < len(key) && strings.Contains(key, k) { + if (len(k) > len(key) && strings.Contains(k, key)) || (len(k) < len(key) && strings.Contains(key, k)) { + klog.Warningf("list requests that get the same resources but with different path, skip cache for %s", util.ReqString(req)) return false } } diff --git a/pkg/yurthub/cachemanager/cache_manager_test.go b/pkg/yurthub/cachemanager/cache_manager_test.go index 721567eb6ee..257bba9284c 100644 --- a/pkg/yurthub/cachemanager/cache_manager_test.go +++ b/pkg/yurthub/cachemanager/cache_manager_test.go @@ -28,6 +28,7 @@ import ( "testing" "time" + "github.com/openyurtio/openyurt/pkg/projectinfo" "github.com/openyurtio/openyurt/pkg/yurthub/kubernetes/serializer" proxyutil "github.com/openyurtio/openyurt/pkg/yurthub/proxy/util" "github.com/openyurtio/openyurt/pkg/yurthub/storage" @@ -40,13 +41,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - runtimejson "k8s.io/apimachinery/pkg/runtime/serializer/json" - "k8s.io/apimachinery/pkg/runtime/serializer/streaming" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/endpoints/filters" - "k8s.io/client-go/kubernetes/scheme" - restclientwatch "k8s.io/client-go/rest/watch" ) var ( @@ -79,14 +75,37 @@ func TestCacheGetResponse(t *testing.T) { resource string namespaced bool expectResult struct { - err bool + err error rv string name string ns string kind string } - cacheErr error + cacheResponseErr bool }{ + "cache response for pod with not assigned node": { + group: "", + version: "v1", + key: "kubelet/pods/default/mypod1", + inputObj: runtime.Object(&v1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "mypod1", + Namespace: "default", + ResourceVersion: "1", + }, + }), + userAgent: "kubelet", + accept: "application/json", + verb: "GET", + path: "/api/v1/namespaces/default/pods/mypod1", + resource: "pods", + namespaced: true, + cacheResponseErr: true, + }, "cache response for get pod": { group: "", version: "v1", @@ -101,6 +120,9 @@ func TestCacheGetResponse(t *testing.T) { Namespace: "default", ResourceVersion: "1", }, + Spec: v1.PodSpec{ + NodeName: "node1", + }, }), userAgent: "kubelet", accept: "application/json", @@ -109,7 +131,7 @@ func TestCacheGetResponse(t *testing.T) { resource: "pods", namespaced: true, expectResult: struct { - err bool + err error rv string name string ns string @@ -135,6 +157,9 @@ func TestCacheGetResponse(t *testing.T) { Namespace: "default", ResourceVersion: "3", }, + Spec: v1.PodSpec{ + NodeName: "node1", + }, }), userAgent: "kubelet", accept: "application/json", @@ -143,7 +168,7 @@ func TestCacheGetResponse(t *testing.T) { resource: "pods", namespaced: true, expectResult: struct { - err bool + err error rv string name string ns string @@ -176,7 +201,7 @@ func TestCacheGetResponse(t *testing.T) { resource: "nodes", namespaced: false, expectResult: struct { - err bool + err error rv string name string ns string @@ -208,7 +233,7 @@ func TestCacheGetResponse(t *testing.T) { resource: "nodes", namespaced: false, expectResult: struct { - err bool + err error rv string name string ns string @@ -242,7 +267,7 @@ func TestCacheGetResponse(t *testing.T) { resource: "crontabs", namespaced: true, expectResult: struct { - err bool + err error 
rv string name string ns string @@ -276,7 +301,7 @@ func TestCacheGetResponse(t *testing.T) { resource: "crontabs", namespaced: true, expectResult: struct { - err bool + err error rv string name string ns string @@ -309,7 +334,7 @@ func TestCacheGetResponse(t *testing.T) { resource: "foos", namespaced: false, expectResult: struct { - err bool + err error rv string name string ns string @@ -341,7 +366,7 @@ func TestCacheGetResponse(t *testing.T) { resource: "foos", namespaced: false, expectResult: struct { - err bool + err error rv string name string ns string @@ -371,42 +396,43 @@ func TestCacheGetResponse(t *testing.T) { verb: "GET", path: "/api/v1/nodes/test", resource: "nodes", - cacheErr: storage.ErrStorageNotFound, - }, - "cache response for nil object": { - group: "", - version: "v1", - key: "kubelet/nodes/test", - inputObj: nil, - userAgent: "kubelet", - accept: "application/json", - verb: "GET", - path: "/api/v1/nodes/test", - resource: "nodes", expectResult: struct { - err bool + err error rv string name string ns string kind string }{ - err: true, + err: storage.ErrStorageNotFound, }, }, + "cache response for nil object": { + group: "", + version: "v1", + key: "kubelet/nodes/test", + inputObj: nil, + userAgent: "kubelet", + accept: "application/json", + verb: "GET", + path: "/api/v1/nodes/test", + resource: "nodes", + cacheResponseErr: true, + }, } accessor := meta.NewAccessor() resolver := newTestRequestInfoResolver() for k, tt := range testcases { t.Run(k, func(t *testing.T) { - encoder, err := serializerM.CreateSerializers(tt.accept, tt.group, tt.version, tt.resource) + s := serializerM.CreateSerializer(tt.accept, tt.group, tt.version, tt.resource) + encoder, err := s.Encoder(tt.accept, nil) if err != nil { - t.Fatalf("could not create serializer, %v", err) + t.Fatalf("could not create encoder, %v", err) } buf := bytes.NewBuffer([]byte{}) if tt.inputObj != nil { - err = encoder.Encoder.Encode(tt.inputObj, buf) + err = encoder.Encode(tt.inputObj, buf) if err != nil { t.Fatalf("could not encode input object, %v", err) } @@ -425,8 +451,9 @@ func TestCacheGetResponse(t *testing.T) { var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() ctx = util.WithRespContentType(ctx, tt.accept) + req = req.WithContext(ctx) prc := ioutil.NopCloser(buf) - err = yurtCM.CacheResponse(ctx, prc, nil) + err = yurtCM.CacheResponse(req, prc, nil) }) handler = proxyutil.WithRequestContentType(handler) @@ -434,8 +461,10 @@ func TestCacheGetResponse(t *testing.T) { handler = filters.WithRequestInfo(handler, resolver) handler.ServeHTTP(httptest.NewRecorder(), req) - if tt.expectResult.err && err == nil { + if tt.cacheResponseErr && err == nil { t.Errorf("expect err, but do not get error") + } else if !tt.cacheResponseErr && err != nil { + t.Errorf("expect no err, but got error %v", err) } if len(tt.expectResult.name) == 0 { @@ -444,10 +473,10 @@ func TestCacheGetResponse(t *testing.T) { obj, err := sWrapper.Get(tt.key) if err != nil || obj == nil { - if tt.cacheErr != err { - t.Errorf("expect get error %v, but got %v", tt.cacheErr, err) + if tt.expectResult.err != err { + t.Errorf("expect get error %v, but got %v", tt.expectResult.err, err) } - t.Logf("get expected err %v for key %s", tt.cacheErr, tt.key) + t.Logf("get expected err %v for key %s", tt.expectResult.err, tt.key) } else { name, _ := accessor.Name(obj) rv, _ := accessor.ResourceVersion(obj) @@ -481,19 +510,12 @@ func TestCacheGetResponse(t *testing.T) { } } -func getEncoder() 
runtime.Encoder { - jsonSerializer := runtimejson.NewSerializer(runtimejson.DefaultMetaFactory, scheme.Scheme, scheme.Scheme, false) - directCodecFactory := runtimeserializer.WithoutConversionCodecFactory{ - CodecFactory: scheme.Codecs, - } - return directCodecFactory.EncoderForVersion(jsonSerializer, v1.SchemeGroupVersion) -} - func TestCacheWatchResponse(t *testing.T) { mkPod := func(id string, rv string) *v1.Pod { return &v1.Pod{ TypeMeta: metav1.TypeMeta{APIVersion: "", Kind: "Pod"}, ObjectMeta: metav1.ObjectMeta{Name: id, Namespace: "default", ResourceVersion: rv}, + Spec: v1.PodSpec{NodeName: "node1"}, } } @@ -533,6 +555,7 @@ func TestCacheWatchResponse(t *testing.T) { accept string verb string path string + resource string namespaced bool expectResult struct { err bool @@ -552,6 +575,7 @@ func TestCacheWatchResponse(t *testing.T) { accept: "application/json", verb: "GET", path: "/api/v1/namespaces/default/pods?watch=true", + resource: "pods", namespaced: true, expectResult: struct { err bool @@ -578,6 +602,7 @@ func TestCacheWatchResponse(t *testing.T) { verb: "GET", path: "/api/v1/namespaces/default/pods?watch=true", namespaced: true, + resource: "pods", expectResult: struct { err bool data map[string]struct{} @@ -600,6 +625,7 @@ func TestCacheWatchResponse(t *testing.T) { accept: "application/json", verb: "GET", path: "/api/v1/namespaces/default/pods?watch=true", + resource: "pods", namespaced: true, expectResult: struct { err bool @@ -624,6 +650,7 @@ func TestCacheWatchResponse(t *testing.T) { accept: "application/json", verb: "GET", path: "/api/v1/namespaces/default/pods?watch=true", + resource: "pods", namespaced: true, expectResult: struct { err bool @@ -648,6 +675,7 @@ func TestCacheWatchResponse(t *testing.T) { accept: "application/json", verb: "GET", path: "/apis/stable.example.com/v1/namespaces/default/crontabs?watch=true", + resource: "crontabs", namespaced: true, expectResult: struct { err bool @@ -673,6 +701,7 @@ func TestCacheWatchResponse(t *testing.T) { accept: "application/json", verb: "GET", path: "/apis/stable.example.com/v1/namespaces/default/crontabs?watch=true", + resource: "crontabs", namespaced: true, expectResult: struct { err bool @@ -696,6 +725,7 @@ func TestCacheWatchResponse(t *testing.T) { accept: "application/json", verb: "GET", path: "/apis/stable.example.com/v1/namespaces/default/crontabs?watch=true", + resource: "crontabs", namespaced: true, expectResult: struct { err bool @@ -720,6 +750,7 @@ func TestCacheWatchResponse(t *testing.T) { accept: "application/json", verb: "GET", path: "/apis/stable.example.com/v1/namespaces/default/crontabs?watch=true", + resource: "crontabs", namespaced: true, expectResult: struct { err bool @@ -732,18 +763,17 @@ func TestCacheWatchResponse(t *testing.T) { }, } - accessor := meta.NewAccessor() resolver := newTestRequestInfoResolver() for k, tt := range testcases { t.Run(k, func(t *testing.T) { + s := serializerM.CreateSerializer(tt.accept, tt.group, tt.version, tt.resource) r, w := io.Pipe() go func(w *io.PipeWriter) { //For unregistered GVKs, the normal encoding is used by default and the original GVK information is set - encoder := restclientwatch.NewEncoder(streaming.NewEncoder(w, getEncoder()), getEncoder()) for i := range tt.inputObj { - if err := encoder.Encode(&tt.inputObj[i]); err != nil { - t.Errorf("%d: unexpected error: %v", i, err) + if _, err := s.WatchEncode(w, &tt.inputObj[i]); err != nil { + t.Errorf("%d: encode watch unexpected error: %v", i, err) continue } time.Sleep(100 * time.Millisecond) @@ 
-766,7 +796,8 @@ func TestCacheWatchResponse(t *testing.T) { var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() ctx = util.WithRespContentType(ctx, tt.accept) - err = yurtCM.CacheResponse(ctx, rc, nil) + req = req.WithContext(ctx) + err = yurtCM.CacheResponse(req, rc, nil) }) handler = proxyutil.WithRequestContentType(handler) @@ -776,6 +807,8 @@ func TestCacheWatchResponse(t *testing.T) { if tt.expectResult.err && err == nil { t.Errorf("expect err, but do not got err") + } else if err != nil && err != io.EOF { + t.Errorf("failed to cache response, %v", err) } if len(tt.expectResult.data) == 0 { @@ -787,26 +820,8 @@ func TestCacheWatchResponse(t *testing.T) { t.Errorf("failed to get object from storage") } - if len(objs) != len(tt.expectResult.data) { - t.Errorf("Got %d objects, but expect %d objects", len(objs), len(tt.expectResult.data)) - } - - for _, obj := range objs { - name, _ := accessor.Name(obj) - ns, _ := accessor.Namespace(obj) - rv, _ := accessor.ResourceVersion(obj) - kind, _ := accessor.Kind(obj) - - var objKey string - if tt.namespaced { - objKey = fmt.Sprintf("%s-%s-%s-%s", strings.ToLower(kind), ns, name, rv) - } else { - objKey = fmt.Sprintf("%s-%s-%s", strings.ToLower(kind), name, rv) - } - - if _, ok := tt.expectResult.data[objKey]; !ok { - t.Errorf("Got %s %s/%s with rv %s", kind, ns, name, rv) - } + if !compareObjectsAndKeys(t, objs, tt.namespaced, tt.expectResult.data) { + t.Errorf("got unexpected objects for keys for watch request") } err = sWrapper.DeleteCollection("kubelet") @@ -1200,17 +1215,17 @@ func TestCacheListResponse(t *testing.T) { }, } - accessor := meta.NewAccessor() resolver := newTestRequestInfoResolver() for k, tt := range testcases { t.Run(k, func(t *testing.T) { - encoder, err := serializerM.CreateSerializers(tt.accept, tt.group, tt.version, tt.resource) + s := serializerM.CreateSerializer(tt.accept, tt.group, tt.version, tt.resource) + encoder, err := s.Encoder(tt.accept, nil) if err != nil { - t.Fatalf("could not create serializer, %v", err) + t.Fatalf("could not create encoder, %v", err) } buf := bytes.NewBuffer([]byte{}) - err = encoder.Encoder.Encode(tt.inputObj, buf) + err = encoder.Encode(tt.inputObj, buf) if err != nil { t.Fatalf("could not encode input object, %v", err) } @@ -1228,8 +1243,9 @@ func TestCacheListResponse(t *testing.T) { var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() ctx = util.WithRespContentType(ctx, tt.accept) + req = req.WithContext(ctx) prc := ioutil.NopCloser(buf) - err = yurtCM.CacheResponse(ctx, prc, nil) + err = yurtCM.CacheResponse(req, prc, nil) }) handler = proxyutil.WithRequestContentType(handler) @@ -1253,26 +1269,8 @@ func TestCacheListResponse(t *testing.T) { } } - if len(objs) != len(tt.expectResult.data) { - t.Errorf("Got %d objects, but expect %d objects", len(objs), len(tt.expectResult.data)) - } - - for _, obj := range objs { - name, _ := accessor.Name(obj) - ns, _ := accessor.Namespace(obj) - rv, _ := accessor.ResourceVersion(obj) - kind, _ := accessor.Kind(obj) - - var objKey string - if tt.namespaced { - objKey = fmt.Sprintf("%s-%s-%s-%s", strings.ToLower(kind), ns, name, rv) - } else { - objKey = fmt.Sprintf("%s-%s-%s", strings.ToLower(kind), name, rv) - } - - if _, ok := tt.expectResult.data[objKey]; !ok { - t.Errorf("Got %s %s/%s with rv %s", kind, ns, name, rv) - } + if !compareObjectsAndKeys(t, objs, tt.namespaced, tt.expectResult.data) { + t.Errorf("got unexpected 
objects for keys") } } err = sWrapper.DeleteCollection("kubelet") @@ -2191,26 +2189,8 @@ func TestQueryCacheForList(t *testing.T) { t.Errorf("Got rv %s, but expect rv %s", rv, tt.expectResult.rv) } - if len(items) != len(tt.expectResult.data) { - t.Errorf("Got %d objects, but expect %d objects", len(items), len(tt.expectResult.data)) - } - - for i := range items { - kind, _ := accessor.Kind(items[i]) - ns, _ := accessor.Namespace(items[i]) - name, _ := accessor.Name(items[i]) - itemRv, _ := accessor.ResourceVersion(items[i]) - - var itemKey string - if tt.namespaced { - itemKey = fmt.Sprintf("%s-%s-%s-%s", strings.ToLower(kind), ns, name, itemRv) - } else { - itemKey = fmt.Sprintf("%s-%s-%s", strings.ToLower(kind), name, itemRv) - } - - if expectKey, ok := tt.expectResult.data[itemKey]; !ok { - t.Errorf("Got item key %s, but expect key %s", itemKey, expectKey) - } + if !compareObjectsAndKeys(t, items, tt.namespaced, tt.expectResult.data) { + t.Errorf("got unexpected objects for keys") } } err = sWrapper.DeleteCollection("kubelet") @@ -2221,6 +2201,42 @@ func TestQueryCacheForList(t *testing.T) { } } +func compareObjectsAndKeys(t *testing.T, objs []runtime.Object, namespaced bool, keys map[string]struct{}) bool { + if len(objs) != len(keys) { + t.Errorf("expect %d keys, but got %d objects", len(keys), len(objs)) + return false + } + + accessor := meta.NewAccessor() + objKeys := make(map[string]struct{}) + for i := range objs { + kind, _ := accessor.Kind(objs[i]) + ns, _ := accessor.Namespace(objs[i]) + name, _ := accessor.Name(objs[i]) + itemRv, _ := accessor.ResourceVersion(objs[i]) + + if namespaced { + objKeys[fmt.Sprintf("%s-%s-%s-%s", strings.ToLower(kind), ns, name, itemRv)] = struct{}{} + } else { + objKeys[fmt.Sprintf("%s-%s-%s", strings.ToLower(kind), name, itemRv)] = struct{}{} + } + } + + if len(objKeys) != len(keys) { + t.Errorf("expect %d keys, but got %d object keys", len(keys), len(objKeys)) + return false + } + + for key := range objKeys { + if _, ok := keys[key]; !ok { + t.Errorf("got unexpected object with key: %s", key) + return false + } + } + + return true +} + func TestCanCacheFor(t *testing.T) { dStorage, err := disk.NewDiskStorage(rootDir) if err != nil { @@ -2289,9 +2305,9 @@ func TestCanCacheFor(t *testing.T) { }, expectCache: true, }, - "default user agent edge-tunnel-agent": { + "default user agent tunnel-agent": { request: &proxyRequest{ - userAgent: "edge-tunnel-agent", + userAgent: projectinfo.GetAgentName(), verb: "HEAD", path: "/api/v1/nodes/mynode", }, diff --git a/pkg/yurthub/cachemanager/storage_wrapper.go b/pkg/yurthub/cachemanager/storage_wrapper.go index 146de591425..6162c1cd82e 100644 --- a/pkg/yurthub/cachemanager/storage_wrapper.go +++ b/pkg/yurthub/cachemanager/storage_wrapper.go @@ -116,7 +116,6 @@ func (sw *storageWrapper) Get(key string) (runtime.Object, error) { b, err := sw.store.Get(key) if err != nil { - klog.Errorf("could not get object for %s, %v", key, err) return nil, err } else if len(b) == 0 { return nil, nil @@ -175,12 +174,16 @@ func (sw *storageWrapper) List(key string) ([]runtime.Object, error) { return nil, err } var UnstructuredObj runtime.Object + var recognized bool if scheme.Scheme.Recognizes(*gvk) { - UnstructuredObj = nil - } else { - UnstructuredObj = new(unstructured.Unstructured) + recognized = true } + for i := range bb { + if !recognized { + UnstructuredObj = new(unstructured.Unstructured) + } + obj, gvk, err := sw.backendSerializer.Decode(bb[i], nil, UnstructuredObj) if err != nil { klog.Errorf("could not decode %v for 
%s, %v", gvk, key, err) diff --git a/pkg/yurthub/certificate/hubself/cert_mgr.go b/pkg/yurthub/certificate/hubself/cert_mgr.go index f9bca760fef..2a11f9b4fca 100644 --- a/pkg/yurthub/certificate/hubself/cert_mgr.go +++ b/pkg/yurthub/certificate/hubself/cert_mgr.go @@ -17,6 +17,7 @@ limitations under the License. package hubself import ( + "context" "crypto/tls" "crypto/x509" "crypto/x509/pkix" @@ -257,7 +258,7 @@ func (ycm *yurtHubCertManager) initCaCert() error { } // make sure configMap kube-public/cluster-info in k8s cluster beforehand - insecureClusterInfo, err := insecureClient.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(ClusterInfoName, metav1.GetOptions{}) + insecureClusterInfo, err := insecureClient.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(context.Background(), ClusterInfoName, metav1.GetOptions{}) if err != nil { klog.Errorf("failed to get cluster-info configmap, %v", err) return err @@ -326,7 +327,8 @@ func (ycm *yurtHubCertManager) initClientCertificateManager() error { ycm.hubClientCertPath = s.CurrentPath() m, err := certificate.NewManager(&certificate.Config{ - ClientFn: ycm.generateCertClientFn, + ClientFn: ycm.generateCertClientFn, + SignerName: certificates.KubeAPIServerClientKubeletSignerName, Template: &x509.CertificateRequest{ Subject: pkix.Name{ CommonName: fmt.Sprintf("system:node:%s", ycm.nodeName), diff --git a/pkg/yurthub/gc/gc.go b/pkg/yurthub/gc/gc.go index 6836a37a310..3a72b68ce3c 100644 --- a/pkg/yurthub/gc/gc.go +++ b/pkg/yurthub/gc/gc.go @@ -17,11 +17,12 @@ limitations under the License. package gc import ( + "context" "fmt" "time" "github.com/openyurtio/openyurt/cmd/yurthub/app/config" - "github.com/openyurtio/openyurt/pkg/yurthub/storage" + "github.com/openyurtio/openyurt/pkg/yurthub/cachemanager" "github.com/openyurtio/openyurt/pkg/yurthub/transport" "github.com/openyurtio/openyurt/pkg/yurthub/util" @@ -40,7 +41,7 @@ var ( // GCManager is responsible for cleanup garbage of yurthub type GCManager struct { - store storage.Store + store cachemanager.StorageWrapper transportManager transport.Interface nodeName string eventsGCFrequency time.Duration @@ -49,13 +50,13 @@ type GCManager struct { } // NewGCManager creates a *GCManager object -func NewGCManager(cfg *config.YurtHubConfiguration, store storage.Store, transportManager transport.Interface, stopCh <-chan struct{}) (*GCManager, error) { +func NewGCManager(cfg *config.YurtHubConfiguration, transportManager transport.Interface, stopCh <-chan struct{}) (*GCManager, error) { gcFrequency := cfg.GCFrequency if gcFrequency == 0 { gcFrequency = defaultEventGcInterval } mgr := &GCManager{ - store: store, + store: cfg.StorageWrapper, transportManager: transportManager, nodeName: cfg.NodeName, eventsGCFrequency: time.Duration(gcFrequency) * time.Minute, @@ -107,7 +108,7 @@ func (m *GCManager) gcPodsWhenRestart() error { } listOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector("spec.nodeName", m.nodeName).String()} - podList, err := kubeClient.CoreV1().Pods(v1.NamespaceAll).List(listOpts) + podList, err := kubeClient.CoreV1().Pods(v1.NamespaceAll).List(context.Background(), listOpts) if err != nil { klog.Errorf("could not list pods for node(%s), %v", m.nodeName, err) return err @@ -169,7 +170,7 @@ func (m *GCManager) gcEvents(kubeClient clientset.Interface, component string) { continue } - _, err := kubeClient.CoreV1().Events(ns).Get(name, metav1.GetOptions{}) + _, err := kubeClient.CoreV1().Events(ns).Get(context.Background(), name, metav1.GetOptions{}) if apierrors.IsNotFound(err) { 
deletedEvents = append(deletedEvents, key) } else if err != nil { diff --git a/pkg/yurthub/healthchecker/health_checker.go b/pkg/yurthub/healthchecker/health_checker.go index 3aa42af3edf..5a6258b9b61 100644 --- a/pkg/yurthub/healthchecker/health_checker.go +++ b/pkg/yurthub/healthchecker/health_checker.go @@ -70,7 +70,7 @@ type checker struct { } // NewHealthChecker create an HealthChecker for servers -func NewHealthChecker(cfg *config.YurtHubConfiguration, tp transport.Interface, sw cachemanager.StorageWrapper, stopCh <-chan struct{}) (HealthChecker, error) { +func NewHealthChecker(cfg *config.YurtHubConfiguration, tp transport.Interface, stopCh <-chan struct{}) (HealthChecker, error) { if len(cfg.RemoteServers) == 0 { return nil, fmt.Errorf("no remote servers") } @@ -79,7 +79,7 @@ func NewHealthChecker(cfg *config.YurtHubConfiguration, tp transport.Interface, checkers: make(map[string]*checker), remoteServers: cfg.RemoteServers, remoteServerIndex: 0, - sw: sw, + sw: cfg.StorageWrapper, stopCh: stopCh, } diff --git a/pkg/yurthub/healthchecker/node_lease.go b/pkg/yurthub/healthchecker/node_lease.go index 3ac23212e43..76e2c56c387 100644 --- a/pkg/yurthub/healthchecker/node_lease.go +++ b/pkg/yurthub/healthchecker/node_lease.go @@ -17,6 +17,7 @@ limitations under the License. package healthchecker import ( + "context" "fmt" "time" @@ -82,7 +83,7 @@ func (nl *nodeLeaseImpl) retryUpdateLease(base *coordinationv1.Lease) (*coordina var err error var lease *coordinationv1.Lease for i := 0; i < nl.failedRetry; i++ { - lease, err = nl.leaseClient.Update(nl.newLease(base)) + lease, err = nl.leaseClient.Update(context.Background(), nl.newLease(base), metav1.UpdateOptions{}) if err == nil { return lease, nil } @@ -121,9 +122,9 @@ func (nl *nodeLeaseImpl) backoffEnsureLease() (*coordinationv1.Lease, bool, erro } func (nl *nodeLeaseImpl) ensureLease() (*coordinationv1.Lease, bool, error) { - lease, err := nl.leaseClient.Get(nl.holderIdentity, metav1.GetOptions{}) + lease, err := nl.leaseClient.Get(context.Background(), nl.holderIdentity, metav1.GetOptions{}) if apierrors.IsNotFound(err) { - lease, err := nl.leaseClient.Create(nl.newLease(nil)) + lease, err := nl.leaseClient.Create(context.Background(), nl.newLease(nil), metav1.CreateOptions{}) if err != nil { return nil, false, err } @@ -153,7 +154,7 @@ func (nl *nodeLeaseImpl) newLease(base *coordinationv1.Lease) *coordinationv1.Le lease.Spec.RenewTime = &metav1.MicroTime{Time: nl.clock.Now()} if lease.OwnerReferences == nil || len(lease.OwnerReferences) == 0 { - if node, err := nl.client.CoreV1().Nodes().Get(nl.holderIdentity, metav1.GetOptions{}); err == nil { + if node, err := nl.client.CoreV1().Nodes().Get(context.Background(), nl.holderIdentity, metav1.GetOptions{}); err == nil { lease.OwnerReferences = []metav1.OwnerReference{ { APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version, diff --git a/pkg/yurthub/kubernetes/serializer/serializer.go b/pkg/yurthub/kubernetes/serializer/serializer.go index 3d6dd39cb34..d64bcad9d4c 100644 --- a/pkg/yurthub/kubernetes/serializer/serializer.go +++ b/pkg/yurthub/kubernetes/serializer/serializer.go @@ -17,10 +17,12 @@ limitations under the License. 
package serializer import ( + "bytes" "fmt" "io" "mime" "strings" + "sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -31,8 +33,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/klog" ) @@ -44,13 +46,13 @@ var YurtHubSerializer = NewSerializerManager() var UnsafeDefaultRESTMapper = NewDefaultRESTMapperFromScheme() func NewDefaultRESTMapperFromScheme() *meta.DefaultRESTMapper { - scheme := scheme.Scheme - defaultGroupVersions := scheme.PrioritizedVersionsAllGroups() + s := scheme.Scheme + defaultGroupVersions := s.PrioritizedVersionsAllGroups() mapper := meta.NewDefaultRESTMapper(defaultGroupVersions) // enumerate all supported versions, get the kinds, and register with the mapper how to address // our resources. for _, gv := range defaultGroupVersions { - for kind := range scheme.KnownTypes(gv) { + for kind := range s.KnownTypes(gv) { //Since RESTMapper is only used for mapping GVR to GVK information, //the scope field is not involved in actual use, so all scope are currently set to meta.RESTScopeNamespace scope := meta.RESTScopeNamespace @@ -60,23 +62,49 @@ func NewDefaultRESTMapperFromScheme() *meta.DefaultRESTMapper { return mapper } +type yurtClientNegotiator struct { + recognized bool + runtime.ClientNegotiator +} + // SerializerManager is responsible for managing *rest.Serializers type SerializerManager struct { + sync.Mutex // NegotiatedSerializer is used for obtaining encoders and decoders for multiple // supported media types. NegotiatedSerializer runtime.NegotiatedSerializer // UnstructuredNegotiatedSerializer is used to obtain encoders and decoders // for resources not registered in the scheme UnstructuredNegotiatedSerializer runtime.NegotiatedSerializer + // ClientNegotiators holds the ClientNegotiator for each GroupVersionResource + ClientNegotiators map[schema.GroupVersionResource]*yurtClientNegotiator + // WatchEventClientNegotiator is the ClientNegotiator for WatchEvent + WatchEventClientNegotiator runtime.ClientNegotiator } // NewSerializerManager creates a *SerializerManager object with no version conversion func NewSerializerManager() *SerializerManager { - return &SerializerManager{ + sm := &SerializerManager{ // do not need version conversion, and keep the gvk information NegotiatedSerializer: WithVersionCodecFactory{CodecFactory: scheme.Codecs}, UnstructuredNegotiatedSerializer: NewUnstructuredNegotiatedSerializer(), + ClientNegotiators: make(map[schema.GroupVersionResource]*yurtClientNegotiator), + } + + watchEventGVR := metav1.SchemeGroupVersion.WithResource("watchevents") + sm.WatchEventClientNegotiator = runtime.NewClientNegotiator(sm.NegotiatedSerializer, watchEventGVR.GroupVersion()) + sm.ClientNegotiators[watchEventGVR] = &yurtClientNegotiator{recognized: true, ClientNegotiator: sm.WatchEventClientNegotiator} + return sm +} + +// GetNegotiatedSerializer returns a NegotiatedSerializer object based on the GroupVersionResource +func (sm *SerializerManager) GetNegotiatedSerializer(gvr schema.GroupVersionResource) runtime.NegotiatedSerializer { + _, kindErr := UnsafeDefaultRESTMapper.KindFor(gvr) + if kindErr == nil { + return sm.NegotiatedSerializer } + + return sm.UnstructuredNegotiatedSerializer } // WithVersionCodecFactory is a CodecFactory that will explicitly 
ignore requests to perform conversion. @@ -185,134 +213,161 @@ func (c unstructuredCreator) New(kind schema.GroupVersionKind) (runtime.Object, } } -// CreateSerializers create a *rest.Serializers for encoding or decoding runtime object -func (sm *SerializerManager) CreateSerializers(contentType, group, version, resource string) (*rest.Serializers, error) { - var mediaTypes []runtime.SerializerInfo +// genClientNegotiator creates a ClientNegotiator for the specified GroupVersionResource and reports whether the gvr is recognized +func (sm *SerializerManager) genClientNegotiator(gvr schema.GroupVersionResource) (runtime.ClientNegotiator, bool) { + _, kindErr := UnsafeDefaultRESTMapper.KindFor(gvr) + if kindErr == nil { + return runtime.NewClientNegotiator(sm.NegotiatedSerializer, gvr.GroupVersion()), true + } + klog.Infof("%#+v is not found in client-go runtime scheme", gvr) + return runtime.NewClientNegotiator(sm.UnstructuredNegotiatedSerializer, gvr.GroupVersion()), false + +} + +// Serializer is used for transforming objects into a serialized format and back for cache manager of hub agent. +type Serializer struct { + recognized bool + contentType string + runtime.ClientNegotiator + watchEventClientNegotiator runtime.ClientNegotiator +} + +// CreateSerializer returns a Serializer object for encoding or decoding runtime objects. +func (sm *SerializerManager) CreateSerializer(contentType, group, version, resource string) *Serializer { + var recognized bool + var clientNegotiator runtime.ClientNegotiator gvr := schema.GroupVersionResource{ Group: group, Version: version, Resource: resource, } - _, kindErr := UnsafeDefaultRESTMapper.KindFor(gvr) - if kindErr == nil || resource == "WatchEvent" { - mediaTypes = sm.NegotiatedSerializer.SupportedMediaTypes() - } else { - mediaTypes = sm.UnstructuredNegotiatedSerializer.SupportedMediaTypes() - } - mediaType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return nil, fmt.Errorf("the content type(%s) specified in the request is not recognized: %v", contentType, err) + + if len(contentType) == 0 { + return nil } - info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType) - if !ok { - if mediaType == "application/vnd.kubernetes.protobuf" && kindErr != nil { - return nil, fmt.Errorf("*unstructured.Unstructured(%s/%s) does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", group, version) - } - if len(contentType) != 0 || len(mediaTypes) == 0 { - return nil, fmt.Errorf("no serializers registered for %s", contentType) + + sm.Lock() + defer sm.Unlock() + if cn, ok := sm.ClientNegotiators[gvr]; ok { + clientNegotiator, recognized = cn.ClientNegotiator, cn.recognized + } else { + clientNegotiator, recognized = sm.genClientNegotiator(gvr) + sm.ClientNegotiators[gvr] = &yurtClientNegotiator{ + recognized: recognized, + ClientNegotiator: clientNegotiator, } - info = mediaTypes[0] } - internalGV := schema.GroupVersions{ - { - Group: group, - Version: runtime.APIVersionInternal, - }, - // always include the legacy group as a decoding target to handle non-error `Status` return types - { - Group: "", - Version: runtime.APIVersionInternal, - }, + return &Serializer{ + recognized: recognized, + contentType: contentType, + ClientNegotiator: clientNegotiator, + watchEventClientNegotiator: sm.WatchEventClientNegotiator, } - reqGroupVersion := schema.GroupVersion{ - Group: group, - Version: version, - } - var encoder runtime.Encoder +} + +// Decode decodes byte data into runtime object with embedded contentType. 
+func (s *Serializer) Decode(b []byte) (runtime.Object, error) { var decoder runtime.Decoder - if kindErr == nil { - encoder = sm.NegotiatedSerializer.EncoderForVersion(info.Serializer, &reqGroupVersion) - decoder = sm.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV) - } else { - encoder = sm.UnstructuredNegotiatedSerializer.EncoderForVersion(info.Serializer, &reqGroupVersion) - decoder = sm.UnstructuredNegotiatedSerializer.DecoderToVersion(info.Serializer, &reqGroupVersion) + if len(b) == 0 { + return nil, fmt.Errorf("0-length response body, content type: %s", s.contentType) } - s := &rest.Serializers{ - Encoder: encoder, - Decoder: decoder, + mediaType, params, err := mime.ParseMediaType(s.contentType) + if err != nil { + return nil, err + } - RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { - info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType) - if !ok { - return nil, fmt.Errorf("serializer for %s not registered", contentType) - } - if kindErr == nil { - return sm.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil - } else { - return sm.UnstructuredNegotiatedSerializer.DecoderToVersion(info.Serializer, &reqGroupVersion), nil - } - }, + decoder, err = s.Decoder(mediaType, params) + if err != nil { + return nil, err } - if info.StreamSerializer != nil { - s.StreamingSerializer = info.StreamSerializer.Serializer - s.Framer = info.StreamSerializer.Framer + + out, _, err := decoder.Decode(b, nil, nil) + if err != nil { + return nil, err } - return s, nil + return out, nil } -// DecodeResp decodes byte data into runtime object with specified serializers and content type -func DecodeResp(serializers *rest.Serializers, b []byte, reqContentType, respContentType string) (runtime.Object, error) { - decoder := serializers.Decoder - if len(respContentType) > 0 && (decoder == nil || (len(reqContentType) > 0 && respContentType != reqContentType)) { - mediaType, params, err := mime.ParseMediaType(respContentType) - if err != nil { - return nil, fmt.Errorf("response content type(%s) is invalid, %v", respContentType, err) - } - decoder, err = serializers.RenegotiatedDecoder(mediaType, params) - if err != nil { - return nil, fmt.Errorf("response content type(%s) is not supported, %v", respContentType, err) - } - klog.Infof("serializer decoder changed from %s to %s(%v)", reqContentType, respContentType, params) - } - - if len(b) == 0 { - return nil, fmt.Errorf("0-length response with content type: %s", respContentType) +// WatchDecoder generates a Decoder for decoding response of watch request. +func (s *Serializer) WatchDecoder(body io.ReadCloser) (*restclientwatch.Decoder, error) { + var err error + var embeddedObjectDecoder runtime.Decoder + var streamingSerializer runtime.Serializer + var framer runtime.Framer + mediaType, params, err := mime.ParseMediaType(s.contentType) + if err != nil { + return nil, err } - out, _, err := decoder.Decode(b, nil, nil) + embeddedObjectDecoder, streamingSerializer, framer, err = s.StreamDecoder(mediaType, params) if err != nil { return nil, err } - // if a different object is returned, see if it is Status and avoid double decoding - // the object. - switch out.(type) { - case *metav1.Status: - // it's not need to cache for status - return out, nil + if !s.recognized { + // if gvr is not recognized, maybe it's a crd resource, so do not use the same streaming + // serializer to decode watch event object, and instead use watch event client negotiator. 
+ _, streamingSerializer, framer, err = s.watchEventClientNegotiator.StreamDecoder(mediaType, params) + if err != nil { + return nil, err + } } - return out, nil + + frameReader := framer.NewFrameReader(body) + streamingDecoder := streaming.NewDecoder(frameReader, streamingSerializer) + return restclientwatch.NewDecoder(streamingDecoder, embeddedObjectDecoder), nil } -// CreateWatchDecoder generates a Decoder for watch response -func CreateWatchDecoder(contentType, group, version, resource string, body io.ReadCloser) (*restclientwatch.Decoder, error) { - //get the general serializers to decode the watch event - serializers, err := YurtHubSerializer.CreateSerializers(contentType, group, version, "WatchEvent") +// WatchEncode writes watch event to provided io.Writer +func (s *Serializer) WatchEncode(w io.Writer, event *watch.Event) (int, error) { + mediaType, params, err := mime.ParseMediaType(s.contentType) if err != nil { - klog.Errorf("failed to create serializers in saveWatchObject, %v", err) - return nil, err + return 0, err } - //get the serializers to decode the embedded object inside watch event according to the GVR of embedded object - embeddedSerializers, err := YurtHubSerializer.CreateSerializers(contentType, group, version, resource) + // 1. prepare streaming encoder for watch event and embedded encoder for event.object + _, streamingSerializer, framer, err := s.watchEventClientNegotiator.StreamDecoder(mediaType, params) if err != nil { - klog.Errorf("failed to create serializers in saveWatchObject, %v", err) - return nil, err + return 0, err + } + + sw := &sizeWriter{Writer: w} + streamingEncoder := streaming.NewEncoder(framer.NewFrameWriter(sw), streamingSerializer) + embeddedEncoder, err := s.Encoder(mediaType, params) + if err != nil { + return 0, err } - framer := serializers.Framer.NewFrameReader(body) - streamingDecoder := streaming.NewDecoder(framer, serializers.StreamingSerializer) - return restclientwatch.NewDecoder(streamingDecoder, embeddedSerializers.Decoder), nil + // 2. encode the embedded object into bytes.Buffer + buf := &bytes.Buffer{} + obj := event.Object + if err := embeddedEncoder.Encode(obj, buf); err != nil { + return 0, err + } + + // 3. 
make up metav1.WatchEvent and encode it by using streaming encoder + outEvent := &metav1.WatchEvent{} + outEvent.Type = string(event.Type) + outEvent.Object.Raw = buf.Bytes() + + if err := streamingEncoder.Encode(outEvent); err != nil { + return 0, err + } + + return sw.size, nil +} + +// sizeWriter is used to track the total number of bytes written +type sizeWriter struct { + io.Writer + size int +} + +func (sw *sizeWriter) Write(p []byte) (int, error) { + n, err := sw.Writer.Write(p) + sw.size = sw.size + n + klog.V(5).Infof("encode bytes data: write bytes=%d, size=%d, bytes=%v", n, sw.size, p[:n]) + return n, err } diff --git a/pkg/yurthub/network/iptables.go b/pkg/yurthub/network/iptables.go index 1be90ded5f4..5dfd43628cc 100644 --- a/pkg/yurthub/network/iptables.go +++ b/pkg/yurthub/network/iptables.go @@ -20,7 +20,6 @@ import ( "strings" "k8s.io/klog" - utildbus "k8s.io/kubernetes/pkg/util/dbus" "k8s.io/kubernetes/pkg/util/iptables" "k8s.io/utils/exec" ) @@ -40,8 +39,7 @@ type IptablesManager struct { func NewIptablesManager(dummyIfIP, dummyIfPort string) *IptablesManager { protocol := iptables.ProtocolIpv4 execer := exec.New() - dbus := utildbus.New() - iptInterface := iptables.New(execer, dbus, protocol) + iptInterface := iptables.New(execer, protocol) im := &IptablesManager{ iptables: iptInterface, diff --git a/pkg/yurthub/proxy/local/local.go b/pkg/yurthub/proxy/local/local.go index 31b5b8a615f..d0bb295aade 100644 --- a/pkg/yurthub/proxy/local/local.go +++ b/pkg/yurthub/proxy/local/local.go @@ -18,7 +18,6 @@ package local import ( "bytes" - "context" "fmt" "io" "net/http" @@ -31,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" apirequest "k8s.io/apiserver/pkg/endpoints/request" @@ -111,12 +111,15 @@ func (lp *LocalProxy) localPost(w http.ResponseWriter, req *http.Request) error ctx := req.Context() info, _ := apirequest.RequestInfoFrom(ctx) - if info.Resource == "events" { + reqContentType, _ := util.ReqContentTypeFrom(ctx) + if info.Resource == "events" && len(reqContentType) != 0 { + ctx = util.WithRespContentType(ctx, reqContentType) + req = req.WithContext(ctx) stopCh := make(chan struct{}) rc, prc := util.NewDualReadCloser(req.Body, false) - go func(ctx context.Context, prc io.ReadCloser, stopCh <-chan struct{}) { - klog.V(2).Infof("cache events when cluster is unhealthy, %v", lp.cacheMgr.CacheResponse(ctx, prc, stopCh)) - }(ctx, prc, stopCh) + go func(req *http.Request, prc io.ReadCloser, stopCh <-chan struct{}) { + klog.V(2).Infof("cache events when cluster is unhealthy, %v", lp.cacheMgr.CacheResponse(req, prc, stopCh)) + }(req, prc, stopCh) req.Body = rc } @@ -154,7 +157,7 @@ func (lp *LocalProxy) localWatch(w http.ResponseWriter, req *http.Request) error } opts := metainternalversion.ListOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { return errors.NewBadRequest(err.Error()) } @@ -202,6 +205,7 @@ func (lp *LocalProxy) localReqCache(w http.ResponseWriter, req *http.Request) er obj, err := lp.cacheMgr.QueryCache(req) if err == storage.ErrStorageNotFound { + klog.Errorf("object not found for %s", util.ReqString(req)) 
reqInfo, _ := apirequest.RequestInfoFrom(req.Context()) return errors.NewNotFound(schema.GroupResource{Group: reqInfo.APIGroup, Resource: reqInfo.Resource}, reqInfo.Name) } else if err != nil { @@ -212,8 +216,7 @@ func (lp *LocalProxy) localReqCache(w http.ResponseWriter, req *http.Request) er return errors.NewInternalError(fmt.Errorf("no cache object for %s", util.ReqString(req))) } - util.WriteObject(http.StatusOK, obj, w, req) - return nil + return util.WriteObject(http.StatusOK, obj, w, req) } func copyHeader(dst, src http.Header) { diff --git a/pkg/yurthub/proxy/local/local_test.go b/pkg/yurthub/proxy/local/local_test.go index 7312d80f1ac..868bcb80e5a 100644 --- a/pkg/yurthub/proxy/local/local_test.go +++ b/pkg/yurthub/proxy/local/local_test.go @@ -425,7 +425,7 @@ func TestServeHTTPForGetReqCache(t *testing.T) { resolver := newTestRequestInfoResolver() for k, tt := range testcases { t.Run(k, func(t *testing.T) { - jsonDecoder, _ := serializerM.CreateSerializers(tt.accept, "", "v1", tt.resource) + s := serializerM.CreateSerializer(tt.accept, "", "v1", tt.resource) accessor := meta.NewAccessor() for i := range tt.inputObj { name, _ := accessor.Name(tt.inputObj[i]) @@ -459,12 +459,12 @@ func TestServeHTTPForGetReqCache(t *testing.T) { } buf := bytes.NewBuffer([]byte{}) - _, err := buf.ReadFrom(result.Body) + _, err = buf.ReadFrom(result.Body) if err != nil { t.Errorf("read from result body failed, %v", err) } - obj, _, err := jsonDecoder.Decoder.Decode(buf.Bytes(), nil, nil) + obj, err := s.Decode(buf.Bytes()) if err != nil { t.Errorf("decode response failed, %v", err) } @@ -583,7 +583,7 @@ func TestServeHTTPForListReqCache(t *testing.T) { resolver := newTestRequestInfoResolver() for k, tt := range testcases { t.Run(k, func(t *testing.T) { - jsonDecoder, _ := serializerM.CreateSerializers(tt.accept, "", "v1", tt.resource) + s := serializerM.CreateSerializer(tt.accept, "", "v1", tt.resource) accessor := meta.NewAccessor() for i := range tt.inputObj { name, _ := accessor.Name(tt.inputObj[i]) @@ -617,12 +617,12 @@ func TestServeHTTPForListReqCache(t *testing.T) { } buf := bytes.NewBuffer([]byte{}) - _, err := buf.ReadFrom(result.Body) + _, err = buf.ReadFrom(result.Body) if err != nil { t.Errorf("read from result body failed, %v", err) } - obj, _, err := jsonDecoder.Decoder.Decode(buf.Bytes(), nil, nil) + obj, err := s.Decode(buf.Bytes()) if err != nil { t.Errorf("decode response failed, %v", err) } diff --git a/pkg/yurthub/proxy/remote/remote.go b/pkg/yurthub/proxy/remote/remote.go index ddf7bc05595..91eb857bbea 100644 --- a/pkg/yurthub/proxy/remote/remote.go +++ b/pkg/yurthub/proxy/remote/remote.go @@ -106,20 +106,21 @@ func (rp *RemoteProxy) modifyResponse(resp *http.Response) error { // cache resp with storage interface if resp.StatusCode >= http.StatusOK && resp.StatusCode <= http.StatusPartialContent { if rp.cacheMgr.CanCacheFor(req) { - respContentType := resp.Header.Get("Content-Type") - ctx = util.WithRespContentType(ctx, respContentType) reqContentType, _ := util.ReqContentTypeFrom(ctx) - if len(reqContentType) == 0 || reqContentType == "*/*" { - ctx = util.WithReqContentType(ctx, respContentType) + respContentType := resp.Header.Get("Content-Type") + if len(respContentType) == 0 { + respContentType = reqContentType } + ctx = util.WithRespContentType(ctx, respContentType) + req = req.WithContext(ctx) rc, prc := util.NewDualReadCloser(resp.Body, true) - go func(ctx context.Context, prc io.ReadCloser, stopCh <-chan struct{}) { - err := rp.cacheMgr.CacheResponse(ctx, prc, stopCh) + 
go func(req *http.Request, prc io.ReadCloser, stopCh <-chan struct{}) { + err := rp.cacheMgr.CacheResponse(req, prc, stopCh) if err != nil && err != io.EOF && err != context.Canceled { klog.Errorf("%s response cache ended with error, %v", util.ReqString(req), err) } - }(ctx, prc, rp.stopCh) + }(req, prc, rp.stopCh) resp.Body = rc } diff --git a/pkg/yurthub/proxy/util/util.go b/pkg/yurthub/proxy/util/util.go index ecadf2c8b5d..9a3c23047f1 100644 --- a/pkg/yurthub/proxy/util/util.go +++ b/pkg/yurthub/proxy/util/util.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -130,7 +131,7 @@ func WithListRequestSelector(handler http.Handler) http.Handler { if info.IsResourceRequest && info.Verb == "list" && info.Name == "" { // list request with fieldSelector=metadata.name does not need to set selector string opts := metainternalversion.ListOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err == nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err == nil { if str := selectorString(opts.LabelSelector, opts.FieldSelector); str != "" { ctx = util.WithListSelector(ctx, str) req = req.WithContext(ctx) @@ -259,7 +260,7 @@ func WithRequestTimeout(handler http.Handler) http.Handler { var timeout time.Duration if info.Verb == "list" || info.Verb == "watch" { opts := metainternalversion.ListOptions{} - if err := metainternalversion.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { + if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil { klog.Errorf("failed to decode parameter for list/watch request: %s", util.ReqString(req)) util.Err(errors.NewBadRequest(err.Error()), w, req) return diff --git a/pkg/yurthub/transport/transport.go b/pkg/yurthub/transport/transport.go index 1e280a032cd..931834e85c6 100644 --- a/pkg/yurthub/transport/transport.go +++ b/pkg/yurthub/transport/transport.go @@ -32,93 +32,58 @@ import ( "k8s.io/klog" ) -const ( - deaultHealthzTimeoutSeconds = 2 -) - // Interface is an transport interface for managing clients that used to connecting kube-apiserver type Interface interface { - // HealthzHTTPClient returns http client that used by health checker - HealthzHTTPClient() *http.Client // concurrent use by multiple goroutines // CurrentTransport get transport that used by load balancer CurrentTransport() *http.Transport // GetRestClientConfig get rest config that used by gc GetRestClientConfig() *rest.Config - // UpdateTransport update secure transport manager with certificate manager - UpdateTransport(certMgr interfaces.YurtCertificateManager) error // close all net connections that specified by address Close(address string) } type transportManager struct { - dialer *util.Dialer - healthzHTTPClient *http.Client - currentTransport *http.Transport - certManager interfaces.YurtCertificateManager - closeAll func() - close func(string) - stopCh <-chan struct{} + currentTransport *http.Transport + certManager interfaces.YurtCertificateManager + closeAll func() + close func(string) + stopCh <-chan struct{} } // NewTransportManager create 
an transport interface object. -func NewTransportManager(heartbeatTimeoutSeconds int, stopCh <-chan struct{}) (Interface, error) { - d := util.NewDialer("transport manager") - t := utilnet.SetTransportDefaults(&http.Transport{ - Proxy: http.ProxyFromEnvironment, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - MaxIdleConnsPerHost: 25, - DialContext: d.DialContext, - }) - - if heartbeatTimeoutSeconds == 0 { - heartbeatTimeoutSeconds = deaultHealthzTimeoutSeconds - } - - tm := &transportManager{ - healthzHTTPClient: &http.Client{ - Transport: t, - Timeout: time.Duration(heartbeatTimeoutSeconds) * time.Second, - }, - dialer: d, - closeAll: d.CloseAll, - close: d.Close, - stopCh: stopCh, - } - - return tm, nil -} - -// UpdateTransport used to update ca file and tls config -func (tm *transportManager) UpdateTransport(certMgr interfaces.YurtCertificateManager) error { +func NewTransportManager(certMgr interfaces.YurtCertificateManager, stopCh <-chan struct{}) (Interface, error) { caFile := certMgr.GetCaFile() if len(caFile) == 0 { - return fmt.Errorf("ca cert file was not prepared when update tranport") + return nil, fmt.Errorf("ca cert file was not prepared when new tranport") } klog.V(2).Infof("use %s ca cert file to access remote server", caFile) cfg, err := tlsConfig(certMgr, caFile) if err != nil { - klog.Errorf("could not get tls config when update transport, %v", err) - return err + klog.Errorf("could not get tls config when new transport, %v", err) + return nil, err } - tm.currentTransport = utilnet.SetTransportDefaults(&http.Transport{ + d := util.NewDialer("transport manager") + t := utilnet.SetTransportDefaults(&http.Transport{ Proxy: http.ProxyFromEnvironment, TLSHandshakeTimeout: 10 * time.Second, TLSClientConfig: cfg, MaxIdleConnsPerHost: 25, - DialContext: tm.dialer.DialContext, + DialContext: d.DialContext, }) - tm.certManager = certMgr + tm := &transportManager{ + currentTransport: t, + certManager: certMgr, + closeAll: d.CloseAll, + close: d.Close, + stopCh: stopCh, + } tm.start() - return nil -} -func (tm *transportManager) HealthzHTTPClient() *http.Client { - return tm.healthzHTTPClient + return tm, nil } func (tm *transportManager) GetRestClientConfig() *rest.Config { diff --git a/pkg/yurthub/util/util.go b/pkg/yurthub/util/util.go index 808541a5534..5e204610bb6 100644 --- a/pkg/yurthub/util/util.go +++ b/pkg/yurthub/util/util.go @@ -148,33 +148,35 @@ func IsKubeletLeaseReq(req *http.Request) bool { } // WriteObject write object to response writer -func WriteObject(statusCode int, obj runtime.Object, w http.ResponseWriter, req *http.Request) { +func WriteObject(statusCode int, obj runtime.Object, w http.ResponseWriter, req *http.Request) error { ctx := req.Context() - gv := schema.GroupVersion{ - Group: "", - Version: runtime.APIVersionInternal, - } if info, ok := apirequest.RequestInfoFrom(ctx); ok { - gv.Group = info.APIGroup - gv.Version = info.APIVersion + gv := schema.GroupVersion{ + Group: info.APIGroup, + Version: info.APIVersion, + } + negotiatedSerializer := serializer.YurtHubSerializer.GetNegotiatedSerializer(gv.WithResource(info.Resource)) + responsewriters.WriteObjectNegotiated(negotiatedSerializer, negotiation.DefaultEndpointRestrictions, gv, w, req, statusCode, obj) + return nil } - responsewriters.WriteObjectNegotiated(serializer.YurtHubSerializer.NegotiatedSerializer, negotiation.DefaultEndpointRestrictions, gv, w, req, statusCode, obj) + return fmt.Errorf("request info is not found when write object, %s", 
ReqString(req)) } // Err write err to response writer func Err(err error, w http.ResponseWriter, req *http.Request) { ctx := req.Context() - gv := schema.GroupVersion{ - Group: "", - Version: runtime.APIVersionInternal, - } if info, ok := apirequest.RequestInfoFrom(ctx); ok { - gv.Group = info.APIGroup - gv.Version = info.APIVersion + gv := schema.GroupVersion{ + Group: info.APIGroup, + Version: info.APIVersion, + } + negotiatedSerializer := serializer.YurtHubSerializer.GetNegotiatedSerializer(gv.WithResource(info.Resource)) + responsewriters.ErrorNegotiated(err, negotiatedSerializer, gv, w, req) + return } - responsewriters.ErrorNegotiated(err, serializer.YurtHubSerializer.NegotiatedSerializer, gv, w, req) + klog.Errorf("request info is not found when err write, %s", ReqString(req)) } // NewDualReadCloser create an dualReadCloser object diff --git a/pkg/yurttunnel/dns/dns.go b/pkg/yurttunnel/dns/dns.go index 113da15804d..8c0d7f11647 100644 --- a/pkg/yurttunnel/dns/dns.go +++ b/pkg/yurttunnel/dns/dns.go @@ -299,7 +299,7 @@ func (dnsctl *coreDNSRecordController) handleErr(err error, event interface{}) { func (dnsctl *coreDNSRecordController) ensureCoreDNSRecordConfigMap() error { _, err := dnsctl.kubeClient.CoreV1().ConfigMaps(constants.YurttunnelServerServiceNs). - Get(yurttunnelDNSRecordConfigMapName, metav1.GetOptions{}) + Get(context.Background(), yurttunnelDNSRecordConfigMapName, metav1.GetOptions{}) if err != nil && apierrors.IsNotFound(err) { cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -310,7 +310,7 @@ func (dnsctl *coreDNSRecordController) ensureCoreDNSRecordConfigMap() error { yurttunnelDNSRecordNodeDataKey: "", }, } - _, err = dnsctl.kubeClient.CoreV1().ConfigMaps(constants.YurttunnelServerServiceNs).Create(cm) + _, err = dnsctl.kubeClient.CoreV1().ConfigMaps(constants.YurttunnelServerServiceNs).Create(context.Background(), cm, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("failed to create ConfigMap %v/%v, %v", constants.YurttunnelServerServiceNs, yurttunnelDNSRecordConfigMapName, err) @@ -370,7 +370,7 @@ func (dnsctl *coreDNSRecordController) getTunnelServerIP(useCache bool) (string, } svc, err := dnsctl.kubeClient.CoreV1().Services(constants.YurttunnelServerServiceNs). - Get(constants.YurttunnelServerInternalServiceName, metav1.GetOptions{}) + Get(context.Background(), constants.YurttunnelServerInternalServiceName, metav1.GetOptions{}) if err != nil { return "", fmt.Errorf("failed to get %v/%v service, %v", constants.YurttunnelServerServiceNs, constants.YurttunnelServerInternalServiceName, err) @@ -391,12 +391,12 @@ func (dnsctl *coreDNSRecordController) updateDNSRecords(records []string) error sort.Strings(records) cm, err := dnsctl.kubeClient.CoreV1().ConfigMaps(constants.YurttunnelServerServiceNs). 
- Get(yurttunnelDNSRecordConfigMapName, metav1.GetOptions{}) + Get(context.Background(), yurttunnelDNSRecordConfigMapName, metav1.GetOptions{}) if err != nil { return err } cm.Data[yurttunnelDNSRecordNodeDataKey] = strings.Join(records, "\n") - if _, err := dnsctl.kubeClient.CoreV1().ConfigMaps(constants.YurttunnelServerServiceNs).Update(cm); err != nil { + if _, err := dnsctl.kubeClient.CoreV1().ConfigMaps(constants.YurttunnelServerServiceNs).Update(context.Background(), cm, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("failed to update configmap %v/%v, %v", constants.YurttunnelServerServiceNs, yurttunnelDNSRecordConfigMapName, err) } @@ -405,7 +405,7 @@ func (dnsctl *coreDNSRecordController) updateDNSRecords(records []string) error func (dnsctl *coreDNSRecordController) updateTunnelServerSvcDnatPorts(ports []string) error { svc, err := dnsctl.kubeClient.CoreV1().Services(constants.YurttunnelServerServiceNs). - Get(constants.YurttunnelServerInternalServiceName, metav1.GetOptions{}) + Get(context.Background(), constants.YurttunnelServerInternalServiceName, metav1.GetOptions{}) if err != nil { return fmt.Errorf("failed to sync tunnel server internal service, %v", err) } @@ -454,7 +454,7 @@ func (dnsctl *coreDNSRecordController) updateTunnelServerSvcDnatPorts(ports []st } svc.Spec.Ports = updatedSvcPorts - _, err = dnsctl.kubeClient.CoreV1().Services(constants.YurttunnelServerServiceNs).Update(svc) + _, err = dnsctl.kubeClient.CoreV1().Services(constants.YurttunnelServerServiceNs).Update(context.Background(), svc, metav1.UpdateOptions{}) if err != nil { return fmt.Errorf("failed to sync tunnel server service, %v", err) } diff --git a/pkg/yurttunnel/dns/handler.go b/pkg/yurttunnel/dns/handler.go index 9bd2c439993..d5d0d7c47af 100644 --- a/pkg/yurttunnel/dns/handler.go +++ b/pkg/yurttunnel/dns/handler.go @@ -17,6 +17,7 @@ limitations under the License. package dns import ( + "context" "fmt" "reflect" "strings" @@ -218,7 +219,7 @@ func (dnsctl *coreDNSRecordController) onNodeDelete(node *corev1.Node) error { func (dnsctl *coreDNSRecordController) getCurrentDNSRecords() ([]string, error) { cm, err := dnsctl.kubeClient.CoreV1().ConfigMaps(constants.YurttunnelServerServiceNs). - Get(yurttunnelDNSRecordConfigMapName, metav1.GetOptions{}) + Get(context.Background(), yurttunnelDNSRecordConfigMapName, metav1.GetOptions{}) if err != nil { return nil, err } diff --git a/pkg/yurttunnel/iptables/iptables.go b/pkg/yurttunnel/iptables/iptables.go index d14c33098b7..cf3be52f12f 100644 --- a/pkg/yurttunnel/iptables/iptables.go +++ b/pkg/yurttunnel/iptables/iptables.go @@ -28,7 +28,6 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" - utildbus "k8s.io/kubernetes/pkg/util/dbus" "k8s.io/kubernetes/pkg/util/iptables" "k8s.io/utils/exec" utilnet "k8s.io/utils/net" @@ -102,8 +101,7 @@ func NewIptablesManager(client clientset.Interface, protocol := iptables.ProtocolIpv4 execer := exec.New() - dbus := utildbus.New() - iptInterface := iptables.New(execer, dbus, protocol) + iptInterface := iptables.New(execer, protocol) if syncPeriod < defaultSyncPeriod { syncPeriod = defaultSyncPeriod diff --git a/pkg/yurttunnel/pki/certmanager/certmanager.go b/pkg/yurttunnel/pki/certmanager/certmanager.go index 0ebdc8dc36e..864220997c0 100644 --- a/pkg/yurttunnel/pki/certmanager/certmanager.go +++ b/pkg/yurttunnel/pki/certmanager/certmanager.go @@ -17,6 +17,7 @@ limitations under the License. 
package certmanager import ( + "context" "crypto/tls" "crypto/x509" "crypto/x509/pkix" @@ -59,7 +60,7 @@ func NewYurttunnelServerCertManager( } // get clusterIP for tunnel server internal service - svc, err := clientset.CoreV1().Services(constants.YurttunnelServerServiceNs).Get(constants.YurttunnelServerInternalServiceName, metav1.GetOptions{}) + svc, err := clientset.CoreV1().Services(constants.YurttunnelServerServiceNs).Get(context.Background(), constants.YurttunnelServerInternalServiceName, metav1.GetOptions{}) if err == nil { if svc.Spec.ClusterIP != "" && net.ParseIP(svc.Spec.ClusterIP) != nil { ips = append(ips, net.ParseIP(svc.Spec.ClusterIP)) diff --git a/pkg/yurttunnel/pki/certmanager/csrapprover.go b/pkg/yurttunnel/pki/certmanager/csrapprover.go index d8ef1f6341c..a60ac873f6a 100644 --- a/pkg/yurttunnel/pki/certmanager/csrapprover.go +++ b/pkg/yurttunnel/pki/certmanager/csrapprover.go @@ -17,6 +17,7 @@ limitations under the License. package certmanager import ( + "context" "crypto/x509" "encoding/pem" "fmt" @@ -24,6 +25,7 @@ import ( certificates "k8s.io/api/certificates/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" certinformer "k8s.io/client-go/informers/certificates/v1beta1" @@ -164,7 +166,7 @@ func approveYurttunnelCSR( Message: fmt.Sprintf("self-approving %s csr", projectinfo.GetTunnelName()), }) - result, err := csrClient.UpdateApproval(csr) + result, err := csrClient.UpdateApproval(context.Background(), csr, metav1.UpdateOptions{}) if err != nil { klog.Errorf("failed to approve %s csr(%s), %v", projectinfo.GetTunnelName(), csr.GetName(), err) return err diff --git a/pkg/yurttunnel/server/serveraddr/addr.go b/pkg/yurttunnel/server/serveraddr/addr.go index 322cb09ce8f..8ad09c4cf24 100644 --- a/pkg/yurttunnel/server/serveraddr/addr.go +++ b/pkg/yurttunnel/server/serveraddr/addr.go @@ -17,6 +17,7 @@ limitations under the License. package serveraddr import ( + "context" "errors" "fmt" "net" @@ -115,7 +116,7 @@ func getTunnelServerResources(clientset kubernetes.Interface) (*v1.Service, *v1. // get x-tunnel-server-svc service svc, err = clientset.CoreV1(). Services(constants.YurttunnelServerServiceNs). - Get(constants.YurttunnelServerServiceName, metav1.GetOptions{}) + Get(context.Background(), constants.YurttunnelServerServiceName, metav1.GetOptions{}) if err != nil { return svc, eps, nodeLst, err } @@ -123,7 +124,7 @@ func getTunnelServerResources(clientset kubernetes.Interface) (*v1.Service, *v1. // get x-tunnel-server-svc endpoints eps, err = clientset.CoreV1(). Endpoints(constants.YurttunnelEndpointsNs). - Get(constants.YurttunnelEndpointsName, metav1.GetOptions{}) + Get(context.Background(), constants.YurttunnelEndpointsName, metav1.GetOptions{}) if err != nil { return svc, eps, nodeLst, err } @@ -132,7 +133,7 @@ func getTunnelServerResources(clientset kubernetes.Interface) (*v1.Service, *v1. 
if svc.Spec.Type == corev1.ServiceTypeNodePort { labelSelector := fmt.Sprintf("%s=false", projectinfo.GetEdgeWorkerLabelKey()) // yurttunnel-server will be deployed on one of the cloud nodes - nodeLst, err = clientset.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: labelSelector}) + nodeLst, err = clientset.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{LabelSelector: labelSelector}) if err != nil { return svc, eps, nodeLst, err } diff --git a/pkg/yurttunnel/util/util.go b/pkg/yurttunnel/util/util.go index c3b00099fb4..17f38ad1fa9 100644 --- a/pkg/yurttunnel/util/util.go +++ b/pkg/yurttunnel/util/util.go @@ -1,6 +1,7 @@ package util import ( + "context" "fmt" "net/http" "strings" @@ -57,7 +58,7 @@ func GetConfiguredDnatPorts(client clientset.Interface, insecurePort string) ([] ports := make([]string, 0) c, err := client.CoreV1(). ConfigMaps(YurttunnelServerDnatConfigMapNs). - Get(YurttunnelServerDnatConfigMapName, metav1.GetOptions{}) + Get(context.Background(), YurttunnelServerDnatConfigMapName, metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { return nil, fmt.Errorf("configmap %s/%s is not found", diff --git a/test/e2e/common/ns/ns.go b/test/e2e/common/ns/ns.go index 931af99c525..dd73fb8ebfa 100644 --- a/test/e2e/common/ns/ns.go +++ b/test/e2e/common/ns/ns.go @@ -17,6 +17,8 @@ limitations under the License. package ns import ( + "context" + "github.com/onsi/gomega" apiv1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" @@ -27,7 +29,7 @@ import ( func DeleteNameSpace(c clientset.Interface, ns string) (err error) { deletePolicy := metav1.DeletePropagationForeground - err = c.CoreV1().Namespaces().Delete(ns, &metav1.DeleteOptions{ + err = c.CoreV1().Namespaces().Delete(context.Background(), ns, metav1.DeleteOptions{ PropagationPolicy: &deletePolicy, }) if !apierrs.IsNotFound(err) { @@ -44,14 +46,14 @@ func CreateNameSpace(c clientset.Interface, ns string) (result *apiv1.Namespace, Name: ns, }, } - result, err = namespaceClient.Create(namespace) + result, err = namespaceClient.Create(context.Background(), namespace, metav1.CreateOptions{}) return } func ListNameSpaces(c clientset.Interface) (result *apiv1.NamespaceList, err error) { - return c.CoreV1().Namespaces().List(metav1.ListOptions{}) + return c.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}) } func GetNameSpace(c clientset.Interface, ns string) (result *apiv1.Namespace, err error) { - return c.CoreV1().Namespaces().Get(ns, metav1.GetOptions{}) + return c.CoreV1().Namespaces().Get(context.Background(), ns, metav1.GetOptions{}) } diff --git a/test/e2e/common/pod/pod.go b/test/e2e/common/pod/pod.go index 82ec171d9dd..ad178b69862 100644 --- a/test/e2e/common/pod/pod.go +++ b/test/e2e/common/pod/pod.go @@ -17,6 +17,7 @@ limitations under the License. 
package pod import ( + "context" "time" apiv1 "k8s.io/api/core/v1" @@ -26,7 +27,7 @@ import ( ) func ListPods(c clientset.Interface, ns string) (pods *apiv1.PodList, err error) { - return c.CoreV1().Pods(ns).List(metav1.ListOptions{}) + return c.CoreV1().Pods(ns).List(context.Background(), metav1.ListOptions{}) } func CreatePod(c clientset.Interface, ns string, objectMeta metav1.ObjectMeta, spec apiv1.PodSpec) (pods *apiv1.Pod, err error) { @@ -34,15 +35,15 @@ func CreatePod(c clientset.Interface, ns string, objectMeta metav1.ObjectMeta, s p := &apiv1.Pod{} p.ObjectMeta = objectMeta p.Spec = spec - return c.CoreV1().Pods(ns).Create(p) + return c.CoreV1().Pods(ns).Create(context.Background(), p, metav1.CreateOptions{}) } func GetPod(c clientset.Interface, ns, podName string) (pod *apiv1.Pod, err error) { - return c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) + return c.CoreV1().Pods(ns).Get(context.Background(), podName, metav1.GetOptions{}) } func DeletePod(c clientset.Interface, ns, podName string) (err error) { - return c.CoreV1().Pods(ns).Delete(podName, &metav1.DeleteOptions{}) + return c.CoreV1().Pods(ns).Delete(context.Background(), podName, metav1.DeleteOptions{}) } func VerifyPodsRunning(c clientset.Interface, ns, podName string, wantName bool, replicas int32) error { diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index 92d8b630ba5..a452be4758b 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -17,6 +17,7 @@ limitations under the License. package e2e import ( + "context" "flag" "math/rand" "os" @@ -85,7 +86,7 @@ func PreCheckOk() bool { return false } - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) if err != nil { klog.Errorf("pre_check_get_nodes failed errmsg:%v", err) return false diff --git a/test/e2e/yurthub/yurthub.go b/test/e2e/yurthub/yurthub.go index 1c098e9e19d..59aa8c83b77 100644 --- a/test/e2e/yurthub/yurthub.go +++ b/test/e2e/yurthub/yurthub.go @@ -17,6 +17,7 @@ limitations under the License. 
package yurthub import ( + "context" "encoding/json" "strconv" "time" @@ -111,7 +112,7 @@ func Register() { patchData, err := json.Marshal(patchNode) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "fail marshal patch node") - node, err := c.CoreV1().Nodes().Patch(pod.Spec.NodeName, types.StrategicMergePatchType, patchData) + node, err := c.CoreV1().Nodes().Patch(context.Background(), pod.Spec.NodeName, types.StrategicMergePatchType, patchData, metav1.PatchOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "fail patch node autonomy") ginkgo.By("next will stop node") diff --git a/test/e2e/yurttunnel/yurttunnel.go b/test/e2e/yurttunnel/yurttunnel.go index 898652b78ee..130a46a8001 100644 --- a/test/e2e/yurttunnel/yurttunnel.go +++ b/test/e2e/yurttunnel/yurttunnel.go @@ -19,6 +19,7 @@ package yurttunnel import ( "bytes" + "context" "fmt" "io" "net/url" @@ -52,7 +53,7 @@ const ( ) func PreCheckNode(c clientset.Interface) error { - nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) + nodes, err := c.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) if err != nil { klog.Errorf("pre_check_get_nodes failed errmsg:%v", err) return err @@ -73,7 +74,7 @@ func PreCheckNode(c clientset.Interface) error { } func PreCheckTunnelPod(c clientset.Interface) error { - pods, err := c.CoreV1().Pods("").List(metav1.ListOptions{}) + pods, err := c.CoreV1().Pods("").List(context.Background(), metav1.ListOptions{}) if err != nil { klog.Errorf("pre_check_get_pods failed errmsg:%v", err) return err
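
Note: most of the hunks above (dns, certmanager, serveraddr, util, and the e2e helpers) migrate client-go calls to the context-aware signatures introduced around client-go v0.18, where every Get/List/Create/Update/Patch/Delete takes a leading context.Context and an explicit options struct. The following is an illustrative sketch only, not part of the patch; the package name, helper function, and the clientset/namespace/name parameters are hypothetical and chosen just to show the call shape used throughout the diff.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getConfigMapData demonstrates the context-aware call style adopted in the
// patch: the old two-argument form Get(name, metav1.GetOptions{}) becomes
// Get(ctx, name, metav1.GetOptions{}), and mutating calls likewise gain a
// context plus metav1.CreateOptions/UpdateOptions/DeleteOptions arguments.
func getConfigMapData(c kubernetes.Interface, ns, name string) (map[string]string, error) {
	cm, err := c.CoreV1().ConfigMaps(ns).Get(context.Background(), name, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to get configmap %s/%s, %v", ns, name, err)
	}
	return cm.Data, nil
}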