Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update Kubernetes dependencies to 1.14.1 and adjust code #209

Merged
merged 5 commits into from
Apr 16, 2019
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
Original file line number Diff line number Diff line change
Expand Up @@ -17,51 +17,40 @@ limitations under the License.
package main

import (
"flag"
"fmt"
"os"

"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/util/flag"
"k8s.io/apiserver/pkg/util/logs"
"k8s.io/component-base/logs"
"k8s.io/kubernetes/cmd/cloud-controller-manager/app"
"k8s.io/kubernetes/cmd/cloud-controller-manager/app/options"
_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
_ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration
"k8s.io/kubernetes/pkg/version/verflag"

_ "github.com/digitalocean/digitalocean-cloud-controller-manager/cloud-controller-manager/do"
"github.com/golang/glog"
"github.com/spf13/pflag"
)

// init registers the default healthz check endpoints on package load.
// NOTE(review): healthz.DefaultHealthz appears to install handlers on the
// global HTTP mux — confirm against the k8s.io/apiserver version in use.
func init() {
healthz.DefaultHealthz()
}

func main() {
s, err := options.NewCloudControllerManagerOptions()
if err != nil {
glog.Fatalf("failed to create config options: %s", err)
}
// Bogus parameter needed until https://github.com/kubernetes/kubernetes/issues/76205
// gets resolved.
flag.CommandLine.String("cloud-provider-gce-lb-src-cidrs", "", "NOT USED (workaround for https://github.com/kubernetes/kubernetes/issues/76205)")

s.AddFlags(pflag.CommandLine)
command := app.NewCloudControllerManagerCommand()

flag.InitFlags()
// (The following comment is copied from upstream:)
// TODO: once we switch everything over to Cobra commands, we can go back to calling
// utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
// normalize func and add the go flag set by hand.
// utilflag.InitFlags()
logs.InitLogs()
defer logs.FlushLogs()

verflag.PrintAndExitIfRequested()

// digitalocean overrides
s.KubeCloudShared.AllowUntaggedCloud = true

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@timoreimann and I had an offline discussion about this. Because of the upstream changes, we're no longer able to do this, but it sounds like we need to change the parameters required to invoke the CCM. We should also update the documentation in this PR so they ride together.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good catch -- I pushed a commit that updates the docs. PTAL again.


config, err := s.Config()
if err != nil {
glog.Fatalf("failed to create component config: %s", err)
}

if err := app.Run(config.Complete()); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
if err := command.Execute(); err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
}
9 changes: 3 additions & 6 deletions cloud-controller-manager/do/cloud.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,7 @@ import (
"golang.org/x/oauth2"

"k8s.io/client-go/informers"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
cloudprovider "k8s.io/cloud-provider"
)

const (
Expand Down Expand Up @@ -106,17 +105,15 @@ func init() {
})
}

func (c *cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {
func (c *cloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stop <-chan struct{}) {
clientset := clientBuilder.ClientOrDie("do-shared-informers")
sharedInformer := informers.NewSharedInformerFactory(clientset, 0)

res := NewResourcesController(c.resources, sharedInformer.Core().V1().Services(), clientset, c.client)

sharedInformer.Start(nil)
sharedInformer.WaitForCacheSync(nil)
// TODO: pass in stopCh from Initialize once supported upstream
// see https://github.com/kubernetes/kubernetes/pull/70038 for more details
go res.Run(nil)
go res.Run(stop)
}

func (c *cloud) LoadBalancer() (cloudprovider.LoadBalancer, bool) {
Expand Down
2 changes: 1 addition & 1 deletion cloud-controller-manager/do/droplets.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ import (

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)

const (
Expand Down
2 changes: 1 addition & 1 deletion cloud-controller-manager/do/droplets_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ import (
"testing"

v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"

"github.com/digitalocean/godo"
)
Expand Down
24 changes: 17 additions & 7 deletions cloud-controller-manager/do/loadbalancers.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,10 +25,10 @@ import (
"strings"

v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"

"github.com/digitalocean/godo"
"github.com/golang/glog"
)

const (
Expand Down Expand Up @@ -159,7 +159,7 @@ func newLoadBalancers(resources *resources, client *godo.Client, region string)
//
// GetLoadBalancer will not modify service.
func (l *loadBalancers) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (*v1.LoadBalancerStatus, bool, error) {
lbName := cloudprovider.GetLoadBalancerName(service)
lbName := l.GetLoadBalancerName(ctx, clusterName, service)
lb, found := l.resources.LoadBalancerByName(lbName)
if !found {
return nil, false, nil
Expand All @@ -178,6 +178,16 @@ func (l *loadBalancers) GetLoadBalancer(ctx context.Context, clusterName string,
}, true, nil
}

// GetLoadBalancerName returns the name of the load balancer for the given
// service, satisfying the cloudprovider.LoadBalancer interface. It delegates
// to getDefaultLoadBalancerName so internal callers that cannot supply a
// context/cluster name derive the identical name. Implementations must treat
// the *v1.Service parameter as read-only and not modify it.
func (l *loadBalancers) GetLoadBalancerName(ctx context.Context, clusterName string, service *v1.Service) string {
return getDefaultLoadBalancerName(service)
}

// getDefaultLoadBalancerName derives the load-balancer name for service using
// the upstream default scheme (cloudprovider.DefaultLoadBalancerName). Kept as
// a package-level helper so code without a loadBalancers receiver (e.g. the
// resources tag syncer) produces the same name as GetLoadBalancerName.
func getDefaultLoadBalancerName(service *v1.Service) string {
return cloudprovider.DefaultLoadBalancerName(service)
}

// EnsureLoadBalancer ensures that the cluster is running a load balancer for
// service.
//
Expand Down Expand Up @@ -234,7 +244,7 @@ func (l *loadBalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri
return err
}

lbName := cloudprovider.GetLoadBalancerName(service)
lbName := l.GetLoadBalancerName(ctx, clusterName, service)
lb, found := l.resources.LoadBalancerByName(lbName)
if !found {
return errLBNotFound
Expand Down Expand Up @@ -263,7 +273,7 @@ func (l *loadBalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa
return nil
}

lbName := cloudprovider.GetLoadBalancerName(service)
lbName := l.GetLoadBalancerName(ctx, clusterName, service)

lb, found := l.resources.LoadBalancerByName(lbName)
if !found {
Expand Down Expand Up @@ -295,7 +305,7 @@ func (l *loadBalancers) nodesToDropletIDs(nodes []*v1.Node) ([]int, error) {
}
addresses, err := nodeAddresses(droplet)
if err != nil {
glog.Errorf("error getting node addresses for %s: %v", droplet.Name, err)
klog.Errorf("error getting node addresses for %s: %v", droplet.Name, err)
continue
}
for _, address := range addresses {
Expand All @@ -313,7 +323,7 @@ func (l *loadBalancers) nodesToDropletIDs(nodes []*v1.Node) ([]int, error) {
// buildLoadBalancerRequest returns a *godo.LoadBalancerRequest to balance
// requests for service across nodes.
func (l *loadBalancers) buildLoadBalancerRequest(service *v1.Service, nodes []*v1.Node) (*godo.LoadBalancerRequest, error) {
lbName := cloudprovider.GetLoadBalancerName(service)
lbName := getDefaultLoadBalancerName(service)

dropletIDs, err := l.nodesToDropletIDs(nodes)
if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion cloud-controller-manager/do/loadbalancers_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ import (

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)

var _ cloudprovider.LoadBalancer = new(loadBalancers)
Expand Down
23 changes: 11 additions & 12 deletions cloud-controller-manager/do/resources.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,14 +24,13 @@ import (
"time"

"github.com/digitalocean/godo"
"github.com/golang/glog"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
v1informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
v1lister "k8s.io/client-go/listers/core/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/klog"
)

const (
Expand Down Expand Up @@ -186,14 +185,14 @@ func (s *tickerSyncer) Sync(name string, period time.Duration, stopCh <-chan str

// manually call to avoid initial tick delay
if err := fn(); err != nil {
glog.Errorf("%s failed: %s", name, err)
klog.Errorf("%s failed: %s", name, err)
}

for {
select {
case <-ticker.C:
if err := fn(); err != nil {
glog.Errorf("%s failed: %s", name, err)
klog.Errorf("%s failed: %s", name, err)
}
case <-stopCh:
return
Expand Down Expand Up @@ -234,7 +233,7 @@ func (r *ResourcesController) Run(stopCh <-chan struct{}) {
go r.syncer.Sync("resources syncer", controllerSyncResourcesPeriod, stopCh, r.syncResources)

if r.resources.clusterID == "" {
glog.Info("No cluster ID configured -- skipping cluster dependent syncers.")
klog.Info("No cluster ID configured -- skipping cluster dependent syncers.")
return
}
go r.syncer.Sync("tags syncer", controllerSyncTagsPeriod, stopCh, r.syncTags)
Expand All @@ -246,22 +245,22 @@ func (r *ResourcesController) syncResources() error {
ctx, cancel := context.WithTimeout(context.Background(), syncResourcesTimeout)
defer cancel()

glog.V(2).Info("syncing droplet resources.")
klog.V(2).Info("syncing droplet resources.")
droplets, err := allDropletList(ctx, r.gclient)
if err != nil {
glog.Errorf("failed to sync droplet resources: %s.", err)
klog.Errorf("failed to sync droplet resources: %s.", err)
} else {
r.resources.UpdateDroplets(droplets)
glog.V(2).Info("synced droplet resources.")
klog.V(2).Info("synced droplet resources.")
}

glog.V(2).Info("syncing load-balancer resources.")
klog.V(2).Info("syncing load-balancer resources.")
lbs, err := allLoadBalancerList(ctx, r.gclient)
if err != nil {
glog.Errorf("failed to sync load-balancer resources: %s.", err)
klog.Errorf("failed to sync load-balancer resources: %s.", err)
} else {
r.resources.UpdateLoadBalancers(lbs)
glog.V(2).Info("synced load-balancer resources.")
klog.V(2).Info("synced load-balancer resources.")
}

return nil
Expand Down Expand Up @@ -293,7 +292,7 @@ func (r *ResourcesController) syncTags() error {
continue
}

name := cloudprovider.GetLoadBalancerName(svc)
name := getDefaultLoadBalancerName(svc)
if id, ok := loadBalancers[name]; ok {
res = append(res, godo.Resource{
ID: id,
Expand Down
3 changes: 1 addition & 2 deletions cloud-controller-manager/do/resources_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/pkg/cloudprovider"
)

func TestResources_DropletByID(t *testing.T) {
Expand Down Expand Up @@ -431,7 +430,7 @@ func TestResourcesController_SyncResources(t *testing.T) {

func lbName(idx int) string {
svc := createSvc(idx, false)
return cloudprovider.GetLoadBalancerName(svc)
return getDefaultLoadBalancerName(svc)
}

func createSvc(idx int, isTypeLoadBalancer bool) *corev1.Service {
Expand Down
2 changes: 1 addition & 1 deletion cloud-controller-manager/do/zones.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ import (
"context"

"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)

type zones struct {
Expand Down
2 changes: 1 addition & 1 deletion cloud-controller-manager/do/zones_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ import (
"testing"

"github.com/digitalocean/godo"
"k8s.io/kubernetes/pkg/cloudprovider"
cloudprovider "k8s.io/cloud-provider"
)

var _ cloudprovider.Zones = new(zones)
Expand Down
1 change: 1 addition & 0 deletions docs/example-manifests/cloud-controller-manager.yml
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ spec:
- "/bin/digitalocean-cloud-controller-manager"
- "--cloud-provider=digitalocean"
- "--leader-elect=true"
- "--allow-untagged-cloud=true"
resources:
requests:
cpu: 100m
Expand Down
6 changes: 3 additions & 3 deletions e2e/e2e_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ import (
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)

const (
Expand Down Expand Up @@ -104,7 +104,7 @@ func TestE2E(t *testing.T) {
)
start := time.Now()
if err := wait.Poll(5*time.Second, 6*time.Minute, func() (bool, error) {
nl, err := cs.Core().Nodes().List(metav1.ListOptions{LabelSelector: "kubernetes.io/role=node"})
nl, err := cs.CoreV1().Nodes().List(metav1.ListOptions{LabelSelector: "kubernetes.io/role=node"})
if err != nil {
return false, err
}
Expand All @@ -115,7 +115,7 @@ func TestE2E(t *testing.T) {
for _, node := range gotNodes {
// Make sure the "uninitialized" node taint is missing.
for _, taint := range node.Spec.Taints {
if taint.Key == algorithm.TaintExternalCloudProvider {
if taint.Key == schedulerapi.TaintExternalCloudProvider {
continue Nodes
}
}
Expand Down
Loading