diff --git a/cmd/virtual-kubelet/main.go b/cmd/virtual-kubelet/main.go index e5298d33..3827d3a5 100644 --- a/cmd/virtual-kubelet/main.go +++ b/cmd/virtual-kubelet/main.go @@ -19,8 +19,9 @@ import ( "context" "strings" - "github.com/elotl/cloud-instance-provider/pkg/glog" + "github.com/elotl/cloud-instance-provider/pkg/klog" "github.com/elotl/cloud-instance-provider/pkg/server" + "github.com/elotl/cloud-instance-provider/pkg/util/habitat" cli "github.com/virtual-kubelet/node-cli" opencensuscli "github.com/virtual-kubelet/node-cli/opencensus" "github.com/virtual-kubelet/node-cli/opts" @@ -39,7 +40,7 @@ var ( func main() { ctx := cli.ContextWithCancelOnSignal(context.Background()) - log.L = glog.NewGlogAdapter() + log.L = klog.NewKlogAdapter() trace.T = opencensus.Adapter{} traceConfig := opencensuscli.Config{ @@ -63,10 +64,21 @@ func main() { cli.WithCLIVersion(buildVersion, buildTime), cli.WithProvider("cloud-instance-provider", func(cfg provider.InitConfig) (provider.Provider, error) { + internalIP := cfg.InternalIP + if internalIP == "" { + internalIP = habitat.GetMyIP() + if internalIP == "" { + ips := habitat.GetIPAddresses() + if len(ips) > 0 { + internalIP = ips[0] + } + } + } + log.G(ctx).Infof("node internal IP address: %q", internalIP) return server.NewInstanceProvider( cfg.ConfigPath, cfg.NodeName, - cfg.InternalIP, + internalIP, cfg.DaemonPort, serverConfig.DebugServer, cfg.ResourceManager, diff --git a/deploy/virtual-kubelet.yaml b/deploy/virtual-kubelet.yaml index 85beb5ac..c9c50954 100644 --- a/deploy/virtual-kubelet.yaml +++ b/deploy/virtual-kubelet.yaml @@ -80,6 +80,8 @@ spec: - cloud-instance-provider - --provider-config - /etc/virtual-kubelet/server.yml + - --klog.logtostderr + - --klog.v=5 image: elotl/virtual-kubelet:dev imagePullPolicy: Always name: virtual-kubelet diff --git a/pkg/api/codec.go b/pkg/api/codec.go index 4e4b58c1..80aec456 100644 --- a/pkg/api/codec.go +++ b/pkg/api/codec.go @@ -4,7 +4,7 @@ package api import ( "github.com/json-iterator/go" - "github.com/golang/glog" + "k8s.io/klog" ) var json = jsoniter.ConfigCompatibleWithStandardLibrary @@ -37,7 +37,7 @@ func (c JsonCodec) Marshal(i interface{}) ([]byte, error) { func warnIfUnversioned(t TypeVersioner) { version := t.GetAPIVersion() if version == "" { - glog.Warningf("Found empty API version in registry for %v", t) + klog.Warningf("Found empty API version in registry for %v", t) } } diff --git a/pkg/certs/factory.go b/pkg/certs/factory.go index 5fc5b580..7ddc2c96 100644 --- a/pkg/certs/factory.go +++ b/pkg/certs/factory.go @@ -10,7 +10,7 @@ import ( "github.com/docker/libkv/store" "github.com/elotl/cloud-instance-provider/pkg/etcd" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -33,7 +33,7 @@ func New(kvstore etcd.Storer) (*CertificateFactory, error) { certFactory.kvstore.Put(CertificateDirectoryPlaceholder, []byte("."), nil) err := certFactory.GetRootFromStore() if err == store.ErrKeyNotFound { - glog.Infof("Initializing Milpa root certificate") + klog.V(2).Infof("Initializing Milpa root certificate") certFactory.InitRootCert() } else if err != nil { return nil, util.WrapError(err, "Error creating Milpa PKI") diff --git a/pkg/etcd/etcd.go b/pkg/etcd/etcd.go index 4068a46a..48ac2342 100644 --- a/pkg/etcd/etcd.go +++ b/pkg/etcd/etcd.go @@ -16,8 +16,8 @@ import ( "github.com/coreos/etcd/pkg/transport" "github.com/docker/libkv/store" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" "golang.org/x/sys/unix" + 
"k8s.io/klog" ) var ( @@ -37,7 +37,7 @@ func ensureEtcdDataDir(dataDir string) error { errMsg := fmt.Sprintf("Could not create milpa storage directory at %s, please verify the directory exists and is writable by milpa. The error was", dataDir) _, err := os.Stat(dataDir) if os.IsNotExist(err) { - glog.Infof("Creating milpa data directory at %s", dataDir) + klog.V(2).Infof("Creating milpa data directory at %s", dataDir) err := os.MkdirAll(dataDir, 0750) if err != nil { return util.WrapError(err, errMsg) @@ -86,13 +86,13 @@ func (s *EtcdServer) Start(quit <-chan struct{}, wg *sync.WaitGroup) error { cfg.LCUrls = []url.URL{} } if cfg.AutoCompactionMode == "" { - glog.Info("Setting etcd compaction mode to periodic") + klog.V(2).Info("Setting etcd compaction mode to periodic") cfg.AutoCompactionMode = compactor.ModePeriodic } if cfg.AutoCompactionMode == compactor.ModePeriodic && cfg.AutoCompactionRetention == "" { cfg.AutoCompactionRetention = "1" - glog.Info("Setting etcd compaction interval to 1 hour") + klog.V(2).Info("Setting etcd compaction interval to 1 hour") } err = s.reconcileDataDirectoryValues(cfg) @@ -110,7 +110,7 @@ func (s *EtcdServer) Start(quit <-chan struct{}, wg *sync.WaitGroup) error { } select { case <-s.Proc.Server.ReadyNotify(): - glog.Info("Etcd server is ready to serve requests") + klog.V(2).Info("Etcd server is ready to serve requests") case <-time.After(60 * time.Second): s.Proc.Server.Stop() s.Proc.Close() @@ -127,7 +127,7 @@ func (s *EtcdServer) Start(quit <-chan struct{}, wg *sync.WaitGroup) error { <-quit // if we don't pause, clients will crash, it's a bad look. pause := 2 * time.Second - glog.Infof("Pausing for %ds before shutting down etcd...", int(pause.Seconds())) + klog.V(2).Infof("Pausing for %ds before shutting down etcd...", int(pause.Seconds())) time.Sleep(pause) s.Proc.Server.Stop() s.Proc.Close() diff --git a/pkg/etcd/etcd_test_helpers.go b/pkg/etcd/etcd_test_helpers.go index 5f5997c3..8d913cba 100644 --- a/pkg/etcd/etcd_test_helpers.go +++ b/pkg/etcd/etcd_test_helpers.go @@ -5,7 +5,7 @@ import ( "os" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) func SetupEmbeddedEtcdTest() (*SimpleEtcd, func(), error) { @@ -18,7 +18,7 @@ func SetupEmbeddedEtcdTest() (*SimpleEtcd, func(), error) { closer := func() { quit <- struct{}{} if err := os.RemoveAll(dataDir); err != nil { - glog.Fatal("Error removing etcd data directory") + klog.Fatal("Error removing etcd data directory") } } db := EtcdServer{ diff --git a/pkg/glog/adapter.go b/pkg/glog/adapter.go deleted file mode 100644 index 3a9ea9ee..00000000 --- a/pkg/glog/adapter.go +++ /dev/null @@ -1,129 +0,0 @@ -package glog - -import ( - "fmt" - - "github.com/golang/glog" - "github.com/virtual-kubelet/virtual-kubelet/log" -) - -const ( - errorKey = "error" -) - -type GlogAdapter struct { - fields log.Fields - extraArgsStr string - extraFormatStr string -} - -func NewGlogAdapter() *GlogAdapter { - return &GlogAdapter{} -} - -func (g *GlogAdapter) update() { - g.extraArgsStr = "" - for k, v := range g.fields { - g.extraArgsStr += fmt.Sprintf(" %s=%v", k, v) - } - g.extraFormatStr = "" - for k, v := range g.fields { - g.extraFormatStr += fmt.Sprintf(" %s=%v", k, v) - } -} - -func (g *GlogAdapter) getArgs(args ...interface{}) []interface{} { - if len(g.extraArgsStr) > 0 { - return append(args, g.extraArgsStr) - } - return args -} - -func (g *GlogAdapter) getFormat(format string) string { - return format + g.extraFormatStr -} - -func (g *GlogAdapter) Debug(args ...interface{}) { - args = g.getArgs(args...) 
- if glog.V(4) { - glog.InfoDepth(1, args...) - } -} - -func (g *GlogAdapter) Debugf(format string, args ...interface{}) { - format = g.getFormat(format) - if glog.V(4) { - glog.InfoDepth(1, fmt.Sprintf(format, args...)) - } -} - -func (g *GlogAdapter) Info(args ...interface{}) { - args = g.getArgs(args...) - glog.InfoDepth(1, args...) -} - -func (g *GlogAdapter) Infof(format string, args ...interface{}) { - format = g.getFormat(format) - glog.InfoDepth(1, fmt.Sprintf(format, args...)) -} - -func (g *GlogAdapter) Warn(args ...interface{}) { - args = g.getArgs(args...) - glog.WarningDepth(1, args...) -} - -func (g *GlogAdapter) Warnf(format string, args ...interface{}) { - format = g.getFormat(format) - glog.WarningDepth(1, fmt.Sprintf(format, args...)) -} - -func (g *GlogAdapter) Error(args ...interface{}) { - args = g.getArgs(args...) - glog.ErrorDepth(1, args...) -} - -func (g *GlogAdapter) Errorf(format string, args ...interface{}) { - format = g.getFormat(format) - glog.ErrorDepth(1, fmt.Sprintf(format, args...)) -} - -func (g *GlogAdapter) Fatal(args ...interface{}) { - args = g.getArgs(args...) - glog.FatalDepth(1, args...) -} - -func (g *GlogAdapter) Fatalf(format string, args ...interface{}) { - format = g.getFormat(format) - glog.FatalDepth(1, fmt.Sprintf(format, args...)) -} - -func (g *GlogAdapter) WithField(key string, value interface{}) log.Logger { - logger := &GlogAdapter{ - fields: map[string]interface{}{ - key: value, - }, - } - logger.update() - return logger -} - -func (g *GlogAdapter) WithFields(fields log.Fields) log.Logger { - logger := &GlogAdapter{ - fields: make(map[string]interface{}), - } - for k, v := range fields { - logger.fields[k] = v - } - logger.update() - return logger -} - -func (g *GlogAdapter) WithError(err error) log.Logger { - logger := &GlogAdapter{ - fields: map[string]interface{}{ - errorKey: err, - }, - } - logger.update() - return logger -} diff --git a/pkg/klog/adapter.go b/pkg/klog/adapter.go new file mode 100644 index 00000000..972fbbf0 --- /dev/null +++ b/pkg/klog/adapter.go @@ -0,0 +1,132 @@ +package klog + +import ( + "flag" + "fmt" + + "github.com/virtual-kubelet/virtual-kubelet/log" + "k8s.io/klog" +) + +const ( + errorKey = "error" +) + +type KlogAdapter struct { + fields log.Fields + extraArgsStr string + extraFormatStr string +} + +func NewKlogAdapter() *KlogAdapter { + klogFlags := flag.NewFlagSet("klog", flag.ExitOnError) + klog.InitFlags(klogFlags) + return &KlogAdapter{} +} + +func (g *KlogAdapter) update() { + g.extraArgsStr = "" + for k, v := range g.fields { + g.extraArgsStr += fmt.Sprintf(" %s=%v", k, v) + } + g.extraFormatStr = "" + for k, v := range g.fields { + g.extraFormatStr += fmt.Sprintf(" %s=%v", k, v) + } +} + +func (g *KlogAdapter) getArgs(args ...interface{}) []interface{} { + if len(g.extraArgsStr) > 0 { + return append(args, g.extraArgsStr) + } + return args +} + +func (g *KlogAdapter) getFormat(format string) string { + return format + g.extraFormatStr +} + +func (g *KlogAdapter) Debug(args ...interface{}) { + args = g.getArgs(args...) + if klog.V(4) { + klog.InfoDepth(1, args...) + } +} + +func (g *KlogAdapter) Debugf(format string, args ...interface{}) { + format = g.getFormat(format) + if klog.V(4) { + klog.InfoDepth(1, fmt.Sprintf(format, args...)) + } +} + +func (g *KlogAdapter) Info(args ...interface{}) { + args = g.getArgs(args...) + klog.InfoDepth(1, args...) 
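// InfoDepth(1) skips one extra stack frame when klog computes the file:line
// prefix, so records are attributed to the adapter's caller rather than to
// this wrapper; the Warn/Error/Fatal methods below use the same depth for
// the same reason.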
+} + +func (g *KlogAdapter) Infof(format string, args ...interface{}) { + format = g.getFormat(format) + klog.InfoDepth(1, fmt.Sprintf(format, args...)) +} + +func (g *KlogAdapter) Warn(args ...interface{}) { + args = g.getArgs(args...) + klog.WarningDepth(1, args...) +} + +func (g *KlogAdapter) Warnf(format string, args ...interface{}) { + format = g.getFormat(format) + klog.WarningDepth(1, fmt.Sprintf(format, args...)) +} + +func (g *KlogAdapter) Error(args ...interface{}) { + args = g.getArgs(args...) + klog.ErrorDepth(1, args...) +} + +func (g *KlogAdapter) Errorf(format string, args ...interface{}) { + format = g.getFormat(format) + klog.ErrorDepth(1, fmt.Sprintf(format, args...)) +} + +func (g *KlogAdapter) Fatal(args ...interface{}) { + args = g.getArgs(args...) + klog.FatalDepth(1, args...) +} + +func (g *KlogAdapter) Fatalf(format string, args ...interface{}) { + format = g.getFormat(format) + klog.FatalDepth(1, fmt.Sprintf(format, args...)) +} + +func (g *KlogAdapter) WithField(key string, value interface{}) log.Logger { + logger := &KlogAdapter{ + fields: map[string]interface{}{ + key: value, + }, + } + logger.update() + return logger +} + +func (g *KlogAdapter) WithFields(fields log.Fields) log.Logger { + logger := &KlogAdapter{ + fields: make(map[string]interface{}), + } + for k, v := range fields { + logger.fields[k] = v + } + logger.update() + return logger +} + +func (g *KlogAdapter) WithError(err error) log.Logger { + logger := &KlogAdapter{ + fields: map[string]interface{}{ + errorKey: err, + }, + } + logger.update() + return logger +} diff --git a/pkg/labels/selector.go b/pkg/labels/selector.go index 3c5e63c6..66077214 100644 --- a/pkg/labels/selector.go +++ b/pkg/labels/selector.go @@ -28,7 +28,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/selection" "github.com/elotl/cloud-instance-provider/pkg/util/sets" "github.com/elotl/cloud-instance-provider/pkg/util/validation" - "github.com/golang/glog" + "k8s.io/klog" ) // Requirements is AND of all requirements. @@ -193,13 +193,13 @@ func (r *Requirement) Matches(ls Labels) bool { } lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) return false } // There should be only one strValue in r.strValues, and can be converted to a integer. 
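// Hedged illustration of the Gt/Lt path below (not part of this diff; it
// assumes this vendored package keeps upstream's NewRequirement/Set API,
// and the key and values are made up):
//
//	req, _ := NewRequirement("priority", selection.GreaterThan, []string{"5"})
//	req.Matches(Set{"priority": "7"})    // true: 7 > 5 after both ParseInt calls
//	req.Matches(Set{"priority": "high"}) // false: the ParseInt above fails and
//	                                     // is logged at klog.V(10)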
if len(r.strValues) != 1 { - glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) return false } @@ -207,7 +207,7 @@ func (r *Requirement) Matches(ls Labels) bool { for i := range r.strValues { rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) if err != nil { - glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) return false } } diff --git a/pkg/milpactl/printer.go b/pkg/milpactl/printer.go index d2a9089c..448eae8a 100644 --- a/pkg/milpactl/printer.go +++ b/pkg/milpactl/printer.go @@ -18,8 +18,8 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/api" "github.com/elotl/cloud-instance-provider/pkg/labels" "github.com/ghodss/yaml" - "github.com/golang/glog" "github.com/spf13/cobra" + "k8s.io/klog" ) // Most of this was taken from k8s at commit @@ -134,7 +134,7 @@ func NewPrettyPrinter(noHeaders, wide, showAll, showLabels, absoluteTimestamps b func (h *PrettyPrinter) Handler(columns []string, printFunc interface{}) error { printFuncValue := reflect.ValueOf(printFunc) if err := h.validatePrintHandlerFunc(printFuncValue); err != nil { - glog.Errorf("Unable to add print handler: %v", err) + klog.Errorf("Unable to add print handler: %v", err) return err } objType := printFuncValue.Type().In(0) diff --git a/pkg/nodeclient/itzoclient.go b/pkg/nodeclient/itzoclient.go index 0031122f..5da991f2 100644 --- a/pkg/nodeclient/itzoclient.go +++ b/pkg/nodeclient/itzoclient.go @@ -23,8 +23,8 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/util" "github.com/elotl/cloud-instance-provider/pkg/util/timeoutmap" "github.com/elotl/wsstream" - "github.com/golang/glog" "github.com/gorilla/websocket" + "k8s.io/klog" ) var ( @@ -112,7 +112,7 @@ func (fac *ItzoClientFactory) GetWSStream(addy []api.NetworkAddress, path string if bodyerr == nil { e := fmt.Errorf("Websocket dial error: %v - %s", err, string(bodyContents)) - glog.Error(e) + klog.Error(e) return nil, e } } @@ -276,17 +276,17 @@ func (c *ItzoClient) GetLogs(unit string, lines, bytes int) ([]byte, error) { } resp, err := c.httpClient.Get(url) if err != nil { - glog.Errorf("Error getting logs from %s: %s", c.instanceIp, err) + klog.Errorf("Error getting logs from %s: %s", c.instanceIp, err) return nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - glog.Errorf("Error reading log reply from %s: %s", c.instanceIp, err) + klog.Errorf("Error reading log reply from %s: %s", c.instanceIp, err) return nil, err } if resp.StatusCode/200 != 1 { - glog.Errorf("HTTP error getting log from %s: %s (%d); %s", + klog.Errorf("HTTP error getting log from %s: %s (%d); %s", c.instanceIp, resp.Status, resp.StatusCode, string(body)) return nil, fmt.Errorf("Failed to fetch logs: %s (%d); %s", resp.Status, resp.StatusCode, string(body)) @@ -309,17 +309,17 @@ func (c *ItzoClient) GetFile(path string, lines, bytes int) ([]byte, error) { // Todo: combine with logs getter? 
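// One way to act on the Todo above, since GetLogs and GetFile share the same
// get/read/status-check shape. A minimal sketch only; the helper name
// getViaHTTP is hypothetical and not part of this diff:
//
//	func (c *ItzoClient) getViaHTTP(url, what string) ([]byte, error) {
//		resp, err := c.httpClient.Get(url)
//		if err != nil {
//			klog.Errorf("Error getting %s from %s: %s", what, c.instanceIp, err)
//			return nil, err
//		}
//		defer resp.Body.Close()
//		body, err := ioutil.ReadAll(resp.Body)
//		if err != nil {
//			klog.Errorf("Error reading %s reply from %s: %s", what, c.instanceIp, err)
//			return nil, err
//		}
//		if resp.StatusCode/200 != 1 {
//			return nil, fmt.Errorf("Failed to fetch %s: %s (%d); %s",
//				what, resp.Status, resp.StatusCode, string(body))
//		}
//		return body, nil
//	}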
resp, err := c.httpClient.Get(url) if err != nil { - glog.Errorf("Error getting file from %s: %s", c.instanceIp, err) + klog.Errorf("Error getting file from %s: %s", c.instanceIp, err) return nil, err } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { - glog.Errorf("Error reading file reply from %s: %s", c.instanceIp, err) + klog.Errorf("Error reading file reply from %s: %s", c.instanceIp, err) return nil, err } if resp.StatusCode/200 != 1 { - glog.Errorf("HTTP error getting file from %s: %s (%d); %s", + klog.Errorf("HTTP error getting file from %s: %s (%d); %s", c.instanceIp, resp.Status, resp.StatusCode, string(body)) return nil, fmt.Errorf("Failed to fetch file: %s (%d); %s", resp.Status, resp.StatusCode, string(body)) @@ -337,7 +337,7 @@ func (c *ItzoClient) UpdateUnits(pp api.PodParameters) error { resp, err := c.httpClient.Post(url, "application/json", buf) _, err = handleResponse(resp, err) if err != nil { - glog.Errorf("Error sending pod update to %s: %v", c.instanceIp, err) + klog.Errorf("Error sending pod update to %s: %v", c.instanceIp, err) return util.WrapError(err, "Error sending pod update to %s", c.instanceIp) } @@ -354,22 +354,22 @@ func (c *ItzoClient) Deploy(pod, name string, data io.Reader) error { go func() { fullUrl := createUrl( c.baseURL, fmt.Sprintf("rest/v1/deploy/%s/%s", pod, name)) - glog.Infof("deploying package to %s", fullUrl) + klog.V(2).Infof("deploying package to %s", fullUrl) req, err := http.NewRequest("POST", fullUrl, pr) if err != nil { - glog.Errorf("Error creating new deploy POST request: %v\n", err) + klog.Errorf("Error creating new deploy POST request: %v\n", err) ch <- err return } req.Header.Add("Content-Type", writer.FormDataContentType()) resp, err := c.httpClient.Do(req) if err != nil { - glog.Errorf("Error sending deploy POST request: %v\n", err) + klog.Errorf("Error sending deploy POST request: %v\n", err) ch <- err return } if _, err = handleResponse(resp, err); err != nil { - glog.Errorf("Error response %+v to deploy POST request: %v\n", + klog.Errorf("Error response %+v to deploy POST request: %v\n", *resp, err) ch <- err return diff --git a/pkg/server/cloud/aws/aws.go b/pkg/server/cloud/aws/aws.go index efc1b543..96c0c4d1 100644 --- a/pkg/server/cloud/aws/aws.go +++ b/pkg/server/cloud/aws/aws.go @@ -17,7 +17,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/server/cloud" "github.com/elotl/cloud-instance-provider/pkg/util" "github.com/elotl/cloud-instance-provider/pkg/util/errors" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -98,18 +98,18 @@ func CheckConnection() error { if err != nil { return util.WrapError(err, "Check connection failed setting up an ec2 client") } - glog.Infof("Checking for credential errors") + klog.V(2).Infof("Checking for credential errors") val, err := client.Config.Credentials.Get() if err != nil { return util.WrapError(err, "Error validating AWS credentials") } - glog.Infof("Using credentials from %s", val.ProviderName) + klog.V(2).Infof("Using credentials from %s", val.ProviderName) // Validate that region is set. I'm pretty sure that all our // authentication methods need this to be set. 
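// Note on the klog.V(2).Infof conversions above, which this diff applies
// throughout: V(2) messages are emitted only when klog verbosity is >= 2,
// for example via the --klog.v=5 flag added to deploy/virtual-kubelet.yaml.
// A minimal sketch of the two gating styles used in this diff:
//
//	klog.V(2).Infof("progress message, shown at -v=2 and above")
//	if klog.V(4) {
//		klog.Info("debug message, guarded as in the adapter's Debug")
//	}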
if client.Config.Region == nil || *client.Config.Region == "" { return fmt.Errorf("Empty region in AWS configuraiton, please specify a region in the config file or environment") } - glog.Infof("Validating read access") + klog.V(2).Infof("Validating read access") _, err = client.DescribeInstances(nil) return err } @@ -291,7 +291,7 @@ func (c *AwsEC2) ValidateMarketplaceLicense() error { } for _, productCode := range doc.MarketplaceProductCodes { if productDescription, ok := milpaMarketplaceCodes[productCode]; ok { - glog.Infof("Running on marketplace, product: %q (%s)", + klog.V(2).Infof("Running on marketplace, product: %q (%s)", productDescription, productCode) return nil } diff --git a/pkg/server/cloud/aws/container_instances.go b/pkg/server/cloud/aws/container_instances.go index 7abccee0..2e10da69 100644 --- a/pkg/server/cloud/aws/container_instances.go +++ b/pkg/server/cloud/aws/container_instances.go @@ -15,7 +15,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/util" "github.com/elotl/cloud-instance-provider/pkg/util/instanceselector" "github.com/elotl/cloud-instance-provider/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -106,7 +106,7 @@ func (c *AwsEC2) EnsureContainerInstanceCluster() error { return fmt.Errorf("error setting up ECS cluster, task long ARN formats is not enabled") } - glog.Infof("Ensuring ECS cluster %s exists", c.ecsClusterName) + klog.V(2).Infof("Ensuring ECS cluster %s exists", c.ecsClusterName) output, err := c.ecs.DescribeClusters(&ecs.DescribeClustersInput{ Clusters: aws.StringSlice([]string{c.ecsClusterName}), }) @@ -115,7 +115,7 @@ func (c *AwsEC2) EnsureContainerInstanceCluster() error { } if len(output.Clusters) == 0 { - glog.Infof("Creating ECS cluster %s", c.ecsClusterName) + klog.V(2).Infof("Creating ECS cluster %s", c.ecsClusterName) val := fmt.Sprintf("Milpa Controller %s", c.controllerID) tags := []*ecs.Tag{{ Key: aws.String("Created by Milpa Controller"), @@ -498,7 +498,7 @@ func (c *AwsEC2) StopContainerInstance(containerInstanceID string) error { return fmt.Errorf("cannot stop containerInstanceID %s: container instances client is not configured", containerInstanceID) } - glog.Infof("Stopping container instance %s", containerInstanceID) + klog.V(2).Infof("Stopping container instance %s", containerInstanceID) stopTaskInput := &ecs.StopTaskInput{ Cluster: aws.String(c.ecsClusterName), Reason: aws.String("Stopped by Milpa"), @@ -514,16 +514,16 @@ func (c *AwsEC2) StopContainerInstance(containerInstanceID string) error { if stopTaskOutput.Task != nil { err := c.DeregisterTaskDefinition(aws.StringValue(stopTaskOutput.Task.TaskDefinitionArn)) if err != nil { - glog.Warningf("Error deleting task definition: %s. The task definition will be cleaned up later", err.Error()) + klog.Warningf("Error deleting task definition: %s. 
The task definition will be cleaned up later", err.Error())
 		}
 	} else {
-		glog.Warningf("Task definition could not be found for %s, defering deletion of task definition", containerInstanceID)
+		klog.Warningf("Task definition could not be found for %s, deferring deletion of task definition", containerInstanceID)
 	}
 	return nil
 }
 
 func (c *AwsEC2) DeregisterTaskDefinition(taskARN string) error {
-	glog.Infof("Deregistering task definition %s", taskARN)
+	klog.V(2).Infof("Deregistering task definition %s", taskARN)
 	_, err := c.ecs.DeregisterTaskDefinition(
 		&ecs.DeregisterTaskDefinitionInput{
 			TaskDefinition: aws.String(taskARN),
@@ -535,7 +535,7 @@ func (c *AwsEC2) WaitForContainerInstanceRunning(pod *api.Pod) (*api.Pod, error)
 	if c.ecs == nil {
 		return nil, fmt.Errorf("Could not wait for container instance running: ECS client is not configured")
 	}
-	glog.Infof("Waiting for task %s to be running", pod.Status.BoundInstanceID)
+	klog.V(2).Infof("Waiting for task %s to be running", pod.Status.BoundInstanceID)
 	lastStatus := ""
 	observedPending := false
 	eniID := ""
@@ -574,7 +574,7 @@ func (c *AwsEC2) WaitForContainerInstanceRunning(pod *api.Pod) (*api.Pod, error)
 	if !pod.Spec.Resources.PrivateIPOnly {
 		addys, err := c.getENIAddresses(eniID)
 		if err != nil {
-			glog.Errorf("Error getting addresses from cloud for pod %s: %s", pod.Name, err.Error())
+			klog.Errorf("Error getting addresses from cloud for pod %s: %s", pod.Name, err.Error())
 		} else {
 			pod.Status.Addresses = append(pod.Status.Addresses, addys...)
 		}
diff --git a/pkg/server/cloud/aws/instances.go b/pkg/server/cloud/aws/instances.go
index 113d9a16..9b6b7053 100644
--- a/pkg/server/cloud/aws/instances.go
+++ b/pkg/server/cloud/aws/instances.go
@@ -11,8 +11,8 @@ import (
 	"github.com/elotl/cloud-instance-provider/pkg/api"
 	"github.com/elotl/cloud-instance-provider/pkg/server/cloud"
 	"github.com/elotl/cloud-instance-provider/pkg/util"
-	"github.com/golang/glog"
 	uuid "github.com/satori/go.uuid"
+	"k8s.io/klog"
 )
 
 const (
@@ -27,7 +27,7 @@ func (e *AwsEC2) StopInstance(instanceID string) error {
 		InstanceIds: awsInstanceIDs,
 	})
 	if err != nil {
-		glog.Errorf("Error terminating instance: %v", err)
+		klog.Errorf("Error terminating instance: %v", err)
 		// todo, check on status of instance, set status of instance
 		// based on that, prepare to come back and clean this
 		// inconsistency up
@@ -106,7 +106,7 @@ func (e *AwsEC2) getFirstVolume(instanceId string) *ec2.Volume {
 	}
 	result, err := e.client.DescribeVolumes(input)
 	if err != nil {
-		glog.Errorf("Error retrieving list of volumes attached to %s: %v",
+		klog.Errorf("Error retrieving list of volumes attached to %s: %v",
 			instanceId, err)
 		return nil
 	}
@@ -121,11 +121,11 @@ func (e *AwsEC2) ResizeVolume(node *api.Node, size int64) (error, bool) {
 			node.Name, vol), false
 	}
 	if *vol.Size >= size {
-		glog.Infof("Volume on node %s is %dGiB >= %dGiB",
+		klog.V(2).Infof("Volume on node %s is %dGiB >= %dGiB",
 			node.Name, *vol.Size, size)
 		return nil, false
 	}
-	glog.Infof("Resizing volume to %dGiB for node: %v", size, node)
+	klog.V(2).Infof("Resizing volume to %dGiB for node: %v", size, node)
 	result, err := e.client.ModifyVolume(&ec2.ModifyVolumeInput{
 		Size: aws.Int64(size),
 		VolumeId: aws.String(*vol.VolumeId),
@@ -147,7 +147,7 @@ func (e *AwsEC2) ResizeVolume(node *api.Node, size int64) (error, bool) {
 		statusmsg = *result.VolumeModification.StatusMessage
 	}
 	if targetsize != size {
-		glog.Errorf("Error resizing volume for %v to %dGiB: state %s status %s",
+		klog.Errorf("Error resizing volume for %v to %dGiB: state %s status %s",
 			node,
size, state, statusmsg) return util.WrapError(err, "Failed to resize volume"), false } @@ -160,11 +160,11 @@ func (e *AwsEC2) ResizeVolume(node *api.Node, size int64) (error, bool) { node.Name, vol), false } if *vol.Size >= size { - glog.Infof("Volume on node %s is %dGiB >= %dGiB", + klog.V(2).Infof("Volume on node %s is %dGiB >= %dGiB", node.Name, *vol.Size, size) return nil, true } else { - glog.Infof("Resizing volume on %s: currently %dGiB, requested %dGiB", + klog.V(2).Infof("Resizing volume on %s: currently %dGiB, requested %dGiB", node.Name, *vol.Size, size) } } @@ -192,12 +192,12 @@ func (e *AwsEC2) GetImageId(tags cloud.BootImageTags) (string, error) { } resp, err := e.client.DescribeImages(input) if err != nil { - glog.Errorf("Error getting image list for tags %v: %v", tags, err) + klog.Errorf("Error getting image list for tags %v: %v", tags, err) return "", err } if len(resp.Images) == 0 { msg := fmt.Sprintf("No images found for owner %v", e.imageOwnerID) - glog.Errorf("%s", msg) + klog.Errorf("%s", msg) return "", fmt.Errorf("%s", msg) } var images []cloud.Image @@ -212,7 +212,7 @@ func (e *AwsEC2) GetImageId(tags cloud.BootImageTags) (string, error) { } func (e *AwsEC2) StartNode(node *api.Node, metadata string) (*cloud.StartNodeResult, error) { - glog.Infof("Starting instance for node: %v", node) + klog.V(2).Infof("Starting instance for node: %v", node) tags := e.getNodeTags(node) tagSpec := ec2.TagSpecification{ ResourceType: aws.String("instance"), @@ -221,7 +221,7 @@ func (e *AwsEC2) StartNode(node *api.Node, metadata string) (*cloud.StartNodeRes volSizeGiB := cloud.ToSaneVolumeSize(node.Spec.Resources.VolumeSize) devices := e.getBlockDeviceMapping(volSizeGiB) networkSpec := e.getInstanceNetworkSpec(node.Spec.Resources.PrivateIPOnly) - glog.Infof("Starting node with security groups: %v subnet: '%s'", + klog.V(2).Infof("Starting node with security groups: %v subnet: '%s'", e.bootSecurityGroupIDs, e.subnetID) result, err := e.client.RunInstances(&ec2.RunInstancesInput{ ImageId: aws.String(node.Spec.BootImage), @@ -250,7 +250,7 @@ func (e *AwsEC2) StartNode(node *api.Node, metadata string) (*cloud.StartNodeRes return nil, fmt.Errorf("Could not get instance info at result.Instances") } cloudID := aws.StringValue(result.Instances[0].InstanceId) - glog.Infof("Started instance: %s", cloudID) + klog.V(2).Infof("Started instance: %s", cloudID) startResult := &cloud.StartNodeResult{ InstanceID: cloudID, AvailabilityZone: e.availabilityZone, @@ -261,7 +261,7 @@ func (e *AwsEC2) StartNode(node *api.Node, metadata string) (*cloud.StartNodeRes // This isn't terribly different from Start node but there are // some minor differences. 
We'll capture errors correctly here and there func (e *AwsEC2) StartSpotNode(node *api.Node, metadata string) (*cloud.StartNodeResult, error) { - glog.Infof("Starting instance for node: %v", node) + klog.V(2).Infof("Starting instance for node: %v", node) tags := e.getNodeTags(node) tagSpec := ec2.TagSpecification{ ResourceType: aws.String("instance"), @@ -269,11 +269,11 @@ func (e *AwsEC2) StartSpotNode(node *api.Node, metadata string) (*cloud.StartNod } var err error //var subnet *cloud.SubnetAttributes - glog.Infof("Starting spot node in: %s", e.subnetID) + klog.V(2).Infof("Starting spot node in: %s", e.subnetID) volSizeGiB := cloud.ToSaneVolumeSize(node.Spec.Resources.VolumeSize) devices := e.getBlockDeviceMapping(volSizeGiB) networkSpec := e.getInstanceNetworkSpec(node.Spec.Resources.PrivateIPOnly) - glog.Infof("Starting node with security groups: %v subnet: '%s'", + klog.V(2).Infof("Starting node with security groups: %v subnet: '%s'", e.bootSecurityGroupIDs, e.subnetID) result, err := e.client.RunInstances(&ec2.RunInstancesInput{ ImageId: aws.String(node.Spec.BootImage), @@ -312,7 +312,7 @@ func (e *AwsEC2) StartSpotNode(node *api.Node, metadata string) (*cloud.StartNod return nil, fmt.Errorf("Could not get instance info at result.Instances") } cloudID := aws.StringValue(result.Instances[0].InstanceId) - glog.Infof("Started instance: %s", cloudID) + klog.V(2).Infof("Started instance: %s", cloudID) startResult := &cloud.StartNodeResult{ InstanceID: cloudID, AvailabilityZone: e.availabilityZone, @@ -485,7 +485,7 @@ func (e *AwsEC2) listInstancesHelper(filters []*ec2.Filter) ([]cloud.CloudInstan func (e *AwsEC2) AddInstanceTags(iid string, labels map[string]string) error { awsTags, err := ec2TagsFromLabels(iid, labels) if err != nil { - glog.Warning(err) + klog.Warning(err) } if len(awsTags) > 0 { _, err = e.client.CreateTags(&ec2.CreateTagsInput{ diff --git a/pkg/server/cloud/aws/network.go b/pkg/server/cloud/aws/network.go index 90bfdb29..c17bad8e 100644 --- a/pkg/server/cloud/aws/network.go +++ b/pkg/server/cloud/aws/network.go @@ -9,7 +9,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/server/cloud" "github.com/elotl/cloud-instance-provider/pkg/util" "github.com/elotl/cloud-instance-provider/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) // Errors to be aware of in the future: @@ -34,7 +34,7 @@ func detectCurrentVPC() (string, error) { vpcPath := fmt.Sprintf("network/interfaces/macs/%s/vpc-id", mac) vpcResponse, err := GetMetadata(vpcPath) if err != nil { - glog.Errorf("Could not get vpc for mac address: %s\n", mac) + klog.Errorf("Could not get vpc for mac address: %s\n", mac) continue } vpcs.Insert(vpcResponse) @@ -60,7 +60,7 @@ func detectCurrentSubnet() (string, error) { idPath := fmt.Sprintf("network/interfaces/macs/%s/subnet-id", mac) response, err := GetMetadata(idPath) if err != nil { - glog.Errorf("Could not get subnet ID for mac address %s, %v\n", mac, err) + klog.Errorf("Could not get subnet ID for mac address %s, %v\n", mac, err) continue } subnetIDs.Insert(response) @@ -127,14 +127,14 @@ func (e *AwsEC2) assertVPCExists(vpcID string) (string, string, error) { return "", "", err } // // if we found a VPC, grab the data out of it - glog.Infoln("Current vpc: ", + klog.V(2).Infoln("Current vpc: ", aws.StringValue(vpcs[0].VpcId), aws.StringValue(vpcs[0].CidrBlock)) return aws.StringValue(vpcs[0].VpcId), aws.StringValue(vpcs[0].CidrBlock), nil } func (e *AwsEC2) GetSubnets() ([]cloud.SubnetAttributes, error) { - glog.Infof("Getting subnets and 
availability zones for VPC %s", e.vpcID) + klog.V(2).Infof("Getting subnets and availability zones for VPC %s", e.vpcID) vpcFilters := []*ec2.Filter{ { Name: aws.String("vpc-id"), @@ -182,7 +182,7 @@ func makeMilpaSubnets(awsSubnets []*ec2.Subnet, rts []*ec2.RouteTable) ([]cloud. addressType := cloud.PrivateAddress isPublic, err := isSubnetPublic(rts, subnetID) if err != nil { - glog.Errorf("could not compute if %s is public subnet: %v", subnetID, err) + klog.Errorf("could not compute if %s is public subnet: %v", subnetID, err) continue } if isPublic { @@ -240,7 +240,7 @@ func isSubnetPublic(rt []*ec2.RouteTable, subnetID string) (bool, error) { for _, table := range rt { for _, assoc := range table.Associations { if aws.BoolValue(assoc.Main) == true { - glog.V(4).Infof("Assuming implicit use of main routing table %s for %s", + klog.V(4).Infof("Assuming implicit use of main routing table %s for %s", aws.StringValue(table.RouteTableId), subnetID) subnetTable = table break diff --git a/pkg/server/cloud/aws/security_groups.go b/pkg/server/cloud/aws/security_groups.go index 24a860ce..622e681f 100644 --- a/pkg/server/cloud/aws/security_groups.go +++ b/pkg/server/cloud/aws/security_groups.go @@ -12,7 +12,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/server/cloud" "github.com/elotl/cloud-instance-provider/pkg/util" "github.com/elotl/cloud-instance-provider/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) func (c *AwsEC2) SetBootSecurityGroupIDs(ids []string) { @@ -54,7 +54,7 @@ func (c *AwsEC2) EnsureMilpaSecurityGroups(extraCIDRs, extraGroupIDs []string) e return util.WrapError(err, "Could not setup Milpa API cloud firewall rules") } ids := append(extraGroupIDs, apiGroup.ID) - glog.Infoln("security group name", apiGroupName, ids) + klog.V(2).Infoln("security group name", apiGroupName, ids) c.SetBootSecurityGroupIDs(ids) return nil } @@ -207,7 +207,7 @@ func (e *AwsEC2) DeleteSecurityGroup(groupID string) error { if err != nil { return util.WrapError(err, "Could not delete security group") } - glog.Infof("Deleted security group %s", groupID) + klog.V(2).Infof("Deleted security group %s", groupID) return nil } diff --git a/pkg/server/cloud/azure/image_controller.go b/pkg/server/cloud/azure/image_controller.go index ae8f4c90..57bf97c7 100644 --- a/pkg/server/cloud/azure/image_controller.go +++ b/pkg/server/cloud/azure/image_controller.go @@ -16,8 +16,8 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/server/cloud" "github.com/elotl/cloud-instance-provider/pkg/util/controllerqueue" "github.com/elotl/cloud-instance-provider/pkg/util/jitter" - "github.com/golang/glog" "github.com/uber-go/atomic" + "k8s.io/klog" ) const ( @@ -62,16 +62,16 @@ func (ic *ImageController) FullSyncLoop(quit <-chan struct{}, wg *sync.WaitGroup fullSyncTicker := jitter.NewTicker(20*time.Minute, 10*time.Minute) defer fullSyncTicker.Stop() if err := ic.syncBestBlob(); err != nil { - glog.Errorln("Error doing full sync of image controller:", err) + klog.Errorln("Error doing full sync of image controller:", err) } for { select { case <-fullSyncTicker.C: if err := ic.syncBestBlob(); err != nil { - glog.Errorln("Error doing full sync of image controller", err) + klog.Errorln("Error doing full sync of image controller", err) } case <-quit: - glog.Info("Exiting Azure Image Controller Sync Loop") + klog.V(2).Info("Exiting Azure Image Controller Sync Loop") return } } @@ -195,7 +195,7 @@ func (ic *ImageController) Dump() []byte { } b, err := json.MarshalIndent(dumpStruct, "", " ") if err != nil { - 
glog.Errorln("Error dumping data from Azure Image Controller", err) + klog.Errorln("Error dumping data from Azure Image Controller", err) return nil } return b @@ -214,7 +214,7 @@ func (ic *ImageController) getAccountPrimaryKey(accountName string) string { response, err := ic.az.storage.ListKeys( timeoutCtx, ic.resourceGroupName, accountName) if err != nil { - glog.Errorf("Failed to list account keys: %v", err) + klog.Errorf("Failed to list account keys: %v", err) return "" } return *(((*response.Keys)[0]).Value) @@ -276,7 +276,7 @@ func (ic *ImageController) ensureContainer(accountName, containername string) er timeoutCtx, azblob.LeaseAccessConditions{}) if err != nil { if !isContainerNotFoundError(err) { - glog.Errorf("Checking container %s failed: %v", containerName, err) + klog.Errorf("Checking container %s failed: %v", containerName, err) return err } timeoutCtx, cancel = context.WithTimeout(ctx, azureDefaultTimeout) @@ -284,7 +284,7 @@ func (ic *ImageController) ensureContainer(accountName, containername string) er _, err = container.Create( timeoutCtx, azblob.Metadata{}, azblob.PublicAccessNone) if err != nil { - glog.Errorf("Creating container %s failed: %v", containerName, err) + klog.Errorf("Creating container %s failed: %v", containerName, err) return err } } @@ -320,7 +320,7 @@ func (ic *ImageController) copyBlob(accountName, containerName, blobName string) } time.Sleep(3 * time.Second) } - glog.Infof("Copying blob %s finished", blobName) + klog.V(2).Infof("Copying blob %s finished", blobName) return dstBlob.String(), nil } @@ -342,7 +342,7 @@ func (ic *ImageController) syncSingleBlob(blobName string) error { ctx := context.Background() err := ic.ensureContainer(accountName, containerName) if err != nil { - glog.Errorf("Error checking container %s: %v", containerName, err) + klog.Errorf("Error checking container %s: %v", containerName, err) return err } timeoutCtx, cancel := context.WithTimeout(ctx, azureDefaultTimeout) @@ -352,12 +352,12 @@ func (ic *ImageController) syncSingleBlob(blobName string) error { _, err = blob.GetProperties(timeoutCtx, azblob.BlobAccessConditions{}) if err != nil { if !isBlobNotFoundError(err) { - glog.Errorf("Error checking blob %s: %v", blobName, err) + klog.Errorf("Error checking blob %s: %v", blobName, err) return err } url, err = ic.copyBlob(accountName, containerName, blobName) if err != nil { - glog.Errorf("Error copying blob %s: %v", blobName, err) + klog.Errorf("Error copying blob %s: %v", blobName, err) return err } } else { @@ -368,7 +368,7 @@ func (ic *ImageController) syncSingleBlob(blobName string) error { defer cancel() img, err := ic.az.images.Get(timeoutCtx, ic.resourceGroupName, imageName, "") if err != nil && !isNotFoundError(err) { - glog.Errorf("Error checking image %s: %v", imageName, err) + klog.Errorf("Error checking image %s: %v", imageName, err) return err } else if err == nil { if ic.imageParametersMatch(img) { @@ -378,7 +378,7 @@ func (ic *ImageController) syncSingleBlob(blobName string) error { } // Old image parameters don't match, warn the user that we // are recreating the image - glog.Warningln("Image parameters are out of sync, recreating image") + klog.Warningln("Image parameters are out of sync, recreating image") timeoutCtx, cancel = context.WithTimeout(ctx, azureDefaultTimeout) defer cancel() future, err := ic.az.images.Delete( @@ -439,14 +439,14 @@ func (ic *ImageController) syncSingleBlob(blobName string) error { return fmt.Errorf("Failed to finish creating image %s: %v\n", imageName, err) } - 
glog.Infof("Created image %s from blob %s", imageName, url) + klog.V(2).Infof("Created image %s from blob %s", imageName, url) return nil } func (ic *ImageController) WaitForAvailable() { for !ic.isSynced.Load() { - glog.Infoln("Waiting for azure disk image to sync") + klog.V(2).Infoln("Waiting for azure disk image to sync") time.Sleep(3 * time.Second) } - glog.Infoln("Image synced") + klog.V(2).Infoln("Image synced") } diff --git a/pkg/server/cloud/azure/instances.go b/pkg/server/cloud/azure/instances.go index ff750e64..b3bf1042 100644 --- a/pkg/server/cloud/azure/instances.go +++ b/pkg/server/cloud/azure/instances.go @@ -12,8 +12,8 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/api" "github.com/elotl/cloud-instance-provider/pkg/server/cloud" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/klog" ) var ( @@ -133,7 +133,7 @@ func (az *AzureClient) createNIC(instanceID string, ipID string) (string, error) } func (az *AzureClient) StartNode(node *api.Node, metadata string) (*cloud.StartNodeResult, error) { - glog.Infof("Starting instance for node: %v", node) + klog.V(2).Infof("Starting instance for node: %v", node) instanceID := makeInstanceID(az.controllerID, node.Name) err := az.createResourceGroup(instanceID) if err != nil { @@ -142,7 +142,7 @@ func (az *AzureClient) StartNode(node *api.Node, metadata string) (*cloud.StartN cleanup := func() { err := az.DeleteResourceGroup(instanceID) if err != nil { - glog.Errorln( + klog.Errorln( "Error deleting azure resource group after start failure", err, ) @@ -299,7 +299,7 @@ func (az *AzureClient) WaitForRunning(node *api.Node) ([]api.NetworkAddress, err } func (az *AzureClient) SetSustainedCPU(node *api.Node, enabled bool) error { - glog.Infoln("Setting sustained CPU in Azure has no impact") + klog.V(2).Infoln("Setting sustained CPU in Azure has no impact") return nil } @@ -409,7 +409,7 @@ func getSecurityGroupsFromInterface(iface network.Interface) []cloud.SecurityGro func (az *AzureClient) AddInstanceTags(iid string, labels map[string]string) error { newTags, err := filterLabelsForTags(iid, labels) if err != nil { - glog.Warning(err) + klog.Warning(err) } if len(newTags) > 0 { ctx := context.Background() diff --git a/pkg/server/cloud/azure/metadata.go b/pkg/server/cloud/azure/metadata.go index e9da4130..2eb3bec9 100644 --- a/pkg/server/cloud/azure/metadata.go +++ b/pkg/server/cloud/azure/metadata.go @@ -7,7 +7,7 @@ import ( "time" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -54,7 +54,7 @@ func getMetadataInstanceName() (string, string) { } err = json.Unmarshal([]byte(data), &vm) if err != nil { - glog.Errorln("Could not unmarshal azure instance metadata", err.Error()) + klog.Errorln("Could not unmarshal azure instance metadata", err.Error()) return "", "" } return vm.ResourceGroupName, vm.Name diff --git a/pkg/server/cloud/azure/network.go b/pkg/server/cloud/azure/network.go index 8c33d498..8b6cc4cc 100644 --- a/pkg/server/cloud/azure/network.go +++ b/pkg/server/cloud/azure/network.go @@ -13,7 +13,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/server/cloud" "github.com/elotl/cloud-instance-provider/pkg/util" "github.com/elotl/cloud-instance-provider/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) type VirtualNetworkAttributes struct { @@ -126,7 +126,7 @@ func (az *AzureClient) GetVMNetworks(vmResourceGroup, vmName string) (vNets, sub defer cancel() vm, err := 
az.vms.Get(timeoutCtx, vmResourceGroup, vmName, "") if err != nil { - glog.Infof("Could not find controller VM %s/%s in subscription", + klog.V(2).Infof("Could not find controller VM %s/%s in subscription", vmResourceGroup, vmName) return } @@ -143,7 +143,7 @@ func (az *AzureClient) GetVMNetworks(vmResourceGroup, vmName string) (vNets, sub } details, err := azure.ParseResourceID(nicID) if err != nil { - glog.Errorln("Error parsing resource ID for controller NIC", err) + klog.Errorln("Error parsing resource ID for controller NIC", err) continue } nicResourceGroup := details.ResourceGroup @@ -152,7 +152,7 @@ func (az *AzureClient) GetVMNetworks(vmResourceGroup, vmName string) (vNets, sub defer cancel() nic, err := az.nics.Get(timeoutCtx, nicResourceGroup, nicName, "") if err != nil { - glog.Infof("Could not find controller NIC %s/%s in subscription", + klog.V(2).Infof("Could not find controller NIC %s/%s in subscription", vmResourceGroup, vmName) continue } @@ -192,13 +192,13 @@ func (az *AzureClient) getLocalInstanceNetwork() (VirtualNetworkAttributes, clou } else if len(vNetNames) > 1 { return vNet, subnet, fmt.Errorf("Multiple virtual networks are attached to this instance and it is impossible to tell which network nodes should be launched into. A virtualNetworkName will need to be specified in the cloud.azure section server.yml") } - glog.Infof("local machine is connected to virtual network %s", vNetNames[0]) + klog.V(2).Infof("local machine is connected to virtual network %s", vNetNames[0]) if len(subnetNames) == 0 { return vNet, subnet, fmt.Errorf("could not detect which subnet the controller is attached to. A subnetName will need to be specified in server.yml") } else if len(subnetNames) > 1 { return vNet, subnet, fmt.Errorf("Multiple subnets are attached to this instance and it is impossible to tell which subnet nodes should be launched into. 
A subnetName will need to be specified in the cloud.azure section server.yml")
 	}
-	glog.Infof("local machine is connected to subnet %s", subnetNames[0])
+	klog.V(2).Infof("local machine is connected to subnet %s", subnetNames[0])
 
 	vNet, err := az.getVNet(vNetNames[0])
 	if err != nil {
@@ -245,7 +245,7 @@ func (az *AzureClient) ModifySourceDestinationCheck(instanceID string, isEnabled
 	if err != nil {
 		return err
 	}
-	glog.Infof("enabled src/dst check on %q", instanceID)
+	klog.V(2).Infof("enabled src/dst check on %q", instanceID)
 	return nil
 }
 
@@ -313,7 +313,7 @@ func (az *AzureClient) RemoveRoute(destinationCIDR string) error {
 			if err != nil {
 				return err
 			}
-			glog.Infof("removed route for %q", destinationCIDR)
+			klog.V(2).Infof("removed route for %q", destinationCIDR)
 		}
 	}
 	return nil
@@ -369,7 +369,7 @@ func (az *AzureClient) AddRoute(destinationCIDR, instanceID string) error {
 	if err != nil {
 		return util.WrapError(err, "adding route")
 	}
-	glog.Infof("created route %q via VM %q", destinationCIDR, instanceID)
+	klog.V(2).Infof("created route %q via VM %q", destinationCIDR, instanceID)
 	return nil
 }
diff --git a/pkg/server/cloud/azure/security_groups.go b/pkg/server/cloud/azure/security_groups.go
index a2f42f37..a76d50ce 100644
--- a/pkg/server/cloud/azure/security_groups.go
+++ b/pkg/server/cloud/azure/security_groups.go
@@ -10,7 +10,7 @@ import (
 	"github.com/elotl/cloud-instance-provider/pkg/api"
 	"github.com/elotl/cloud-instance-provider/pkg/server/cloud"
 	"github.com/elotl/cloud-instance-provider/pkg/util"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 func (az *AzureClient) EnsureMilpaNSG() error {
@@ -32,7 +32,7 @@ func (az *AzureClient) EnsureMilpaNSG() error {
 	if err != nil {
 		return util.WrapError(err, "Error creating cluster network security group %s", nsgName)
 	}
-	glog.Infof("Creating cluster network security group %s", nsgName)
+	klog.V(2).Infof("Creating cluster network security group %s", nsgName)
 	timeoutCtx, cancel = context.WithTimeout(ctx, azureWaitTimeout)
 	defer cancel()
 	err = future.WaitForCompletionRef(timeoutCtx, az.nsgs.Client)
diff --git a/pkg/server/cloud/cloud.go b/pkg/server/cloud/cloud.go
index da733aba..499b7b9e 100644
--- a/pkg/server/cloud/cloud.go
+++ b/pkg/server/cloud/cloud.go
@@ -10,7 +10,7 @@ import (
 	"github.com/elotl/cloud-instance-provider/pkg/api"
 	"github.com/elotl/cloud-instance-provider/pkg/util/sets"
-	"github.com/golang/glog"
+	"k8s.io/klog"
 )
 
 const MilpaAPISGName = "NodeSecurityGroup"
@@ -127,7 +127,7 @@ func FilterImages(images []Image, tags BootImageTags) []Image {
 		t := BootImageTags{}
 		t.Set(img.Name)
 		if t.Matches(tags) {
-			glog.Infof("Found image %s matching filter %+v", img.Name, tags)
+			klog.V(4).Infof("Found image %s matching filter %+v", img.Name, tags)
 			result = append(result, img)
 		}
 	}
@@ -142,22 +142,22 @@ func SortImages(images []Image) {
 		bitI.Set(images[i].Name)
 		versionI, err := strconv.ParseUint(bitI.Version, 10, 32)
 		if err != nil {
-			glog.Warningf("Getting version for image %+v: %v", bitI, err)
+			klog.Warningf("Getting version for image %+v: %v", bitI, err)
 		}
 		dateI, err := bitI.Timestamp()
 		if err != nil {
-			glog.Warningf("Getting timestamp for image %+v: %v", bitI, err)
+			klog.Warningf("Getting timestamp for image %+v: %v", bitI, err)
 			dateI = time.Unix(0, 0)
 		}
 		bitJ := BootImageTags{}
 		bitJ.Set(images[j].Name)
 		versionJ, err := strconv.ParseUint(bitJ.Version, 10, 32)
 		if err != nil {
-			glog.Warningf("Getting version for image %+v: %v", bitI, err)
+			klog.Warningf("Getting version for image %+v: %v", bitJ, err)
 		}
 		dateJ, err := bitJ.Timestamp()
 		if err != nil {
-			glog.Warningf("Getting timestamp for image %+v: %v", bitI, err)
+			klog.Warningf("Getting timestamp for image %+v: %v", bitJ, err)
 			dateJ = time.Unix(0, 0)
 		}
 		if versionI != versionJ {
@@ -175,7 +175,7 @@ func GetBestImage(images []Image, tags BootImageTags) (string, error) {
 		return "", err
 	}
 	latest := images[len(images)-1].Id
-	glog.Infof("Found image %s for tags %v", latest, tags)
+	klog.V(2).Infof("Found image %s for tags %v", latest, tags)
 	return latest, nil
 }
diff --git a/pkg/server/cloud/common.go b/pkg/server/cloud/common.go
index 7f6df65d..7cd5cc46 100644
--- a/pkg/server/cloud/common.go
+++ b/pkg/server/cloud/common.go
@@ -3,8 +3,8 @@ package cloud
 import (
 	"github.com/elotl/cloud-instance-provider/pkg/api"
 	"github.com/elotl/cloud-instance-provider/pkg/util"
-	"github.com/golang/glog"
 	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/klog"
 )
 
 // Diffing rules got a little nasty... We take the cross of the
@@ -85,7 +85,7 @@ func ToSaneVolumeSize(volSizeSpec string) int32 {
 		// we're not carrying that values around anywhere. We could
 		// somehow make that value a global var but it seemed like I
 		// would start abusing that out of lazyness.
-		glog.Errorln("Empty volume size found in resource spec, setting to reasonable value")
+		klog.Errorln("Empty volume size found in resource spec, setting to reasonable value")
 		volSizeGiB = 8
 	}
 	return volSizeGiB
diff --git a/pkg/server/cloud/status.go b/pkg/server/cloud/status.go
index b9a20e6a..afa5f274 100644
--- a/pkg/server/cloud/status.go
+++ b/pkg/server/cloud/status.go
@@ -7,8 +7,8 @@ import (
 	"time"
 
 	"github.com/elotl/cloud-instance-provider/pkg/util/timeoutmap"
-	"github.com/golang/glog"
 	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/klog"
 )
 
 const (
@@ -81,7 +81,7 @@ func (s *subnetPoller) runRefreshLoop() {
 		case <-ticker.C:
 			subnets, err := s.client.GetSubnets()
 			if err != nil {
-				glog.Errorf("Error refreshing cloud subnet info: %s, continuing with cached data", err)
+				klog.Errorf("Error refreshing cloud subnet info: %s, continuing with cached data", err)
 				continue
 			}
 			s.Lock()
@@ -90,7 +90,7 @@
 			availabilityZones, err := s.client.GetAvailabilityZones()
 			if err != nil {
-				glog.Errorf("Error refreshing cloud availability zone info: %s, continuing with cached data", err)
+				klog.Errorf("Error refreshing cloud availability zone info: %s, continuing with cached data", err)
 				continue
 			}
 			s.Lock()
@@ -159,7 +159,7 @@ func (s *LinkedAZSubnetStatus) Dump() []byte {
 	}
 	b, err := json.MarshalIndent(dumpStruct, "", " ")
 	if err != nil {
-		glog.Errorln("Error dumping data from cloud.Status", err)
+		klog.Errorln("Error dumping data from cloud.Status", err)
 		return nil
 	}
 	return b
@@ -241,7 +241,7 @@ func (s *LinkedAZSubnetStatus) AddUnavailableInstance(instanceType string, spot
 }
 
 func (s *LinkedAZSubnetStatus) AddUnavailableZone(instanceType string, spot bool, zone string) {
-	glog.Infof("Adding unavailable zone %s for instance type %s", zone, instanceType)
+	klog.V(2).Infof("Adding unavailable zone %s for instance type %s", zone, instanceType)
 	s.RLock()
 	defer s.RUnlock()
 	for i, _ := range s.subnets {
@@ -252,7 +252,7 @@ func (s *LinkedAZSubnetStatus) AddUnavailableZone(instanceType string, spot bool
 }
 
 func (s *LinkedAZSubnetStatus) AddUnavailableSubnet(instanceType string, spot bool, subnetID string) {
-	glog.Infof("Adding unavailable subnet %s for instance type %s", subnetID, instanceType)
+	klog.V(2).Infof("Adding unavailable subnet %s for instance type %s", subnetID, instanceType)
 	key := makeUnavailableKey(instanceType,
spot, subnetID) // only update the entry if it doesn't already exist. It might be // tempting to always update the object but that could lead to a @@ -344,7 +344,7 @@ func (s *AZSubnetStatus) Dump() []byte { } b, err := json.MarshalIndent(dumpStruct, "", " ") if err != nil { - glog.Errorln("Error dumping data from cloud.Status", err) + klog.Errorln("Error dumping data from cloud.Status", err) return nil } return b @@ -399,7 +399,7 @@ func (s *AZSubnetStatus) GetAvailableSubnets(instanceType string, spot, privateI } func (s *AZSubnetStatus) AddUnavailableZone(instanceType string, spot bool, zone string) { - glog.Infof("Adding unavailable zone %s for instance type %s", zone, instanceType) + klog.V(2).Infof("Adding unavailable zone %s for instance type %s", zone, instanceType) key := makeUnavailableKey(instanceType, spot, zone) _, exists := s.unavailableZones.Get(key) if !exists { @@ -408,7 +408,7 @@ func (s *AZSubnetStatus) AddUnavailableZone(instanceType string, spot bool, zone } func (s *AZSubnetStatus) AddUnavailableSubnet(instanceType string, spot bool, subnetID string) { - glog.Infof("Adding unavailable subnet %s for instance type %s", subnetID, instanceType) + klog.V(2).Infof("Adding unavailable subnet %s for instance type %s", subnetID, instanceType) key := makeUnavailableKey(instanceType, spot, subnetID) // only update the entry if it doesn't already exist. It might be // tempting to always update the object but that could lead to a diff --git a/pkg/server/config.go b/pkg/server/config.go index 7def9fe8..4779e4eb 100644 --- a/pkg/server/config.go +++ b/pkg/server/config.go @@ -17,8 +17,8 @@ import ( vutil "github.com/elotl/cloud-instance-provider/pkg/util/validation" "github.com/elotl/cloud-instance-provider/pkg/util/validation/field" "github.com/elotl/cloud-instance-provider/pkg/util/yaml" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog" ) const ( @@ -177,11 +177,11 @@ func setupAwsEnvVars(c *AWSConfig) error { return err } } - glog.Infof("Validating connection to AWS") + klog.V(2).Infof("Validating connection to AWS") if err := aws.CheckConnection(); err != nil { return util.WrapError(err, "Error validationg connection to AWS") } - glog.Infof("Validated access to AWS") + klog.V(2).Infof("Validated access to AWS") return nil } @@ -198,11 +198,11 @@ func setupAzureEnvVars(c *AzureConfig) error { if err := os.Setenv("SUBSCRIPTION_ID", c.SubscriptionID); err != nil { return err } - glog.Infof("Validating connection to Azure") + klog.V(2).Infof("Validating connection to Azure") if err := azure.CheckConnection(c.SubscriptionID); err != nil { return util.WrapError(err, "Error validationg connection to Azure") } - glog.Infof("Validated access to Azure") + klog.V(2).Infof("Validated access to Azure") return nil } @@ -301,9 +301,9 @@ func ConfigureCloud(configFile *ServerConfigFile, controllerID, nametag string) usePublicIPs := !cloudClient.ControllerInsideVPC() if usePublicIPs { - glog.Infof("controller is outside the cloud network, connecting via public IPs") + klog.V(2).Infof("controller is outside the cloud network, connecting via public IPs") } else { - glog.Infof("controller is inside the cloud network, connecting via private IPs") + klog.V(2).Infof("controller is inside the cloud network, connecting via private IPs") } err = cloudClient.EnsureMilpaSecurityGroups( configFile.Cells.ExtraCIDRs, diff --git a/pkg/server/controller_manager.go b/pkg/server/controller_manager.go index 4808eb01..01fa42db 100644 --- a/pkg/server/controller_manager.go +++ 
b/pkg/server/controller_manager.go
@@ -3,8 +3,8 @@ package server
 import (
 	"sync"
 
-	"github.com/golang/glog"
 	"github.com/uber-go/atomic"
+	"k8s.io/klog"
 )
 
 // The ControllerManager was created to make the interaction
@@ -81,7 +81,7 @@ func (cm *ControllerManager) WaitForShutdown(systemShutdown <-chan struct{}, sys
 	select {
 	case <-systemShutdown:
-		glog.Infof("Shutting down controllers")
+		klog.V(2).Infof("Shutting down controllers")
 		cm.StopControllers()
 		return
 	}
@@ -89,28 +89,28 @@
 func (cm *ControllerManager) startControllersHelper() {
 	if cm.ControllersRunning() {
-		glog.Warning("Asked to start controllers but they are already running")
+		klog.Warning("Asked to start controllers but they are already running")
 		return
 	}
-	glog.Info("Starting controllers")
+	klog.V(2).Info("Starting controllers")
 	cm.controllerQuit = make(chan struct{})
 	cm.controllerWaitGroup = &sync.WaitGroup{}
 	cm.controllersRunning.Store(true)
 	for name, controller := range cm.controllers {
-		glog.Infof("Starting %s", name)
+		klog.V(2).Infof("Starting %s", name)
 		go controller.Start(cm.controllerQuit, cm.controllerWaitGroup)
 	}
-	glog.Info("Finished starting controllers")
+	klog.V(2).Info("Finished starting controllers")
 }
 
 func (cm *ControllerManager) stopControllersHelper() {
 	if !cm.ControllersRunning() {
-		glog.Warning("Asked to stop controllers but they are not running")
+		klog.Warning("Asked to stop controllers but they are not running")
 		return
 	}
-	glog.Info("Starting to stop controllers")
+	klog.V(2).Info("Starting to stop controllers")
 	close(cm.controllerQuit)
 	cm.controllerWaitGroup.Wait()
 	cm.controllersRunning.Store(false)
-	glog.Info("All controllers stopped")
+	klog.V(2).Info("All controllers stopped")
 }
diff --git a/pkg/server/convert.go b/pkg/server/convert.go
index 20233a14..667947e9 100644
--- a/pkg/server/convert.go
+++ b/pkg/server/convert.go
@@ -6,11 +6,11 @@ import (
 	"github.com/elotl/cloud-instance-provider/pkg/api"
 	"github.com/elotl/cloud-instance-provider/pkg/util"
 	k8sutil "github.com/elotl/cloud-instance-provider/pkg/util/k8s"
-	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/klog"
 )
 
 const (
@@ -279,7 +279,7 @@ func k8sToMilpaVolume(vol v1.Volume) *api.Volume {
 			},
 		}
 	} else {
-		glog.Warningf("Unsupported volume type for volume: %s", vol.Name)
+		klog.Warningf("Unsupported volume type for volume: %s", vol.Name)
 		return &api.Volume{
 			Name: vol.Name,
 			VolumeSource: api.VolumeSource{
@@ -347,7 +347,7 @@ func milpaToK8sVolume(vol api.Volume) *v1.Volume {
 			},
 		}
 	} else {
-		glog.Warningf("Unspported volume type for volume: %s", vol.Name)
+		klog.Warningf("Unsupported volume type for volume: %s", vol.Name)
 	}
 	return nil
 }
diff --git a/pkg/server/create.go b/pkg/server/create.go
index df57c2bb..75f18456 100644
--- a/pkg/server/create.go
+++ b/pkg/server/create.go
@@ -7,8 +7,8 @@ import (
 	"github.com/elotl/cloud-instance-provider/pkg/clientapi"
 	"github.com/elotl/cloud-instance-provider/pkg/util"
 	"github.com/elotl/cloud-instance-provider/pkg/util/yaml"
-	"github.com/golang/glog"
 	"golang.org/x/net/context"
+	"k8s.io/klog"
 )
 
 const bufferSize = 16000
@@ -41,7 +41,7 @@ func (s InstanceProvider) Create(context context.Context, request *clientapi.Cre
 		return errToAPIReply(util.WrapError(err, "Error creating resource")), nil
 	}
 
-	glog.Infof("Creating: %s", objectKind)
+	klog.V(2).Infof("Creating: %s", objectKind)
 	replyObj, err :=
store.Create(milpaObj) if err != nil { return errToAPIReply(util.WrapError(err, "Error creating resource")), nil diff --git a/pkg/server/delete.go b/pkg/server/delete.go index 470a8c16..42f6b1f0 100644 --- a/pkg/server/delete.go +++ b/pkg/server/delete.go @@ -7,7 +7,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/api" "github.com/elotl/cloud-instance-provider/pkg/clientapi" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" + "k8s.io/klog" ) func (s InstanceProvider) deleteHelper(kind, name string, cascade bool) (api.MilpaObject, error) { @@ -33,7 +33,7 @@ func (s InstanceProvider) Delete(context context.Context, request *clientapi.Del kind = kind[0 : len(kind)-1] } name := string(request.Name) - glog.Infof("Delete request for: %s - %s", kind, name) + klog.V(2).Infof("Delete request for: %s - %s", kind, name) replyObj, err := s.deleteHelper(kind, name, request.Cascade) if err != nil { diff --git a/pkg/server/deploy.go b/pkg/server/deploy.go index 4943ecc1..aebd6a1d 100644 --- a/pkg/server/deploy.go +++ b/pkg/server/deploy.go @@ -9,7 +9,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/clientapi" "github.com/elotl/cloud-instance-provider/pkg/server/registry" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" + "k8s.io/klog" ) func (s InstanceProvider) deploy(podName, pkgName string, pkgData io.Reader) error { @@ -65,7 +65,7 @@ func (s InstanceProvider) Deploy(stream clientapi.Milpa_DeployServer) error { break } if err != nil { - glog.Errorf("Failed to receive deploy request: %v", err) + klog.Errorf("Failed to receive deploy request: %v", err) return util.WrapError(err, "Failed to receive deploy request") } pod = req.ResourceName @@ -102,6 +102,6 @@ func (s InstanceProvider) Deploy(stream clientapi.Milpa_DeployServer) error { Status: 200, Body: []byte("{}"), } - glog.Infof("Deployed package %s for %s", name, pod) + klog.V(2).Infof("Deployed package %s for %s", name, pod) return stream.SendAndClose(&reply) } diff --git a/pkg/server/deploy_util.go b/pkg/server/deploy_util.go index 62710a5c..463cf172 100644 --- a/pkg/server/deploy_util.go +++ b/pkg/server/deploy_util.go @@ -134,7 +134,7 @@ func getSecretFiles(secVol *api.SecretVolumeSource, sec *v1.Secret) (map[string] // if err != nil { // msg := fmt.Sprintf("volume %s items %s/%s references improperly formatted key %s: %v", secVol.SecretName, sec.Namespace, sec.Name, item.Key, err) // if optional { - // glog.Warning(msg) + // klog.Warning(msg) // continue // } // return nil, fmt.Errorf(msg) diff --git a/pkg/server/dump.go b/pkg/server/dump.go index b44611f2..bbbe0192 100644 --- a/pkg/server/dump.go +++ b/pkg/server/dump.go @@ -6,8 +6,8 @@ import ( "runtime/pprof" "github.com/elotl/cloud-instance-provider/pkg/clientapi" - "github.com/golang/glog" "golang.org/x/net/context" + "k8s.io/klog" ) func (s InstanceProvider) dumpController(name string) ([]byte, error) { @@ -51,7 +51,7 @@ func dumpStack() ([]byte, error) { } func (s InstanceProvider) Dump(context context.Context, request *clientapi.DumpRequest) (*clientapi.APIReply, error) { - glog.Infof("Dump request for: %s", request.Kind) + klog.V(2).Infof("Dump request for: %s", request.Kind) kind := string(request.Kind) var b []byte var err error diff --git a/pkg/server/errors.go b/pkg/server/errors.go index b0299d03..f74fa8dc 100644 --- a/pkg/server/errors.go +++ b/pkg/server/errors.go @@ -8,7 +8,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/clientapi" 
"github.com/elotl/cloud-instance-provider/pkg/server/registry" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -33,7 +33,7 @@ func errToAPIReplyHelper(origErr error, errMsg string) *clientapi.APIReply { default: output, marshallErr := json.Marshal(errMsg) if marshallErr != nil { - glog.Errorf("Could not marshal error: %v", marshallErr) + klog.Errorf("Could not marshal error: %v", marshallErr) // ain't json but thats ok, dont' call marshall recursively output = []byte(marshallErr.Error()) } diff --git a/pkg/server/events/events.go b/pkg/server/events/events.go index 13042893..cec02d05 100644 --- a/pkg/server/events/events.go +++ b/pkg/server/events/events.go @@ -5,7 +5,7 @@ import ( "reflect" "sync" - "github.com/golang/glog" + "k8s.io/klog" ) var ArbitraryChanSize = 10000 @@ -129,16 +129,16 @@ func (es *EventSystem) Run(quit <-chan struct{}, wg *sync.WaitGroup) { for _, eh := range handlers { eventCpy := copyEvent(event) if reflect.ValueOf(eventCpy.Object).Kind() != reflect.Ptr { - glog.Errorf("Event objects must be pointers: %+v", event) + klog.Errorf("Event objects must be pointers: %+v", event) break } err := eh.Handle(eventCpy) if err != nil { - glog.Errorf("Error in %s event handler: %v", event.Status, err) + klog.Errorf("Error in %s event handler: %v", event.Status, err) } } case <-quit: - glog.Info("Stopping Events System") + klog.V(2).Info("Stopping Events System") return } } diff --git a/pkg/server/garbage_controller.go b/pkg/server/garbage_controller.go index 6d66a0b0..85a30bb8 100644 --- a/pkg/server/garbage_controller.go +++ b/pkg/server/garbage_controller.go @@ -13,7 +13,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/server/registry" "github.com/elotl/cloud-instance-provider/pkg/util/sets" "github.com/elotl/cloud-instance-provider/pkg/util/stats" - "github.com/golang/glog" + "k8s.io/klog" ) var lastUnknownInstances map[string]bool @@ -46,7 +46,7 @@ func (c *GarbageController) Start(quit <-chan struct{}, wg *sync.WaitGroup) { func (c *GarbageController) Dump() []byte { b, err := json.MarshalIndent(c.timer, "", " ") if err != nil { - glog.Errorln("Error dumping data from GarbageController", err) + klog.Errorln("Error dumping data from GarbageController", err) return nil } return b @@ -66,7 +66,7 @@ func (c *GarbageController) GCLoop(quit <-chan struct{}, wg *sync.WaitGroup) { // are timing out talking to etcd, lets give quit priority select { case <-quit: - glog.Info("Stopping GarbageController") + klog.V(2).Info("Stopping GarbageController") return default: } @@ -80,7 +80,7 @@ func (c *GarbageController) GCLoop(quit <-chan struct{}, wg *sync.WaitGroup) { case <-cleanResourceGroupsTicker.C: c.CleanAzureResourceGroups() case <-quit: - glog.Info("Stopping GarbageController") + klog.V(2).Info("Stopping GarbageController") return } } @@ -91,19 +91,19 @@ func (c *GarbageController) CleanTerminatedNodes() { return n.Status.Phase == api.NodeTerminated }) if err != nil { - glog.Errorln("Couldn't list terminated nodes", err) + klog.Errorln("Couldn't list terminated nodes", err) } now := api.Now() for _, node := range nodes.Items { if node.DeletionTimestamp == nil { - glog.Warningf("Found node with nil deletion timestamp") + klog.Warningf("Found node with nil deletion timestamp") _, _ = c.nodeRegistry.SetNodeDeletionTimestamp(node) } if node.DeletionTimestamp != nil && node.DeletionTimestamp.Add(30*time.Second).Before(now) { _, err = c.nodeRegistry.PurgeNode(node) if err != nil { - glog.Errorf("Error purging 
terminated nodes") + klog.Errorf("Error purging terminated nodes") } } } @@ -113,7 +113,7 @@ func (c *GarbageController) CleanInstances() { unknownInstances := make(map[string]bool) nodes, err := c.nodeRegistry.ListNodes(registry.MatchAllNodes) if err != nil { - glog.Errorf("Error listing nodes in GC: %s", err.Error()) + klog.Errorf("Error listing nodes in GC: %s", err.Error()) return } nodeSet := make(map[string]bool) @@ -121,12 +121,12 @@ func (c *GarbageController) CleanInstances() { nodeSet[node.Name] = true } if err != nil { - glog.Errorf("Error listing nodes for instance cleaning: %s", err.Error()) + klog.Errorf("Error listing nodes for instance cleaning: %s", err.Error()) return } instances, err := c.cloudClient.ListInstances() if err != nil { - glog.Errorf("Error listing cloud instances: %s", err.Error()) + klog.Errorf("Error listing cloud instances: %s", err.Error()) return } for _, inst := range instances { @@ -136,11 +136,11 @@ func (c *GarbageController) CleanInstances() { } for iid, _ := range unknownInstances { if lastUnknownInstances[iid] { - glog.Errorf("Stopping unknown cloud instance %s", iid) + klog.Errorf("Stopping unknown cloud instance %s", iid) go func() { err := c.cloudClient.StopInstance(iid) if err != nil { - glog.Error(err) + klog.Error(err) } }() } @@ -155,7 +155,7 @@ func (c *GarbageController) CleanAzureResourceGroups() { } err := c.CleanAzureResourceGroupsHelper(az) if err != nil { - glog.Error(err) + klog.Error(err) } } @@ -169,7 +169,7 @@ func (c *GarbageController) cleanFargateTaskDefs() { } taskDefARNs, err := client.ListTaskDefinitions() if err != nil { - glog.Errorln("Error listing ECS Fargate task definitions for cleanup:", err) + klog.Errorln("Error listing ECS Fargate task definitions for cleanup:", err) return } if len(taskDefARNs) == 0 { @@ -177,7 +177,7 @@ func (c *GarbageController) cleanFargateTaskDefs() { } pods, err := c.podRegistry.ListPods(registry.MatchAllPods) if err != nil { - glog.Errorln("Error listing pods when cleaning up fargate task definitions", err) + klog.Errorln("Error listing pods when cleaning up fargate task definitions", err) } podNames := sets.NewString() for i := range pods.Items { @@ -188,7 +188,7 @@ func (c *GarbageController) cleanFargateTaskDefs() { for _, taskDefARN := range doomedTaskDefARNs.List() { err := client.DeregisterTaskDefinition(taskDefARN) if err != nil { - glog.Errorln("Error cleaning up old task definition", taskDefARN) + klog.Errorln("Error cleaning up old task definition", taskDefARN) } } c.lastOldTaskDefs = oldTaskDefs @@ -259,12 +259,12 @@ func (c *GarbageController) CleanAzureResourceGroupsHelper(client ResourceGroupe c.lastOrphanedAzureGroups = newOrphaned if doomedGroups.Len() > 0 { - glog.Errorf("Deleting %d orphaned azure resource groups: %v", doomedGroups.Len(), doomedGroups.List()) + klog.Errorf("Deleting %d orphaned azure resource groups: %v", doomedGroups.Len(), doomedGroups.List()) } for _, groupName := range doomedGroups.List() { err := client.DeleteResourceGroup(groupName) if err != nil { - glog.Errorf("Error deleting orphaned resource group: %s", err.Error()) + klog.Errorf("Error deleting orphaned resource group: %s", err.Error()) } } return nil diff --git a/pkg/server/getcontainerlogs.go b/pkg/server/getcontainerlogs.go index da9b617c..f183a14f 100644 --- a/pkg/server/getcontainerlogs.go +++ b/pkg/server/getcontainerlogs.go @@ -10,26 +10,26 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/nodeclient" "github.com/elotl/cloud-instance-provider/pkg/util" 
"github.com/elotl/wsstream" - "github.com/golang/glog" vkapi "github.com/virtual-kubelet/virtual-kubelet/node/api" "github.com/virtual-kubelet/virtual-kubelet/trace" + "k8s.io/klog" ) func (p *InstanceProvider) GetContainerLogs(ctx context.Context, namespace, podName, containerName string, opts vkapi.ContainerLogOpts) (io.ReadCloser, error) { ctx, span := trace.StartSpan(ctx, "GetContainerLogs") defer span.End() ctx = addAttributes(ctx, span, namespaceKey, namespace, nameKey, podName, containerNameKey, containerName) - glog.Infof("GetContainerLogs %+v", opts) + klog.V(5).Infof("GetContainerLogs %+v", opts) // Pending PR: https://github.com/virtual-kubelet/virtual-kubelet/pull/806 // follow := opts.Follow follow := false podName = util.WithNamespace(namespace, podName) node, err := p.GetNodeForRunningPod(podName, "") if !follow || err != nil || node == nil || len(node.Status.Addresses) == 0 { - glog.V(4).Infof("pulling logs for pod %+v", opts) + klog.V(5).Infof("pulling logs for pod %+v", opts) return p.getContainerLogs(podName, containerName, opts) } - glog.V(4).Infof("tailing logs for pod %+v", opts) + klog.V(5).Infof("tailing logs for pod %+v", opts) return p.tailContainerLogs(node, podName, containerName, opts) } @@ -70,33 +70,33 @@ type containerLogs struct { } func (l *containerLogs) Read(buf []byte) (int, error) { - glog.V(4).Infof("reading logs from ws stream") + klog.V(5).Infof("reading logs from ws stream") n := 0 if len(l.buf) > 0 { - glog.V(4).Infof("reading %d bytes from buffer", len(l.buf)) + klog.V(5).Infof("reading %d bytes from buffer", len(l.buf)) n = copy(buf, l.buf) l.buf = l.buf[n:] return n, nil } select { case <-l.ws.Closed(): - glog.V(4).Infof("ws stream closed") + klog.V(5).Infof("ws stream closed") return 0, io.EOF case frame := <-l.ws.ReadMsg(): n, b, err := wsstream.UnpackMessage(frame) if err != nil { - glog.Errorf("reading ws stream: %v", err) + klog.Errorf("reading ws stream: %v", err) return 0, err } - glog.V(4).Infof("read %d bytes from ws stream", n) + klog.V(5).Infof("read %d bytes from ws stream", n) n = copy(buf, b) l.buf = append(l.buf[:], b[n:]...) 
- glog.V(4).Infof("copied %d bytes from ws stream", n) + klog.V(5).Infof("copied %d bytes from ws stream", n) return n, nil } } func (l *containerLogs) Close() error { - glog.V(4).Infof("closing ws stream") + klog.V(5).Infof("closing ws stream") return l.ws.CloseAndCleanup() } diff --git a/pkg/server/getstatssummary.go b/pkg/server/getstatssummary.go index ff3ef4d6..d460acbc 100644 --- a/pkg/server/getstatssummary.go +++ b/pkg/server/getstatssummary.go @@ -7,9 +7,9 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/api" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" "github.com/virtual-kubelet/virtual-kubelet/trace" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1" ) @@ -23,7 +23,7 @@ func (p *InstanceProvider) GetStatsSummary(ctx context.Context) (*stats.Summary, var span trace.Span ctx, span = trace.StartSpan(ctx, "GetStatsSummary") defer span.End() - glog.Infof("GetStatsSummary()") + klog.V(5).Infof("GetStatsSummary()") zero := uint64(0) now := metav1.NewTime(time.Now()) res := &stats.Summary{} @@ -48,7 +48,7 @@ func (p *InstanceProvider) GetStatsSummary(ctx context.Context) (*stats.Summary, return false }) if err != nil { - glog.Errorf("listing pods for stats: %v", err) + klog.Errorf("listing pods for stats: %v", err) return nil, util.WrapError(err, "listing pods for stats") } metricsRegistry := p.getMetricsRegistry() @@ -56,7 +56,7 @@ func (p *InstanceProvider) GetStatsSummary(ctx context.Context) (*stats.Summary, podMetricsList := metricsRegistry.GetPodMetrics(pod.Name) podMetricsItems := podMetricsList.Items if len(podMetricsItems) < 1 { - glog.V(3).Infof("no metrics found for pod %s", pod.Name) + klog.V(2).Infof("no metrics found for pod %s", pod.Name) continue } // First metrics sample from the pod. 
@@ -87,7 +87,7 @@ func (p *InstanceProvider) GetStatsSummary(ctx context.Context) (*stats.Summary, pss.Memory.WorkingSetBytes = &podUsage.WorkingSetBytes res.Pods = append(res.Pods, pss) } - glog.Infof("GetStatsSummary() %+v", res) + klog.V(5).Infof("GetStatsSummary() %+v", res) return res, nil } diff --git a/pkg/server/logs.go b/pkg/server/logs.go index c44bd3b9..86cd3b28 100644 --- a/pkg/server/logs.go +++ b/pkg/server/logs.go @@ -11,8 +11,8 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/nodeclient" "github.com/elotl/cloud-instance-provider/pkg/server/registry" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" "golang.org/x/net/context" + "k8s.io/klog" ) // Logs requests can take a couple of forms: @@ -189,7 +189,7 @@ func (s InstanceProvider) GetLogs(context context.Context, request *clientapi.Lo lines := int(request.Lines) bytes := int(request.Limitbytes) - glog.Infof("Getting logs from %s/%s (max lines %d; limitbytes %d)", + klog.V(2).Infof("Getting logs from %s/%s (max lines %d; limitbytes %d)", resourceName, itemName, lines, bytes) foundLog, err := s.findLog(resourceName, itemName, lines, bytes) diff --git a/pkg/server/metrics_controller.go b/pkg/server/metrics_controller.go index a34892b3..37845cff 100644 --- a/pkg/server/metrics_controller.go +++ b/pkg/server/metrics_controller.go @@ -7,7 +7,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/server/registry" "github.com/elotl/cloud-instance-provider/pkg/util/sets" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -27,7 +27,7 @@ func (c *MetricsController) Dump() []byte { } b, err := json.MarshalIndent(dumpStruct, "", " ") if err != nil { - glog.Errorln("Error dumping data from metrics controller", err) + klog.Errorln("Error dumping data from metrics controller", err) return nil } return b @@ -47,11 +47,11 @@ func (c *MetricsController) runSyncLoop(quit <-chan struct{}, wg *sync.WaitGroup case <-cleanTicker.C: err := c.cleanOldMetrics() if err != nil { - glog.Errorf("Error cleaning old metrics: %s", err) + klog.Errorf("Error cleaning old metrics: %s", err) } case <-quit: cleanTicker.Stop() - glog.Info("Exiting MetricsController Sync Loop") + klog.V(2).Info("Exiting MetricsController Sync Loop") return } } diff --git a/pkg/server/nodemanager/node_controller.go b/pkg/server/nodemanager/node_controller.go index 2d4581b2..082e43b1 100644 --- a/pkg/server/nodemanager/node_controller.go +++ b/pkg/server/nodemanager/node_controller.go @@ -20,7 +20,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/util/cloudinitfile" "github.com/elotl/cloud-instance-provider/pkg/util/stats" "github.com/elotl/cloud-instance-provider/pkg/util/timeoutmap" - "github.com/golang/glog" + "k8s.io/klog" ) // Making these vars makes it easier testing @@ -83,7 +83,7 @@ func (c *NodeController) Dump() []byte { t := c.PoolLoopTimer.Copy() b, err := json.MarshalIndent(*t, "", " ") if err != nil { - glog.Errorln("Error dumping data from NodeController", err) + klog.Errorln("Error dumping data from NodeController", err) return nil } return b @@ -105,7 +105,7 @@ func (c *NodeController) updateBufferedNodesLoop(quit <-chan struct{}, wg *sync. 
c.PoolLoopTimer.StartLoop() updatedBindings, err := c.doPoolsCalculation() if err != nil { - glog.Errorln("Error adjusting node pools", err.Error()) + klog.Errorln("Error adjusting node pools", err.Error()) } else { nodeBindingsUpdate <- updatedBindings } @@ -140,7 +140,7 @@ func (c *NodeController) doPoolsCalculation() (map[string]string, error) { if BootImage == "" { return nil, util.WrapError(err, "Could not get latest boot image") } else { - glog.Warningf("Could not get latest boot image: %s, using stored value for boot image: %s", err, BootImage) + klog.Warningf("Could not get latest boot image: %s, using stored value for boot image: %s", err, BootImage) newBootImage = BootImage } } @@ -158,7 +158,7 @@ func (c *NodeController) doPoolsCalculation() (map[string]string, error) { for _, node := range stopNodes { err := c.stopSingleNode(node) if err != nil { - glog.Warningln("Error stopping single node", err) + klog.Warningln("Error stopping single node", err) continue } } @@ -241,7 +241,7 @@ func (c *NodeController) startNodes(nodes []*api.Node) { } metadata, err := c.getCloudInitContents() if err != nil { - glog.Errorf("Error creating node metadata: %s", err) + klog.Errorf("Error creating node metadata: %s", err) return } // Randomize boot order to prevent getting stuck with 10 nodes at @@ -254,12 +254,12 @@ func (c *NodeController) startNodes(nodes []*api.Node) { } for i, newNode := range nodes { if i >= MaxBootPerIteration { - glog.Infof("Rate limiting start requests to %d per iteration", MaxBootPerIteration) + klog.V(2).Infof("Rate limiting start requests to %d per iteration", MaxBootPerIteration) break } newNode, err := c.NodeRegistry.CreateNode(newNode) if err != nil { - glog.Errorf("Error creating node in registry: %v", err) + klog.Errorf("Error creating node in registry: %v", err) continue } go c.startSingleNode(newNode, metadata) @@ -296,10 +296,10 @@ func (c *NodeController) startSingleNode(node *api.Node, cloudInitData string) e } if err != nil { c.handleStartNodeError(node, err, false) - glog.Errorf("Error in node start: %v", err) + klog.Errorf("Error in node start: %v", err) _, regError := c.NodeRegistry.PurgeNode(node) if regError != nil { - glog.Errorf("Error marking node %s terminated after failed start: %s", + klog.Errorf("Error marking node %s terminated after failed start: %s", node.Name, regError.Error()) } return util.WrapError(err, "Error starting node") @@ -317,7 +317,7 @@ func (c *NodeController) finishNodeStart(node *api.Node) error { // a describe instance here... 
addresses, err := c.CloudClient.WaitForRunning(node) if err != nil { - glog.Infof("Unhealthy wait for running, terminating node: %s", node.Name) + klog.V(2).Infof("Unhealthy wait for running, terminating node: %s", node.Name) _ = c.stopSingleNode(node) return util.WrapError(err, "Error waiting for node to be running") } @@ -329,7 +329,7 @@ func (c *NodeController) finishNodeStart(node *api.Node) error { func (c *NodeController) stopSingleNode(node *api.Node) error { // to keep counts in sync, don't move this inside the goroutine - glog.Infof("Stopping node: %s", node.Name) + klog.V(2).Infof("Stopping node: %s", node.Name) node.Status.Phase = api.NodeTerminating _, err := c.NodeRegistry.UpdateStatus(node) @@ -343,7 +343,7 @@ func (c *NodeController) stopSingleNode(node *api.Node) error { _ = c.CloudClient.StopInstance(n.Status.InstanceID) _, err := c.NodeRegistry.PurgeNode(node) if err != nil { - glog.Errorf("Could not mark node %s as terminated: %v", n.Name, err) + klog.Errorf("Could not mark node %s as terminated: %v", n.Name, err) } }(node) return nil @@ -368,7 +368,7 @@ func (c *NodeController) runHeartbeatsLoop(quit <-chan struct{}, wg *sync.WaitGr case <-ticker.C: allNodes, err := c.NodeRegistry.ListNodes(registry.MatchAllNodes) if err != nil { - glog.Errorf("Error listing nodes for heartbeat: %s", err.Error()) + klog.Errorf("Error listing nodes for heartbeat: %s", err.Error()) // Hack attack.... // The period of this ticker is pretty quick, if our // ListNodes times out then we will not see the quit @@ -381,10 +381,10 @@ func (c *NodeController) runHeartbeatsLoop(quit <-chan struct{}, wg *sync.WaitGr } } if err := c.sendOutHeartbeats(allNodes, heartbeats); err != nil { - glog.Error(err.Error()) + klog.Error(err.Error()) } if err := c.markUnhealthyNodes(allNodes, LastHeartbeat); err != nil { - glog.Error(err.Error()) + klog.Error(err.Error()) } pruneHeartbeats(allNodes, LastHeartbeat) case nodeName := <-heartbeats: @@ -421,15 +421,15 @@ func (c *NodeController) StopCreatingNodes() { return n.Status.Phase == api.NodeCreating }) if err != nil { - glog.Errorf("Could not list nodes to check for creating nodes") + klog.Errorf("Could not list nodes to check for creating nodes") } for _, node := range nodes.Items { - glog.Infof("Terminating creating node %s: it was likely lost at restart", + klog.V(2).Infof("Terminating creating node %s: it was likely lost at restart", node.Name) go func(n *api.Node) { err := c.stopSingleNode(n) if err != nil { - glog.Errorln("Error stopping creating node", err) + klog.Errorln("Error stopping creating node", err) } }(node) } @@ -446,16 +446,16 @@ func (c *NodeController) ResumeWaits() { n.Status.Phase == api.NodeCleaning) }) if err != nil { - glog.Errorf("Could not list nodes for resuming waits") + klog.Errorf("Could not list nodes for resuming waits") } - glog.Infof("Resume waiting on healty from %d instances", len(nodes.Items)) + klog.V(2).Infof("Resume waiting on healty from %d instances", len(nodes.Items)) for _, node := range nodes.Items { go func(node *api.Node) error { if len(node.Status.Addresses) == 0 { addresses, err := c.CloudClient.WaitForRunning(node) c.Events.Emit(events.NodeRunning, "node-controller", node, "") if err != nil { - glog.Infof("Unhealthy wait for running, terminating node: %s", + klog.V(2).Infof("Unhealthy wait for running, terminating node: %s", node.Name) _ = c.stopSingleNode(node) return util.WrapError( @@ -489,10 +489,10 @@ func (c *NodeController) markUnhealthyNodes(allNodes *api.NodeList, LastHeartbea if now.Sub(last) < 
HealthyTimeout { continue } - glog.Warningf("No heartbeats from node %s. Set to terminate.", node.Name) + klog.Warningf("No heartbeats from node %s. Set to terminate.", node.Name) node, err := c.NodeRegistry.MarkForTermination(node) if err != nil { - glog.Errorf("Error marking node %s for termination", node.Name) + klog.Errorf("Error marking node %s for termination", node.Name) } } return nil @@ -519,7 +519,7 @@ func pruneHeartbeats(allNodes *api.NodeList, lastHeartbeat map[string]time.Time) func singleNodeHeartbeat(node *api.Node, client nodeclient.NodeClient, healthyReply chan string) { err := client.Healthcheck() if err != nil { - glog.Warningf("Heartbeat error from node %s: %s", node.Name, err.Error()) + klog.Warningf("Heartbeat error from node %s: %s", node.Name, err.Error()) return } healthyReply <- node.Name @@ -528,24 +528,24 @@ func singleNodeHeartbeat(node *api.Node, client nodeclient.NodeClient, healthyRe func (c *NodeController) waitForAvailableOrTerminate(node *api.Node, timeout time.Duration) error { if len(node.Status.Addresses) == 0 { err := fmt.Errorf("No IP address stored for node %s", node.Name) - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) _ = c.stopSingleNode(node) return err } - glog.Infof("Waiting for available on node %s", node.Name) + klog.V(2).Infof("Waiting for available on node %s", node.Name) client := c.NodeClientFactory.GetClient(node.Status.Addresses) err := waitForHealthy(node, client, timeout) if err != nil { - glog.Errorf("Error in node start: node unresponsive for %s seconds", timeout) - glog.Infof("Terminating node: %s", node.Name) + klog.Errorf("Error in node start: node unresponsive for %s seconds", timeout) + klog.V(2).Infof("Terminating node: %s", node.Name) _ = c.stopSingleNode(node) return util.WrapError(err, "Error waiting for healthy node") } node.Status.Phase = api.NodeAvailable _, err = c.NodeRegistry.UpdateStatus(node) if err != nil { - glog.Errorf("Error setting node %s to available,", node.Name) - glog.Infof("Terminating node: %s", node.Name) + klog.Errorf("Error setting node %s to available,", node.Name) + klog.V(2).Infof("Terminating node: %s", node.Name) _ = c.stopSingleNode(node) return util.WrapError(err, "Error waiting for healthy node") } @@ -593,7 +593,7 @@ func (c NodeController) reaperLoop(quit <-chan struct{}, wg *sync.WaitGroup) { n.Status.Phase != api.NodeTerminated) }) if err != nil { - glog.Errorf("Error listing nodes for reaper loop: %s", err.Error()) + klog.Errorf("Error listing nodes for reaper loop: %s", err.Error()) continue } for _, node := range nodes.Items { @@ -612,12 +612,12 @@ func (c NodeController) reaperLoop(quit <-chan struct{}, wg *sync.WaitGroup) { func (c *NodeController) removePodFromNode(node *api.Node) { pod, err := c.PodReader.GetPod(node.Status.BoundPodName) if err != nil { - glog.Warningf("Could not find pod %s that was reported to be on node %s", + klog.Warningf("Could not find pod %s that was reported to be on node %s", node.Status.BoundPodName, node.Name) return } if pod.Status.BoundNodeName != node.Name { - glog.Warningf("Pod %s no longer on node %s", node.Status.BoundPodName, node.Name) + klog.Warningf("Pod %s no longer on node %s", node.Status.BoundPodName, node.Name) return } c.Events.Emit( @@ -627,7 +627,7 @@ func (c *NodeController) removePodFromNode(node *api.Node) { node.Status.BoundPodName = "" _, err = c.NodeRegistry.UpdateStatus(node) if err != nil { - glog.Errorf("Error deleting bound pod on failed node %s: %v", node.Name, err.Error()) + klog.Errorf("Error deleting bound pod on 
failed node %s: %v", node.Name, err.Error()) } } @@ -659,7 +659,7 @@ func (c *NodeController) dispatchNodesLoop(quit <-chan struct{}, wg *sync.WaitGr case nodeReq := <-c.NodeDispenser.NodeRequestChan: nodeReq.ReplyChan <- c.requestNode(nodeReq, podNodeMap) case returnedNodeMsg := <-c.NodeDispenser.NodeReturnChan: - glog.Infof("Got node %s back", returnedNodeMsg.NodeName) + klog.V(2).Infof("Got node %s back", returnedNodeMsg.NodeName) if returnedNodeMsg.Unused { go c.cleanUnusedNode(returnedNodeMsg.NodeName) } else { @@ -676,17 +676,17 @@ func (c *NodeController) dispatchNodesLoop(quit <-chan struct{}, wg *sync.WaitGr // the node can be reused, here we just mark the node as available and // wipe any info that might have been set. func (c *NodeController) cleanUnusedNode(name string) { - glog.Infof("Node %s is unused, returning to pool", name) + klog.V(2).Infof("Node %s is unused, returning to pool", name) node, err := c.NodeRegistry.GetNode(name) if err != nil { - glog.Errorln("Error retrieving unused node from registry", name) + klog.Errorln("Error retrieving unused node from registry", name) return } node.Status.Phase = api.NodeAvailable node.Status.BoundPodName = "" _, err = c.NodeRegistry.UpdateStatus(node) if err != nil { - glog.Errorf("Error updating node %s status for cleaning unused node: %v", + klog.Errorf("Error updating node %s status for cleaning unused node: %v", name, err) // if things went wrong when putting it back into available, try to // clean it. @@ -703,7 +703,7 @@ func (c *NodeController) requestNode(nodeReq NodeRequest, podNodeMapping map[str boundNode, err := c.NodeRegistry.GetNode(boundNodeName) if err != nil { if err != store.ErrKeyNotFound { - glog.Errorln("Could not list nodes for dispensing to pod:", err) + klog.Errorln("Could not list nodes for dispensing to pod:", err) } return NodeReply{} @@ -712,7 +712,7 @@ func (c *NodeController) requestNode(nodeReq NodeRequest, podNodeMapping map[str } err = c.bindNodeToPod(&nodeReq.requestingPod, boundNode) if err != nil { - glog.Errorln("Error binding pod to available node", err) + klog.Errorln("Error binding pod to available node", err) return NodeReply{} } return NodeReply{ @@ -730,7 +730,7 @@ func (c *NodeController) imageTagsToId(tags cloud.BootImageTags) (string, error) var err error img, err = c.CloudClient.GetImageId(tags) if err != nil { - glog.Errorf("Error resolving image tags %v to image ID: %v", + klog.Errorf("Error resolving image tags %v to image ID: %v", tags, err) return "", err } @@ -738,7 +738,7 @@ func (c *NodeController) imageTagsToId(tags cloud.BootImageTags) (string, error) func(obj interface{}) { _, _ = c.imageTagsToId(tags) }) - glog.Infof("Latest image with tags %v: '%s'", tags, img) + klog.V(2).Infof("Latest image with tags %v: '%s'", tags, img) } return img, nil } @@ -751,12 +751,12 @@ func (c *NodeController) bindNodeToPod(pod *api.Pod, node *api.Node) error { } func (c *NodeController) saveNodeLogs(node *api.Node) { - glog.Infof("Saving node logs") + klog.V(2).Infof("Saving node logs") filename := "/var/log/itzo/itzo.log" client := c.NodeClientFactory.GetClient(node.Status.Addresses) data, err := client.GetFile(filename, 0, nodeclient.SAVE_LOG_BYTES) if err != nil { - glog.Errorf("Error saving node %s log: %s", node.Name, err.Error()) + klog.Errorf("Error saving node %s log: %s", node.Name, err.Error()) return } log := api.NewLogFile() @@ -765,7 +765,7 @@ func (c *NodeController) saveNodeLogs(node *api.Node) { log.Content = string(data) _, err = c.LogRegistry.CreateLog(log) if err != nil { - 
glog.Errorf("Error saving node %s log to registry: %s", + klog.Errorf("Error saving node %s log to registry: %s", node.Name, err.Error()) } } @@ -775,7 +775,7 @@ func (c *NodeController) cleanUsedNode(name string) error { if err != nil { err = util.WrapError( err, "Error retrieving node %s for cleaning", name) - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) return err } @@ -792,7 +792,7 @@ func (c *NodeController) cleanUsedNode(name string) error { // can't put the node into cleaning, then what should we do? // Should we continue and hope for the best? err = util.WrapError(err, "Error updating node to cleaning status") - glog.Errorf(err.Error()) + klog.Errorf(err.Error()) } c.saveNodeLogs(node) // We've decided to skip cleaning and just terminate. if you @@ -801,7 +801,7 @@ func (c *NodeController) cleanUsedNode(name string) error { // to the node_controller emit a NodeCleaning event (or re-write the // consumers of that event) if err = c.stopSingleNode(node); err != nil { - glog.Errorf("Error in cleaning: could not terinate: %s", err.Error()) + klog.Errorf("Error in cleaning: could not terinate: %s", err.Error()) return err } return nil diff --git a/pkg/server/nodemanager/node_dispensary.go b/pkg/server/nodemanager/node_dispensary.go index fbce9199..50ef3eb2 100644 --- a/pkg/server/nodemanager/node_dispensary.go +++ b/pkg/server/nodemanager/node_dispensary.go @@ -4,7 +4,7 @@ package nodemanager import ( "github.com/elotl/cloud-instance-provider/pkg/api" - "github.com/golang/glog" + "k8s.io/klog" ) type NodeReply struct { @@ -45,7 +45,7 @@ func NewNodeDispenser() *NodeDispenser { func (e *NodeDispenser) RequestNode(requestingPod api.Pod) NodeReply { replyChan := make(chan NodeReply) if e.NodeRequestChan == nil { - glog.Errorf("NodeRequestChan is nil!") + klog.Errorf("NodeRequestChan is nil!") return NodeReply{ Node: nil, } @@ -57,7 +57,7 @@ func (e *NodeDispenser) RequestNode(requestingPod api.Pod) NodeReply { func (e *NodeDispenser) ReturnNode(nodeName string, unused bool) { if nodeName == "" { - glog.Warningf("Got empty node name in ReturnNode") + klog.Warningf("Got empty node name in ReturnNode") return } e.NodeReturnChan <- NodeReturn{ diff --git a/pkg/server/nodemanager/node_scaler.go b/pkg/server/nodemanager/node_scaler.go index cb989add..0153e842 100644 --- a/pkg/server/nodemanager/node_scaler.go +++ b/pkg/server/nodemanager/node_scaler.go @@ -6,7 +6,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/api" "github.com/elotl/cloud-instance-provider/pkg/server/cloud" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" + "k8s.io/klog" ) type StatusUpdater interface { @@ -259,7 +259,7 @@ func (s *BindingNodeScaler) Compute(nodes []*api.Node, pods []*api.Pod) ([]*api. 
} _, err := s.nodeRegistry.UpdateStatus(node) if err != nil { - glog.Errorf("Error updating node %s with pod bindings: %s", + klog.Errorf("Error updating node %s with pod bindings: %s", node.Name, err) } } diff --git a/pkg/server/pod_controller.go b/pkg/server/pod_controller.go index 9a1f4d1c..1928a1bd 100644 --- a/pkg/server/pod_controller.go +++ b/pkg/server/pod_controller.go @@ -18,9 +18,9 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/util" "github.com/elotl/cloud-instance-provider/pkg/util/conmap" "github.com/elotl/cloud-instance-provider/pkg/util/stats" - "github.com/golang/glog" "github.com/virtual-kubelet/node-cli/manager" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog" ) // make this configurable @@ -68,9 +68,9 @@ type FullPodStatus struct { func (c *PodController) Start(quit <-chan struct{}, wg *sync.WaitGroup) { c.kubernetesNodeName = os.Getenv("NODE_NAME") - glog.Infof("kubernetes node name: %q", c.kubernetesNodeName) + klog.V(2).Infof("kubernetes node name: %q", c.kubernetesNodeName) if c.kubernetesNodeName == "" { - glog.Warningf("failed to get NODE_NAME; cell network agent won't run") + klog.Warningf("failed to get NODE_NAME; cell network agent won't run") } c.registerEventHandlers() c.failDispatchingPods() @@ -96,7 +96,7 @@ func (c *PodController) podUpdated(e events.Event) error { pod.Status.Phase == api.PodRunning { err := c.updatePodUnits(pod) if err != nil { - glog.Errorln("Error updating pod units:", err) + klog.Errorln("Error updating pod units:", err) } } return nil @@ -112,7 +112,7 @@ func (c *PodController) Dump() []byte { } b, err := json.MarshalIndent(dumpStruct, "", " ") if err != nil { - glog.Errorln("Error dumping data from PodController", err) + klog.Errorln("Error dumping data from PodController", err) return nil } return b @@ -122,7 +122,7 @@ func (c *PodController) ControlLoop(quit <-chan struct{}, wg *sync.WaitGroup) { wg.Add(1) defer wg.Done() - glog.Info("starting pod controller") + klog.V(2).Info("starting pod controller") ticker := time.NewTicker(5 * time.Second) cleanTicker := time.NewTicker(20 * time.Second) fullSyncTicker := time.NewTicker(31 * time.Second) @@ -134,7 +134,7 @@ func (c *PodController) ControlLoop(quit <-chan struct{}, wg *sync.WaitGroup) { // prefer quit in case there is a leader election select { case <-quit: - glog.Info("Stopping PodController") + klog.V(2).Info("Stopping PodController") return default: } @@ -157,7 +157,7 @@ func (c *PodController) ControlLoop(quit <-chan struct{}, wg *sync.WaitGroup) { c.handleReplyTimeouts() c.cleanTimer.EndLoop() case <-quit: - glog.Info("Stopping PodController") + klog.V(2).Info("Stopping PodController") return } } @@ -170,21 +170,21 @@ func (c *PodController) ControlLoop(quit <-chan struct{}, wg *sync.WaitGroup) { // both cases in the same way might be a an issue for pods with // RestartPolicy == api.RestartPolicyNever func (c *PodController) markFailedPod(pod *api.Pod, startFailure bool, msg string) { - glog.Infof("Marking pod %s as failed", pod.Name) + klog.V(2).Infof("Marking pod %s as failed", pod.Name) pod.Status.Phase = api.PodFailed if startFailure { - glog.Warningf("Start failure for pod %s", pod.Name) + klog.Warningf("Start failure for pod %s", pod.Name) pod.Status.StartFailures += 1 // Note: spotFailure and other items in the status will get // overwritten in remedyFailedPod } _, err := c.podRegistry.UpdatePodStatus(pod, msg) if err != nil { - glog.Errorf("Error updating pod status: %v", err) + klog.Errorf("Error updating pod status: %v", err) } go func() { 
c.savePodLogs(pod) - glog.Infof("Returning node %s", pod.Status.BoundNodeName) + klog.V(2).Infof("Returning node %s", pod.Status.BoundNodeName) c.nodeDispenser.ReturnNode(pod.Status.BoundNodeName, false) }() } @@ -214,11 +214,11 @@ func (c *PodController) loadRegistryCredentials(pod *api.Pod) (map[string]api.Re } allCreds[string(server)] = creds if creds.Username == "" { - glog.Warningf("Found empty username for image secret %s", secretName) + klog.Warningf("Found empty username for image secret %s", secretName) } if creds.Password == "" { // Reviewer: do you think its bad to leak this info? - glog.Warningf("Found empty password for secret %s", secretName) + klog.Warningf("Found empty password for secret %s", secretName) } } @@ -257,7 +257,7 @@ func (c *PodController) resizeVolume(node *api.Node, pod *api.Pod, client nodecl return err } sizeGiB := util.ToGiBRoundUp(&size) - glog.Infof("Pod %s requested volume size of %s on node %s", + klog.V(2).Infof("Pod %s requested volume size of %s on node %s", pod.Name, pod.Spec.Resources.VolumeSize, node.Name) err, resizePerformed := c.cloudClient.ResizeVolume(node, int64(sizeGiB)) if err != nil { @@ -266,7 +266,7 @@ func (c *PodController) resizeVolume(node *api.Node, pod *api.Pod, client nodecl if resizePerformed { // Itzo still needs to take care of enlarging the root partition to // span the new, bigger volume. - glog.Infof("Resized volume on node %s, expanding partition", node.Name) + klog.V(2).Infof("Resized volume on node %s, expanding partition", node.Name) return client.ResizeVolume() } return nil @@ -299,7 +299,7 @@ func isBurstableMachine(machine string) bool { } func (c *PodController) dispatchPodToNode(pod *api.Pod, node *api.Node) { - glog.Infof("Dispatching pod %s to node %s", pod.Name, node.Name) + klog.V(2).Infof("Dispatching pod %s to node %s", pod.Name, node.Name) client := c.nodeClientFactory.GetClient(node.Status.Addresses) resizableVolume := !c.cloudClient.GetAttributes().FixedSizeVolume if resizableVolume && pod.Spec.Resources.VolumeSize != "" { @@ -307,7 +307,7 @@ func (c *PodController) dispatchPodToNode(pod *api.Pod, node *api.Node) { if err != nil { msg := fmt.Sprintf("Error resizing volume on node %s pod %s: %v", node.Name, pod.Name, err) - glog.Errorf("%s", msg) + klog.Errorf("%s", msg) c.markFailedPod(pod, true, msg) return } @@ -318,7 +318,7 @@ func (c *PodController) dispatchPodToNode(pod *api.Pod, node *api.Node) { err := c.cloudClient.SetSustainedCPU(node, *pod.Spec.Resources.SustainedCPU) if err != nil { msg := fmt.Sprintf("Error dispatching pod to node, could not modify Sustained CPU settings: %s", err) - glog.Errorln(msg) + klog.Errorln(msg) c.markFailedPod(pod, true, msg) return } @@ -329,7 +329,7 @@ func (c *PodController) dispatchPodToNode(pod *api.Pod, node *api.Node) { err := c.attachSecurityGroupsToNode(node, securityGroupsStr) if err != nil { msg := fmt.Sprintf("Error dispatching pod to node, could not attach security groups to pod %s: %s", pod.Name, err) - glog.Errorln(msg) + klog.Errorln(msg) c.markFailedPod(pod, true, msg) return } @@ -340,7 +340,7 @@ func (c *PodController) dispatchPodToNode(pod *api.Pod, node *api.Node) { err := c.cloudClient.AssignInstanceProfile(node, instanceProfile) if err != nil { msg := fmt.Sprintf("Error dispatching pod to node, could not assign instance profile %s to pod %s: %s", instanceProfile, pod.Name, err) - glog.Errorln(msg) + klog.Errorln(msg) c.markFailedPod(pod, true, msg) return } @@ -354,14 +354,14 @@ func (c *PodController) dispatchPodToNode(pod *api.Pod, node 
*api.Node) { err := deployPodVolumes(pod, node, c.resourceManager, c.nodeClientFactory) if err != nil { msg := fmt.Sprintf("Error deploying volumes to node for pod %s: %v", pod.Name, err) - glog.Errorln(msg) + klog.Errorln(msg) c.markFailedPod(pod, true, msg) return } err = c.updatePodUnits(pod) if err != nil { msg := fmt.Sprintf("Error updating pod units after dispatching pod to node: %v", err) - glog.Errorln(msg) + klog.Errorln(msg) c.markFailedPod(pod, true, msg) return } @@ -369,7 +369,7 @@ func (c *PodController) dispatchPodToNode(pod *api.Pod, node *api.Node) { err = setPodRunning(pod, node.Name, c.podRegistry, c.events) if err != nil { msg := fmt.Sprintf("Error updating pod status to running: %v", err) - glog.Error(msg) + klog.Error(msg) c.markFailedPod(pod, true, msg) return } @@ -389,14 +389,14 @@ func (c *PodController) SyncRunningPods() { p.Status.Phase == api.PodRunning }) if err != nil { - glog.Errorf("Could not list running pods for full sync") + klog.Errorf("Could not list running pods for full sync") return } for _, pod := range podList.Items { go func(p *api.Pod) { err := c.updatePodUnits(p) if err != nil { - glog.Error(err) + klog.Error(err) } }(pod) } @@ -412,7 +412,7 @@ func (c *PodController) TagNodeWithPodLabels(pod *api.Pod, node *api.Node) { } err := c.cloudClient.AddInstanceTags(node.Status.InstanceID, cloudLabels) if err != nil { - glog.Errorln("Error tagging node", node.Name, err) + klog.Errorln("Error tagging node", node.Name, err) } } @@ -421,7 +421,7 @@ func (c *PodController) failDispatchingPods() { return p.Status.Phase == api.PodDispatching }) if err != nil { - glog.Errorf("Could not list dispatching pods") + klog.Errorf("Could not list dispatching pods") return } for _, pod := range podList.Items { @@ -429,7 +429,7 @@ func (c *PodController) failDispatchingPods() { pod.Status.Phase = api.PodFailed _, err = c.podRegistry.UpdatePodStatus(pod, "Milpa resets/fails dispatching pods at system startup") if err != nil { - glog.Errorf("Error updating pod status: %v", err) + klog.Errorf("Error updating pod status: %v", err) continue } } @@ -438,7 +438,7 @@ func (c *PodController) failDispatchingPods() { func (c *PodController) handlePodStatusReply(reply FullPodStatus) { pod, err := c.podRegistry.GetPod(reply.Name) if err != nil { - glog.Errorf("Error getting pod %s from registry: %v", reply.Name, err) + klog.Errorf("Error getting pod %s from registry: %v", reply.Name, err) return } podIP := api.GetPrivateIP(pod.Status.Addresses) @@ -446,7 +446,7 @@ func (c *PodController) handlePodStatusReply(reply FullPodStatus) { pod.Status.Addresses = api.NewNetworkAddresses(reply.PodIP, "") } else if reply.PodIP != "" && podIP != reply.PodIP { // Reply came in after pod has been rescheduled. 
- glog.Errorf("IP for pod %s has changed %s -> %s", + klog.Errorf("IP for pod %s has changed %s -> %s", reply.Name, reply.PodIP, podIP) return } @@ -464,7 +464,7 @@ func (c *PodController) handlePodStatusReply(reply FullPodStatus) { // and check the Status.Phase savedPod, _ := c.podRegistry.GetPod(pod.Name) if savedPod == nil || !api.IsTerminalPodPhase(savedPod.Status.Phase) { - glog.Errorf("Error updating pod %s status: %v", pod.Name, err) + klog.Errorf("Error updating pod %s status: %v", pod.Name, err) } } } @@ -484,7 +484,7 @@ func (c *PodController) pruneLastStatusReplies() { return false }) if err != nil { - glog.Errorf("Error getting list of pods from registry") + klog.Errorf("Error getting list of pods from registry") return } for _, replyItem := range c.lastStatusReply.Items() { @@ -502,7 +502,7 @@ func (c *PodController) handleReplyTimeouts() { return p.Status.Phase == api.PodRunning }) if err != nil { - glog.Errorf("Error getting list of pods from registry") + klog.Errorf("Error getting list of pods from registry") return } now := time.Now().UTC() @@ -523,7 +523,7 @@ func (c *PodController) maybeFailUnresponsivePod(pod *api.Pod) { node, err := c.nodeLister.GetNode(pod.Status.BoundNodeName) if err != nil { msg := fmt.Sprintf("No node found for pod %s", pod.Name) - glog.Warningf(msg) + klog.Warningf(msg) c.markFailedPod(pod, false, msg) return } @@ -532,10 +532,10 @@ func (c *PodController) maybeFailUnresponsivePod(pod *api.Pod) { if err != nil { msg := fmt.Sprintf("No status reply from pod %s in %ds failing pod", pod.Name, int(statusReplyTimeout.Seconds())) - glog.Warningf(msg) + klog.Warningf(msg) c.markFailedPod(pod, false, msg) } else { - glog.Warningf("Last chance healthcheck for pod %s saved the pod from failure. Pod status is possibly out of date", pod.Name) + klog.Warningf("Last chance healthcheck for pod %s saved the pod from failure. 
Pod status is possibly out of date", pod.Name) c.lastStatusReply.Set(pod.Name, time.Now().UTC()) } } @@ -562,7 +562,7 @@ func (c *PodController) checkClaimedNodes() { p.Status.Phase == api.PodRunning }) if err != nil { - glog.Error(err) + klog.Error(err) return } podToNode := make(map[string]string) @@ -572,7 +572,7 @@ func (c *PodController) checkClaimedNodes() { nodeList, err := c.nodeLister.ListNodes(registry.MatchAllNodes) if err != nil { - glog.Error(err) + klog.Error(err) return } for _, node := range nodeList.Items { @@ -588,7 +588,7 @@ func (c *PodController) checkClaimedNodes() { for nodeName, podName := range lastWrongPod { lastPodName, exists := wrongPod[nodeName] if exists && lastPodName == podName { - glog.Errorf("Found claimed node %s with incorrect pod assignment %s", + klog.Errorf("Found claimed node %s with incorrect pod assignment %s", nodeName, podName) c.nodeDispenser.ReturnNode(nodeName, false) } @@ -608,7 +608,7 @@ func (c *PodController) checkRunningPods() { nodeList, err := c.nodeLister.ListNodes(registry.MatchAllNodes) if err != nil { - glog.Error(err) + klog.Error(err) return } nodeToPod := make(map[string]string) @@ -621,7 +621,7 @@ func (c *PodController) checkRunningPods() { return p.Status.Phase == api.PodRunning }) if err != nil { - glog.Error(err) + klog.Error(err) return } @@ -636,16 +636,16 @@ func (c *PodController) checkRunningPods() { if exists && lastNodeName == nodeName { msg := fmt.Sprintf("Found running pod %s with incorrect node assignment %s", podName, nodeName) - glog.Errorf("%s", msg) + klog.Errorf("%s", msg) pod, err := c.podRegistry.GetPod(podName) if err != nil { - glog.Errorf("Getting broken pod from registry: %v", err) + klog.Errorf("Getting broken pod from registry: %v", err) continue } pod.Status.Phase = api.PodFailed _, err = c.podRegistry.UpdatePodStatus(pod, msg) if err != nil { - glog.Errorf("Error updating pod status: %v", err) + klog.Errorf("Error updating pod status: %v", err) continue } } @@ -687,7 +687,7 @@ func (c *PodController) schedulePod(pod *api.Pod) { } pod, err := c.setPodDispatchingParams(pod, nodeReply.Node) if err != nil { - glog.Errorf("Error updating pod for dispatch to node: %s", err) + klog.Errorf("Error updating pod for dispatch to node: %s", err) c.nodeDispenser.ReturnNode(nodeReply.Node.Name, true) return } @@ -704,7 +704,7 @@ func (c *PodController) terminateBoundPod(pod *api.Pod) { // run this in a goroutine in case it blocks (shouldn't ever happen) go func() { c.savePodLogs(pod) - glog.Infof("Returning node %s for pod %s", pod.Status.BoundNodeName, pod.Name) + klog.V(2).Infof("Returning node %s for pod %s", pod.Status.BoundNodeName, pod.Name) c.nodeDispenser.ReturnNode(pod.Status.BoundNodeName, false) }() } @@ -784,14 +784,14 @@ func (c *PodController) checkRunningPodStatus() { return p.Status.Phase == api.PodRunning }) if err != nil { - glog.Errorln("Error listing running pods", err) + klog.Errorln("Error listing running pods", err) return } for _, pod := range podList.Items { go func(p *api.Pod) { reply := c.queryPodStatus(p) if reply.Error != nil { - glog.Errorf("Error getting status of pod %s: %v", + klog.Errorf("Error getting status of pod %s: %v", reply.Name, reply.Error) } else { c.handlePodStatusReply(reply) @@ -805,22 +805,22 @@ func (c *PodController) checkRunningPodStatus() { // items in pod.Status can change behind the scenes. 
func (c *PodController) savePodLogs(pod *api.Pod) { if pod.Status.BoundNodeName == "" { - glog.Infof("not saving pod logs, pod is not bound") + klog.V(2).Infof("not saving pod logs, pod is not bound") return } node, err := c.nodeLister.GetNode(pod.Status.BoundNodeName) if err != nil { - glog.Infof("not saving pod logs, bound to node %q: %v", + klog.V(2).Infof("not saving pod logs, bound to node %q: %v", pod.Status.BoundNodeName, err) return } - glog.Infof("Saving pod logs") + klog.V(2).Infof("Saving pod logs") podAddresses := node.Status.Addresses if len(podAddresses) == 0 { - glog.Infof("pod %s has no bound instance, not gathering logs", + klog.V(2).Infof("pod %s has no bound instance, not gathering logs", pod.Name) } client := c.nodeClientFactory.GetClient(podAddresses) @@ -829,7 +829,7 @@ func (c *PodController) savePodLogs(pod *api.Pod) { for _, unit := range allUnits { data, err := client.GetLogs(unit.Name, 0, nodeclient.SAVE_LOG_BYTES) if err != nil { - glog.Errorf("Error saving pod %s log for unit %s: %s", + klog.Errorf("Error saving pod %s log for unit %s: %s", pod.Name, unit.Name, err.Error()) continue } @@ -839,17 +839,17 @@ func (c *PodController) savePodLogs(pod *api.Pod) { log.Content = string(data) _, err = c.logRegistry.CreateLog(log) if err != nil { - glog.Errorf("Error saving pod %s log for unit %s to registry: %s", + klog.Errorf("Error saving pod %s log for unit %s to registry: %s", pod.Name, unit.Name, err.Error()) } } } func (c *PodController) handlePodSucceeded(pod *api.Pod) { - glog.Errorf("Pod %s has succeeded", pod.Name) + klog.Errorf("Pod %s has succeeded", pod.Name) _, err := c.podRegistry.TerminatePod(pod, api.PodSucceeded, "Pod succeeded") if err != nil { - glog.Errorf("Error updating pod %s spec phase: %v", + klog.Errorf("Error updating pod %s spec phase: %v", pod.Name, err) } // Pod's work is done... 
@@ -869,7 +869,7 @@ func podNeedsControlling(p *api.Pod) bool { func (c *PodController) ControlPods() { podlist, err := c.podRegistry.ListPods(podNeedsControlling) if err != nil { - glog.Errorf("Error listing pods %v", err) + klog.Errorf("Error listing pods %v", err) } if len(podlist.Items) <= 0 { return @@ -886,18 +886,18 @@ func (c *PodController) ControlPods() { case api.PodWaiting: c.schedulePod(pod) case api.PodDispatching: - glog.Warningf("Previously dispatching pod %s is not finished dispatching", pod.Name) + klog.Warningf("Previously dispatching pod %s is not finished dispatching", pod.Name) case api.PodRunning: - glog.Warningf("Pod %s is already in desired state, no control necessary", pod.Name) + klog.Warningf("Pod %s is already in desired state, no control necessary", pod.Name) case api.PodFailed: remedyFailedPod(pod, c.podRegistry) case api.PodSucceeded: c.handlePodSucceeded(pod) case api.PodTerminated: // We've likely set this pod as dead after too many failures - glog.Warningf("pod %s is terminated but speced to be running", pod.Name) + klog.Warningf("pod %s is terminated but speced to be running", pod.Name) default: - glog.Errorf("Unknown pod phase: %s", pod.Status.Phase) + klog.Errorf("Unknown pod phase: %s", pod.Status.Phase) } case api.PodTerminated: // if waiting, just mark it as terminated diff --git a/pkg/server/pod_controller_utils.go b/pkg/server/pod_controller_utils.go index aaecb3d7..dac47ef9 100644 --- a/pkg/server/pod_controller_utils.go +++ b/pkg/server/pod_controller_utils.go @@ -7,7 +7,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/api" "github.com/elotl/cloud-instance-provider/pkg/server/events" "github.com/elotl/cloud-instance-provider/pkg/server/registry" - "github.com/golang/glog" + "k8s.io/klog" ) // The routines in here are shared between pod_controller and @@ -89,7 +89,7 @@ func computePodPhase(policy api.RestartPolicy, unitstatus []api.UnitStatus, podN // running state (or in the created state, if the helper in // itzo is a bit slow to start up). failMsg = fmt.Sprintf("Invalid unit status for unit %s", us.Name) - glog.Warningln(failMsg) + klog.Warningln(failMsg) valid = false } case api.RestartPolicyNever: @@ -126,13 +126,13 @@ func computePodPhase(policy api.RestartPolicy, unitstatus []api.UnitStatus, podN } if us.State.Terminated != nil && us.State.Terminated.ExitCode != 0 { failMsg = fmt.Sprintf("Invalid unit status for unit %s", us.Name) - glog.Warningln(failMsg) + klog.Warningln(failMsg) valid = false } } } if !valid { - glog.Warningf("Invalid unit state for pod %s. Setting pod phase to Failed", podName) + klog.Warningf("Invalid unit state for pod %s. 
Setting pod phase to Failed", podName) phase = api.PodFailed } return phase, failMsg @@ -190,7 +190,7 @@ func runningMaxLicensePods(podRegistry *registry.PodRegistry, maxResources int) p.Status.Phase == api.PodRunning) }) if err != nil { - glog.Errorf("Error listing pods for checking license limits: %s", err.Error()) + klog.Errorf("Error listing pods for checking license limits: %s", err.Error()) return true } if len(pods.Items) >= maxResources { @@ -204,7 +204,7 @@ func remedyFailedPod(pod *api.Pod, podRegistry *registry.PodRegistry) { pod.Spec.RestartPolicy != api.RestartPolicyNever { msg := fmt.Sprintf("Pod %s has failed to start %d times, retrying", pod.Name, pod.Status.StartFailures) - glog.Warningf("%s", msg) + klog.Warningf("%s", msg) // reset most everything in the status pod.Status = api.PodStatus{ Phase: api.PodWaiting, @@ -212,7 +212,7 @@ func remedyFailedPod(pod *api.Pod, podRegistry *registry.PodRegistry) { } podRegistry.UpdatePodStatus(pod, msg) } else { - glog.Errorf("pod %s has failed to start %d times. Not trying again, pod has failed", + klog.Errorf("pod %s has failed to start %d times. Not trying again, pod has failed", pod.Name, pod.Status.StartFailures) podRegistry.TerminatePod(pod, api.PodFailed, "Pod failed: too many start failures") @@ -267,7 +267,7 @@ func updatePodWithStatus(pod *api.Pod, reply FullPodStatus) (changed, startFailu } if podPhase != pod.Status.Phase { - glog.Infof("Changing pod %s phase %s -> %s", + klog.V(2).Infof("Changing pod %s phase %s -> %s", pod.Name, pod.Status.Phase, podPhase) pod.Status.Phase = podPhase if podPhase == api.PodFailed { @@ -275,7 +275,7 @@ func updatePodWithStatus(pod *api.Pod, reply FullPodStatus) (changed, startFailu } if (podPhase == api.PodFailed || podPhase == api.PodSucceeded) && pod.Status.BoundNodeName == "" { - glog.Errorf("Programming error: unbound pod %s is %s", + klog.Errorf("Programming error: unbound pod %s is %s", pod.Name, podPhase) } } diff --git a/pkg/server/registry/event_registry.go b/pkg/server/registry/event_registry.go index c5d0b8a9..48a441d9 100644 --- a/pkg/server/registry/event_registry.go +++ b/pkg/server/registry/event_registry.go @@ -11,7 +11,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/etcd" "github.com/elotl/cloud-instance-provider/pkg/server/events" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -81,7 +81,7 @@ func (reg *EventRegistry) Handle(e events.Event) error { ev.Message = e.Message _, err := reg.CreateEvent(ev) if err != nil { - glog.Errorf("Error creating event %v in storage: %v", ev, err) + klog.Errorf("Error creating event %v in storage: %v", ev, err) return err } return nil @@ -137,7 +137,7 @@ func (reg *EventRegistry) ListEventsWithPrefix(prefix string, filter func(*api.E pairs, err := reg.Storer.List(prefix) eventList := api.NewEventList() if err != nil { - glog.Errorf("Error listing events in storage with prefix %s: %v", + klog.Errorf("Error listing events in storage with prefix %s: %v", prefix, err) return eventList, err } @@ -149,7 +149,7 @@ func (reg *EventRegistry) ListEventsWithPrefix(prefix string, filter func(*api.E e := api.NewEvent() err = reg.codec.Unmarshal(pair.Value, e) if err != nil { - glog.Errorf("Error unmarshalling single event in list operation: %v", err) + klog.Errorf("Error unmarshalling single event in list operation: %v", err) continue } if filter(e) { @@ -231,7 +231,7 @@ func (reg *EventRegistry) DeleteEvent(e *api.Event) (*api.Event, error) { TTL: reg.ttl, }) if err != nil { - 
glog.Warningf("Could not create deleted event %s in registry: %v", + klog.Warningf("Could not create deleted event %s in registry: %v", e.Name, err) } return e, nil diff --git a/pkg/server/registry/log_registry.go b/pkg/server/registry/log_registry.go index affd801d..d8a868e7 100644 --- a/pkg/server/registry/log_registry.go +++ b/pkg/server/registry/log_registry.go @@ -10,7 +10,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/etcd" "github.com/elotl/cloud-instance-provider/pkg/server/events" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -126,7 +126,7 @@ func (reg *LogRegistry) ListLogs(creatorName, logName string) (*api.LogFileList, pairs, err := reg.Storer.List(key) loglist := api.NewLogFileList() if err != nil { - glog.Errorf("Error listing logs in storage: %v", err) + klog.Errorf("Error listing logs in storage: %v", err) return loglist, err } for _, pair := range pairs { @@ -139,7 +139,7 @@ func (reg *LogRegistry) ListLogs(creatorName, logName string) (*api.LogFileList, log := api.NewLogFile() err = reg.codec.Unmarshal(pair.Value, log) if err != nil { - glog.Errorf("Error unmarshalling single log in list operation: %v", err) + klog.Errorf("Error unmarshalling single log in list operation: %v", err) continue } loglist.Items = append(loglist.Items, log) diff --git a/pkg/server/registry/node_registry.go b/pkg/server/registry/node_registry.go index 11fd3feb..e4544719 100644 --- a/pkg/server/registry/node_registry.go +++ b/pkg/server/registry/node_registry.go @@ -10,7 +10,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/etcd" "github.com/elotl/cloud-instance-provider/pkg/server/events" "github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -84,7 +84,7 @@ func (reg *NodeRegistry) Delete(name string) (api.MilpaObject, error) { // // we allow users to delete nodes? 
// in.Status.Phase = api.NodeTerminating // now := time.Now().UTC() - // glog.Infof("Setting deletion time") + // klog.V(2).Infof("Setting deletion time") // in.DeletionTimestamp = &now // return nil // }) @@ -173,7 +173,7 @@ func (reg *NodeRegistry) listNodes(nodePath string, filter func(*api.Node) bool) pairs, err := reg.Storer.List(nodePath) nodelist := api.NewNodeList() if err != nil { - glog.Errorf("Error listing nodes in storage: %v", err) + klog.Errorf("Error listing nodes in storage: %v", err) return nodelist, err } nodelist.Items = make([]*api.Node, 0, len(pairs)) @@ -188,7 +188,7 @@ func (reg *NodeRegistry) listNodes(nodePath string, filter func(*api.Node) bool) node := api.NewNode() err = reg.Codec.Unmarshal(pair.Value, node) if err != nil { - glog.Errorf("Error unmarshalling single node in list operation: %v", err) + klog.Errorf("Error unmarshalling single node in list operation: %v", err) continue } if filter(node) { @@ -199,7 +199,7 @@ func (reg *NodeRegistry) listNodes(nodePath string, filter func(*api.Node) bool) } func (reg *NodeRegistry) PurgeNode(node *api.Node) (*api.Node, error) { - glog.Infof("Purging node %v", node) + klog.V(2).Infof("Purging node %v", node) reg.eventSystem.Emit(events.NodePurged, "node-registry", node) node.Status.Phase = api.NodeTerminated @@ -227,7 +227,7 @@ func (reg *NodeRegistry) PurgeNode(node *api.Node) (*api.Node, error) { TTL: trashTTL, }) if err != nil { - glog.Warningf("Could not create deleted node %s in registry: %s", + klog.Warningf("Could not create deleted node %s in registry: %s", node.Name, err.Error()) } return node, nil @@ -244,7 +244,7 @@ func validStateChange(old, new api.NodePhase) bool { // a node failed to come up, somehow our initial update failed // so we are shutting them down. If the cloud is OK with it, // we can let it pass. 
Log it and carry on - glog.Warningf("Racy termination: attempting to terminate Creating node") + klog.Warningf("Racy termination: attempting to terminate Creating node") } return true default: @@ -295,7 +295,7 @@ func validStateChange(old, new api.NodePhase) bool { return false } } - glog.Fatalf("Programming error: Reached end of state transition table") + klog.Fatalf("Programming error: Reached end of state transition table") return false } diff --git a/pkg/server/registry/pod_registry.go b/pkg/server/registry/pod_registry.go index 3a88d4ce..4c5064bd 100644 --- a/pkg/server/registry/pod_registry.go +++ b/pkg/server/registry/pod_registry.go @@ -12,7 +12,7 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/util" "github.com/elotl/cloud-instance-provider/pkg/util/instanceselector" "github.com/elotl/cloud-instance-provider/pkg/util/validation/field" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -138,7 +138,7 @@ func (reg *PodRegistry) isLivePod(name string) bool { if err == store.ErrKeyNotFound { return false } else if err != nil { - glog.Errorf("Error getting pod: %s, assuming pod is alive", err) + klog.Errorf("Error getting pod: %s, assuming pod is alive", err) return true } @@ -232,7 +232,7 @@ func (reg *PodRegistry) ListPods(filter func(*api.Pod) bool) (*api.PodList, erro pairs, err := reg.Storer.List(PodPath) podlist := api.NewPodList() if err != nil { - glog.Errorf("Error listing pods in storage: %v", err) + klog.Errorf("Error listing pods in storage: %v", err) return podlist, err } podlist.Items = make([]*api.Pod, 0, len(pairs)) @@ -246,7 +246,7 @@ func (reg *PodRegistry) ListPods(filter func(*api.Pod) bool) (*api.PodList, erro pod := api.NewPod() err = reg.Codec.Unmarshal(pair.Value, pod) if err != nil { - glog.Errorf("Error unmarshalling single pod in list operation: %v", err) + klog.Errorf("Error unmarshalling single pod in list operation: %v", err) continue } if filter(pod) { @@ -296,7 +296,7 @@ func validStatusPhaseChange(old, new api.PodPhase) bool { case api.PodTerminated: return false } - glog.Fatalf("Programming error: Reached end of state transition table") + klog.Fatalf("Programming error: Reached end of state transition table") return false } @@ -357,7 +357,7 @@ func (reg *PodRegistry) TerminatePod(pod *api.Pod, phase api.PodPhase, msg strin TTL: terminatedPodTTL, }) if err != nil { - glog.Warningf("Could not updated terminated pod %s in registry: %s", + klog.Warningf("Could not update terminated pod %s in registry: %s", pod.Name, err.Error()) }
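Both registries guard their phase-transition switches with a Fatalf if control falls off the end of the table. A table-driven sketch of validStatusPhaseChange shows the idea; the phase names appear in this diff, but the transition set below is illustrative only and is not the provider's actual rule set.

package main

import "fmt"

type PodPhase string

// Phases taken from the diff; the table below is an illustration.
const (
	PodWaiting    PodPhase = "Waiting"
	PodRunning    PodPhase = "Running"
	PodSucceeded  PodPhase = "Succeeded"
	PodFailed     PodPhase = "Failed"
	PodTerminated PodPhase = "Terminated"
)

// validTransitions is an assumed transition set, not the real one.
var validTransitions = map[PodPhase]map[PodPhase]bool{
	PodWaiting:    {PodRunning: true, PodFailed: true},
	PodRunning:    {PodSucceeded: true, PodFailed: true, PodTerminated: true},
	PodSucceeded:  {PodTerminated: true},
	PodFailed:     {PodWaiting: true, PodTerminated: true}, // back to Waiting via retry
	PodTerminated: {},
}

// validStatusPhaseChange reports whether old -> new is allowed. A map
// lookup cannot "fall off the end" of a switch-based table, which is
// the condition the Fatalf above guards against.
func validStatusPhaseChange(old, new PodPhase) bool {
	return validTransitions[old][new]
}

func main() {
	fmt.Println(validStatusPhaseChange(PodRunning, PodWaiting)) // false
}

diff --git a/pkg/server/runincontainer.go b/pkg/server/runincontainer.go index 3d9d4ce5..5459a953 100644 --- a/pkg/server/runincontainer.go +++ b/pkg/server/runincontainer.go @@ -10,9 +10,9 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/nodeclient" "github.com/elotl/cloud-instance-provider/pkg/util" "github.com/elotl/wsstream" - "github.com/golang/glog" vkapi "github.com/virtual-kubelet/virtual-kubelet/node/api" "github.com/virtual-kubelet/virtual-kubelet/trace" + "k8s.io/klog" ) type WinSize struct { @@ -26,7 +26,7 @@ func (p *InstanceProvider) RunInContainer(ctx context.Context, namespace, podNam ctx, span := trace.StartSpan(ctx, "RunInContainer") defer span.End() ctx = addAttributes(ctx, span, namespaceKey, namespace, nameKey, podName, containerNameKey, containerName) - glog.Infof("RunInContainer %q %v", podName, cmd) + klog.V(2).Infof("RunInContainer %q %v", podName, cmd) tty := attach.TTY() stdin := attach.Stdin() stdout := attach.Stdout() @@ -56,13 +56,13 @@ func (p *InstanceProvider) 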
RunInContainer(ctx context.Context, namespace, podNam // Send tty resize messages to the other side. go func() { for termsize := range resize { - glog.Infof("exec requesting window resize %+v", termsize) + klog.V(2).Infof("exec requesting window resize %+v", termsize) err = sendWinSize(ws, WinSize{ Cols: termsize.Width, Rows: termsize.Height, }) if err != nil { - glog.Errorf("exec sending window resize: %v", err) + klog.Errorf("exec sending window resize: %v", err) continue } } @@ -111,11 +111,11 @@ func (p *InstanceProvider) muxToWS(ws *wsstream.WSStream, stdin io.Reader, stdou n, err := stdin.Read(b) eof := err == io.EOF if err != nil && !eof { - glog.Errorf("exec reading stdin: %v", err) + klog.Errorf("exec reading stdin: %v", err) return } if eof && n == 0 { - glog.Infof("exec stdin EOF") + klog.V(2).Infof("exec stdin EOF") return } // CRLF conversion if a tty is used. @@ -125,11 +125,11 @@ func (p *InstanceProvider) muxToWS(ws *wsstream.WSStream, stdin io.Reader, stdou f := wsstream.PackMessage(wsstream.StdinChan, b) err = ws.WriteRaw(f) if err != nil { - glog.Errorf("exec ws send: %v", err) + klog.Errorf("exec ws send: %v", err) return } if eof { - glog.Infof("exec stdin EOF") + klog.V(2).Infof("exec stdin EOF") return } } @@ -141,7 +141,7 @@ func (p *InstanceProvider) muxToWS(ws *wsstream.WSStream, stdin io.Reader, stdou case msg := <-ws.ReadMsg(): ch, data, err := wsstream.UnpackMessage(msg) if err != nil { - glog.Errorf("exec reading from ws: %v", err) + klog.Errorf("exec reading from ws: %v", err) continue } var writer io.WriteCloser @@ -159,18 +159,18 @@ func (p *InstanceProvider) muxToWS(ws *wsstream.WSStream, stdin io.Reader, stdou case wsstream.ExitCodeChan: exitCode, err := strconv.Atoi(string(data)) if err != nil { - glog.Errorf("exec invalid exit code %v", data) + klog.Errorf("exec invalid exit code %q", string(data)) continue } - glog.Infof("exec got exit code %d", exitCode) + klog.V(2).Infof("exec got exit code %d", exitCode) continue default: - glog.Errorf("exec unknown channel %d from ws", ch) + klog.Errorf("exec unknown channel %d from ws", ch) continue } _, err = writer.Write(data) if err != nil { - glog.Errorf("exec writing to output %d: %v", ch, err) + klog.Errorf("exec writing to output %d: %v", ch, err) break } }
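muxToWS multiplexes stdin, stdout, stderr, and the exit code over a single websocket by tagging each frame with a channel ID (wsstream.PackMessage / wsstream.UnpackMessage above). A self-contained sketch of that framing idea follows; the one-byte format and channel values are made up for illustration and are not wsstream's actual wire protocol.

package main

import (
	"bytes"
	"fmt"
)

// One-byte channel IDs; these values are invented for the sketch.
const (
	stdinChan byte = iota
	stdoutChan
	stderrChan
	exitCodeChan
)

// pack prepends the channel ID to the payload, in the spirit of
// wsstream.PackMessage.
func pack(ch byte, data []byte) []byte {
	return append([]byte{ch}, data...)
}

// unpack splits a frame back into channel ID and payload, in the
// spirit of wsstream.UnpackMessage.
func unpack(frame []byte) (byte, []byte, error) {
	if len(frame) < 1 {
		return 0, nil, fmt.Errorf("empty frame")
	}
	return frame[0], frame[1:], nil
}

func main() {
	var stdout, stderr bytes.Buffer
	for _, frame := range [][]byte{
		pack(stdoutChan, []byte("hello\n")),
		pack(stderrChan, []byte("oops\n")),
		pack(exitCodeChan, []byte("0")),
	} {
		ch, data, err := unpack(frame)
		if err != nil {
			continue // skip malformed frames, as the loop above does
		}
		switch ch {
		case stdoutChan:
			stdout.Write(data)
		case stderrChan:
			stderr.Write(data)
		case exitCodeChan:
			fmt.Printf("exit code %s\n", data)
		}
	}
	fmt.Print(stdout.String(), stderr.String())
}

diff --git a/pkg/server/server.go b/pkg/server/server.go index 64ef6565..ea3fa3ed 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -1,7 +1,6 @@ package server import ( - "flag" "fmt" "net" "sort" @@ -27,7 +26,6 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/util/instanceselector" "github.com/elotl/cloud-instance-provider/pkg/util/timeoutmap" "github.com/elotl/cloud-instance-provider/pkg/util/validation/field" - "github.com/golang/glog" "github.com/virtual-kubelet/node-cli/manager" "github.com/virtual-kubelet/virtual-kubelet/errdefs" "github.com/virtual-kubelet/virtual-kubelet/trace" @@ -35,6 +33,7 @@ import ( "google.golang.org/grpc" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog" utiliptables "k8s.io/kubernetes/pkg/util/iptables" utilexec "k8s.io/utils/exec" ) @@ -79,7 +78,7 @@ type InstanceProvider struct { } func validateWriteToEtcd(client *etcd.SimpleEtcd) error { - glog.Info("validating write access to etcd (will block until we can connect)") + klog.V(2).Info("validating write access to etcd (will block until we can connect)") wo := &store.WriteOptions{ IsDir: false, TTL: 2 * time.Second, } @@ -89,7 +88,7 @@ func validateWriteToEtcd(client *etcd.SimpleEtcd) error { if err != nil { return err } - 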
glog.Info("Write to etcd successful") + klog.V(2).Info("write to etcd successful") return nil } @@ -98,7 +97,7 @@ func setupEtcd(configFile, dataDir string, quit <-chan struct{}, wg *sync.WaitGr // change in the future if we want the embedded server to join // existing etcd server, but, for now just don't start it. var client *etcd.SimpleEtcd - glog.Infof("starting internal etcd") + klog.V(2).Infof("starting internal etcd") etcdServer := etcd.EtcdServer{ ConfigFile: configFile, DataDir: dataDir, @@ -117,7 +116,7 @@ func setupEtcd(configFile, dataDir string, quit <-chan struct{}, wg *sync.WaitGr } func ensureRegionUnchanged(etcdClient *etcd.SimpleEtcd, region string) error { - glog.Infof("Ensuring region has not changed") + klog.V(2).Infof("ensuring region has not changed") var savedRegion string pair, err := etcdClient.Get(etcdClusterRegionPath) if err != nil { @@ -142,8 +141,6 @@ func ensureRegionUnchanged(etcdClient *etcd.SimpleEtcd, region string) error { func NewInstanceProvider(configFilePath, nodeName, internalIP string, daemonEndpointPort int32, debugServer bool, rm *manager.ResourceManager, systemQuit <-chan struct{}) (*InstanceProvider, error) { systemWG := &sync.WaitGroup{} - flag.CommandLine.Parse([]string{"--logtostderr", "--v=5"}) - execer := utilexec.New() ipt := utiliptables.New(execer, utiliptables.ProtocolIpv4) portManager := portmanager.NewPortManager(ipt) @@ -179,7 +176,7 @@ func NewInstanceProvider(configFilePath, nodeName, internalIP string, daemonEndp nametag = controllerID } - glog.Infof("ControllerID: %s", controllerID) + klog.V(2).Infof("ControllerID: %s", controllerID) certFactory, err := certs.New(etcdClient) if err != nil { @@ -220,10 +217,10 @@ func NewInstanceProvider(configFilePath, nodeName, internalIP string, daemonEndp return nil, fmt.Errorf("error validating server.yml: %v", errs.ToAggregate()) } - glog.Infof("Setting up events") + klog.V(2).Infof("setting up events") eventSystem := events.NewEventSystem(systemQuit, systemWG) - glog.Infof("Setting up registry") + klog.V(2).Infof("setting up registry") podRegistry := registry.NewPodRegistry( etcdClient, api.VersioningCodec{}, eventSystem, statefulValidator) nodeRegistry := registry.NewNodeRegistry( @@ -347,7 +344,7 @@ func NewInstanceProvider(configFilePath, nodeName, internalIP string, daemonEndp if ctrl, ok := controllers["ImageController"]; ok { azureImageController := ctrl.(*azure.ImageController) - glog.Infof("Downloading Milpa node image to local Azure subscription (this could take a few minutes)") + klog.V(2).Infof("downloading Milpa node image to the local Azure subscription (this could take a few minutes)") azureImageController.WaitForAvailable() } @@ -376,7 +373,7 @@ func (p *InstanceProvider) setupDebugServer() error { go func() { err := grpcServer.Serve(lis) if err != nil { - glog.Errorln("Error returned from Serve:", err) + klog.Errorln("Error returned from Serve:", err) } }() return nil @@ -405,10 +402,10 @@ func (p *InstanceProvider) addOrRemovePodPortMappings(pod *v1.Pod, add bool) err return fmt.Errorf("empty pod IP for %q %+v", pod.Name, portMappings) } if add { - glog.V(4).Infof("adding %q port mappings %+v", pod.Name, portMappings) + klog.V(4).Infof("adding %q port mappings %+v", pod.Name, portMappings) return p.portManager.AddPodPortMappings(podIP.String(), portMappings) } - glog.V(4).Infof("removing %q port mappings %+v", pod.Name, portMappings) + klog.V(4).Infof("removing %q port mappings %+v", pod.Name, portMappings) p.portManager.RemovePodPortMappings(podIP.String()) return nil }
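With the hard-coded flag.CommandLine.Parse([]string{"--logtostderr", "--v=5"}) call removed from NewInstanceProvider, logging verbosity now has to come from the command line. A minimal sketch of wiring that up with klog's standard InitFlags helper; the flag set name and the default values chosen here are assumptions for illustration.

package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// Register klog's flags on a dedicated FlagSet so verbosity can be
	// set from the command line instead of being baked in at startup.
	klogFlags := flag.NewFlagSet("klog", flag.ExitOnError)
	klog.InitFlags(klogFlags)

	// Defaults only; values passed by the CLI layer take precedence.
	_ = klogFlags.Set("logtostderr", "true")
	_ = klogFlags.Set("v", "2")

	klog.V(2).Info("klog configured")
	klog.Flush()
}

@@ 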
-416,12 +413,12 @@ func (p *InstanceProvider) addOrRemovePodPortMappings(pod *v1.Pod, add bool) err func (p *InstanceProvider) Handle(ev events.Event) error { milpaPod, ok := ev.Object.(*api.Pod) if !ok { - glog.Errorf("event %v with unknown object", ev) + klog.Errorf("event %v with unknown object", ev) return nil } pod, err := p.milpaToK8sPod(milpaPod) if err != nil { - glog.Errorf("converting milpa pod %s: %v", milpaPod.Name, err) + klog.Errorf("converting milpa pod %s: %v", milpaPod.Name, err) return nil } if ev.Status == events.PodUpdated { @@ -429,16 +426,16 @@ func (p *InstanceProvider) Handle(ev events.Event) error { pod.Status.PodIP != "" { // Pod is up and running, let's set up its hostport mappings. if err := p.addOrRemovePodPortMappings(pod, true); err != nil { - glog.Warningf("adding hostports %q: %v", milpaPod.Name, err) + klog.Warningf("adding hostports %q: %v", milpaPod.Name, err) } } else if api.IsTerminalPodPhase(milpaPod.Status.Phase) { // Remove port mappings if pod has been terminated or stopped. if err := p.addOrRemovePodPortMappings(pod, false); err != nil { - glog.Warningf("removing hostports %q: %v", milpaPod.Name, err) + klog.Warningf("removing hostports %q: %v", milpaPod.Name, err) } } } - glog.Infof("milpa pod %s event %v", milpaPod.Name, ev) + klog.V(4).Infof("milpa pod %q event %v", milpaPod.Name, ev) p.notifier(pod) return nil } @@ -451,7 +448,7 @@ func (p *InstanceProvider) Stop() { case <-waitGroupDone: return case <-time.After(time.Second * quitTimeout): - glog.Errorf( + klog.Errorf( "Loops were still running after %d seconds, forcing exit", quitTimeout) return @@ -460,7 +457,7 @@ func (p *InstanceProvider) Stop() { func waitForWaitGroup(wg *sync.WaitGroup, waitGroupDone chan struct{}) { wg.Wait() - glog.Info("All controllers have exited") + klog.V(2).Info("all controllers have exited") waitGroupDone <- struct{}{} } @@ -504,16 +501,16 @@ func (p *InstanceProvider) CreatePod(ctx context.Context, pod *v1.Pod) error { ctx, span := trace.StartSpan(ctx, "CreatePod") defer span.End() ctx = addAttributes(ctx, span, namespaceKey, pod.Namespace, nameKey, pod.Name) - glog.Infof("CreatePod %q", pod.Name) + klog.V(5).Infof("CreatePod %q", pod.Name) milpaPod, err := p.k8sToMilpaPod(pod) if err != nil { - glog.Errorf("CreatePod %q: %v", pod.Name, err) + klog.Errorf("CreatePod %q: %v", pod.Name, err) return err } podRegistry := p.getPodRegistry() _, err = podRegistry.CreatePod(milpaPod) if err != nil { - glog.Errorf("CreatePod %q: %v", pod.Name, err) + klog.Errorf("CreatePod %q: %v", pod.Name, err) return err } p.notifier(pod) @@ -524,16 +521,16 @@ func (p *InstanceProvider) UpdatePod(ctx context.Context, pod *v1.Pod) error { ctx, span := trace.StartSpan(ctx, "UpdatePod") defer span.End() ctx = addAttributes(ctx, span, namespaceKey, pod.Namespace, nameKey, pod.Name) - glog.Infof("UpdatePod %q", pod.Name) + klog.V(5).Infof("UpdatePod %q", pod.Name) milpaPod, err := p.k8sToMilpaPod(pod) if err != nil { - glog.Errorf("UpdatePod %q: %v", pod.Name, err) + klog.Errorf("UpdatePod %q: %v", pod.Name, err) return err } podRegistry := p.getPodRegistry() _, err = podRegistry.UpdatePodSpecAndLabels(milpaPod) if err != nil { - glog.Errorf("UpdatePod %q: %v", pod.Name, err) + klog.Errorf("UpdatePod %q: %v", pod.Name, err) return err } p.notifier(pod) @@ -544,16 +541,16 @@ func (p *InstanceProvider) DeletePod(ctx context.Context, pod *v1.Pod) (err erro ctx, span := trace.StartSpan(ctx, "DeletePod") defer span.End() ctx = addAttributes(ctx, span, namespaceKey, pod.Namespace, nameKey, 
pod.Name) - glog.Infof("DeletePod %q", pod.Name) + klog.V(5).Infof("DeletePod %q", pod.Name) milpaPod, err := p.k8sToMilpaPod(pod) if err != nil { - glog.Errorf("DeletePod %q: %v", pod.Name, err) + klog.Errorf("DeletePod %q: %v", pod.Name, err) return err } podRegistry := p.getPodRegistry() _, err = podRegistry.Delete(milpaPod.Name) if err != nil { - glog.Errorf("DeletePod %q: %v", pod.Name, err) + klog.Errorf("DeletePod %q: %v", pod.Name, err) return err } p.notifier(pod) @@ -564,19 +561,19 @@ func (p *InstanceProvider) GetPod(ctx context.Context, namespace, name string) ( ctx, span := trace.StartSpan(ctx, "GetPod") defer span.End() ctx = addAttributes(ctx, span, namespaceKey, namespace, nameKey, name) - glog.Infof("GetPod %q", name) + klog.V(5).Infof("GetPod %q", name) podRegistry := p.getPodRegistry() milpaPod, err := podRegistry.GetPod(util.WithNamespace(namespace, name)) if err != nil { if err == store.ErrKeyNotFound { return nil, errdefs.NotFoundf("pod %s/%s is not found", namespace, name) } - glog.Errorf("GetPod %q: %v", name, err) + klog.Errorf("GetPod %q: %v", name, err) return nil, err } pod, err := p.milpaToK8sPod(milpaPod) if err != nil { - glog.Errorf("GetPod %q: %v", name, err) + klog.Errorf("GetPod %q: %v", name, err) return nil, err } return pod, nil @@ -586,16 +583,16 @@ func (p *InstanceProvider) GetPodStatus(ctx context.Context, namespace, name str ctx, span := trace.StartSpan(ctx, "GetPodStatus") defer span.End() ctx = addAttributes(ctx, span, namespaceKey, namespace, nameKey, name) - glog.Infof("GetPodStatus %q", name) + klog.V(5).Infof("GetPodStatus %q", name) podRegistry := p.getPodRegistry() milpaPod, err := podRegistry.GetPod(util.WithNamespace(namespace, name)) if err != nil { - glog.Errorf("GetPodStatus %q: %v", name, err) + klog.Errorf("GetPodStatus %q: %v", name, err) return nil, err } pod, err := p.milpaToK8sPod(milpaPod) if err != nil { - glog.Errorf("GetPodStatus %q: %v", name, err) + klog.Errorf("GetPodStatus %q: %v", name, err) return nil, err } return &pod.Status, nil @@ -604,20 +601,20 @@ func (p *InstanceProvider) GetPodStatus(ctx context.Context, namespace, name str func (p *InstanceProvider) GetPods(ctx context.Context) ([]*v1.Pod, error) { ctx, span := trace.StartSpan(ctx, "GetPods") defer span.End() - glog.Infof("GetPods") + klog.V(5).Infof("GetPods") podRegistry := p.getPodRegistry() milpaPods, err := podRegistry.ListPods(func(pod *api.Pod) bool { return true }) if err != nil { - glog.Errorf("GetPods: %v", err) + klog.Errorf("GetPods: %v", err) return nil, err } pods := make([]*v1.Pod, len(milpaPods.Items)) for i, milpaPod := range milpaPods.Items { pods[i], err = p.milpaToK8sPod(milpaPod) if err != nil { - glog.Errorf("GetPods: %v", err) + klog.Errorf("GetPods: %v", err) return nil, err } } @@ -627,7 +624,7 @@ func (p *InstanceProvider) GetPods(ctx context.Context) ([]*v1.Pod, error) { func (p *InstanceProvider) ConfigureNode(ctx context.Context, n *v1.Node) { ctx, span := trace.StartSpan(ctx, "ConfigureNode") defer span.End() - glog.Infof("ConfigureNode") + klog.V(5).Infof("ConfigureNode") n.Status.Capacity = p.capacity() n.Status.Allocatable = p.capacity() n.Status.Conditions = p.nodeConditions() diff --git a/pkg/server/streaming_utils.go b/pkg/server/streaming_utils.go index 5a08390e..2f9b2ec2 100644 --- a/pkg/server/streaming_utils.go +++ b/pkg/server/streaming_utils.go @@ -12,8 +12,8 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/clientapi" "github.com/elotl/cloud-instance-provider/pkg/server/registry" 
"github.com/elotl/cloud-instance-provider/pkg/util" - "github.com/golang/glog" "golang.org/x/net/context" + "k8s.io/klog" ) type SendRecver interface { @@ -97,7 +97,7 @@ func (s InstanceProvider) grpcToWSPump(stream SendRecver, addresses []api.Networ if err != nil { if err != io.EOF { wrappedErr := util.WrapError(err, "Error in websocket send") - glog.Error(wrappedErr) + klog.Error(wrappedErr) return wrappedErr } return nil @@ -116,14 +116,14 @@ func (s InstanceProvider) grpcToWSPump(stream SendRecver, addresses []api.Networ // yuck, need to detect context being cancelled??? // I have a feeling I'm doing this wrong... default: - glog.Errorf("Error in grpc receive: %v", err) + klog.Errorf("Error in grpc receive: %v", err) } return } err = ws.WriteRaw(clientData.Data) if err != nil { if err != io.EOF { - glog.Errorf("Error in websocket send: %v", err) + klog.Errorf("Error in websocket send: %v", err) } return } diff --git a/pkg/server/update.go b/pkg/server/update.go index f140a8b3..7f287e01 100644 --- a/pkg/server/update.go +++ b/pkg/server/update.go @@ -7,8 +7,8 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/clientapi" "github.com/elotl/cloud-instance-provider/pkg/util" "github.com/elotl/cloud-instance-provider/pkg/util/yaml" - "github.com/golang/glog" "golang.org/x/net/context" + "k8s.io/klog" ) func (s InstanceProvider) Update(context context.Context, request *clientapi.UpdateRequest) (*clientapi.APIReply, error) { @@ -16,7 +16,7 @@ func (s InstanceProvider) Update(context context.Context, request *clientapi.Upd return notTheLeaderReply(), nil } _, objectKind, err := VersionAndKind(request.Manifest) - glog.Infof("Update request for: %s", objectKind) + klog.V(2).Infof("Update request for: %s", objectKind) if err != nil { return errToAPIReply( util.WrapError(err, "Error determining manifest kind")), nil diff --git a/pkg/util/controllerqueue/queue.go b/pkg/util/controllerqueue/queue.go index 822b8f1c..d9d3d8e1 100644 --- a/pkg/util/controllerqueue/queue.go +++ b/pkg/util/controllerqueue/queue.go @@ -3,9 +3,9 @@ package controllerqueue import ( "time" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" + "k8s.io/klog" ) type QueueFunc func(interface{}) error @@ -74,12 +74,12 @@ func (cq *Queue) handleErr(err error, key interface{}) { } if cq.queue.NumRequeues(key) < cq.maxRetries { - glog.Infof("Error syncing %s %q, retrying. Error: %v", cq.name, key, err) + klog.V(2).Infof("Error syncing %s %q, retrying. 
Error: %v", cq.name, key, err) cq.queue.AddRateLimited(key) return } - glog.Warningf("Dropping %s %q out of the queue: %v", cq.name, key, err) + klog.Warningf("Dropping %s %q out of the queue: %v", cq.name, key, err) cq.queue.Forget(key) } diff --git a/pkg/util/errors.go b/pkg/util/errors.go index bc7884b3..041919b0 100644 --- a/pkg/util/errors.go +++ b/pkg/util/errors.go @@ -3,7 +3,7 @@ package util import ( "fmt" - "github.com/golang/glog" + "k8s.io/klog" ) type WrappedError struct { @@ -29,7 +29,7 @@ func WrapError(err error, format string, args ...interface{}) error { msg = err.Error() } } else { - glog.Errorln("WrapError: nil error:", s) + klog.Errorln("WrapError: nil error:", s) msg = s } if we, ok := err.(WrappedError); ok { diff --git a/pkg/util/filewatcher/filewatcher.go b/pkg/util/filewatcher/filewatcher.go index ec5977f4..78e7c408 100644 --- a/pkg/util/filewatcher/filewatcher.go +++ b/pkg/util/filewatcher/filewatcher.go @@ -5,7 +5,7 @@ import ( "os" "time" - "github.com/golang/glog" + "k8s.io/klog" ) // Watches a file on the local filesystem pointed to by path. This @@ -49,12 +49,12 @@ func (fw *File) refresh() (changed bool) { if fw.statTime.Add(fw.CheckPeriod).Before(now) { info, err := os.Stat(fw.path) if err != nil { - glog.Warningf("Error getting file info at %s: %s", fw.path, err) + klog.Warningf("Error getting file info at %s: %s", fw.path, err) } if info.ModTime().After(fw.modTime) { c, err := ioutil.ReadFile(fw.path) if err != nil { - glog.Warningf("Error reading contents of file at %s: %s", fw.path, err) + klog.Warningf("Error reading contents of file at %s: %s", fw.path, err) return } changed = true diff --git a/pkg/util/instanceselector/instanceselector.go b/pkg/util/instanceselector/instanceselector.go index 6b0b08ad..5a761b1d 100644 --- a/pkg/util/instanceselector/instanceselector.go +++ b/pkg/util/instanceselector/instanceselector.go @@ -9,8 +9,8 @@ import ( "github.com/elotl/cloud-instance-provider/pkg/api" "github.com/elotl/cloud-instance-provider/pkg/util" "github.com/elotl/cloud-instance-provider/pkg/util/sets" - "github.com/golang/glog" "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog" ) const t2UnlimitedPrice float32 = 0.05 @@ -170,15 +170,15 @@ func findCheapestInstance(matches []InstanceData) string { func (instSel *instanceSelector) getInstanceFromResources(rs api.ResourceSpec) (string, bool) { memoryRequirement, err := instSel.parseMemorySpec(rs.Memory) if err != nil { - glog.Errorf("Error parsing memory spec: %s", err) + klog.Errorf("Error parsing memory spec: %s", err) } cpuRequirements, err := parseCPUSpec(rs.CPU) if err != nil { - glog.Errorf("Error parsing CPU spec: %s", err) + klog.Errorf("Error parsing CPU spec: %s", err) } gpuRequirements, err := parseGPUSpec(rs.GPU) if err != nil { - glog.Errorf("Error parsing GPU spec: %s", err) + klog.Errorf("Error parsing GPU spec: %s", err) } matches := filterInstanceData(instSel.data, func(inst InstanceData) bool { @@ -267,7 +267,7 @@ func ResourcesToInstanceType(ps *api.PodSpec) (string, bool, error) { } if selector == nil { msg := "fatal: instanceselector has not been initialized" - glog.Errorf(msg) + klog.Error(msg) return "", false, fmt.Errorf(msg) } if noResourceSpecified(ps) {
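getInstanceFromResources filters the instance table by the parsed resource requirements, and findCheapestInstance then picks the lowest-priced match. A simplified sketch of that filter-then-minimize pattern; the InstanceData shape, field names, and prices below are invented for illustration and are not the provider's actual data model.

package main

import "fmt"

// InstanceData is an illustrative shape; the real type carries more
// fields (GPU counts, burstable pricing, and so on).
type InstanceData struct {
	InstanceType string
	Memory       float32 // GiB
	CPU          float32 // vCPUs
	Price        float32 // $/hour
}

// cheapestMatch filters instances that satisfy the resource request
// and returns the cheapest one, mirroring filterInstanceData plus
// findCheapestInstance above (simplified).
func cheapestMatch(data []InstanceData, memGiB, cpus float32) (string, bool) {
	best := ""
	bestPrice := float32(0)
	for _, inst := range data {
		if inst.Memory < memGiB || inst.CPU < cpus {
			continue // does not satisfy the request
		}
		if best == "" || inst.Price < bestPrice {
			best, bestPrice = inst.InstanceType, inst.Price
		}
	}
	return best, best != ""
}

func main() {
	data := []InstanceData{
		{"t3.small", 2, 2, 0.021},
		{"t3.medium", 4, 2, 0.042},
	}
	fmt.Println(cheapestMatch(data, 3, 2)) // t3.medium true
}

diff --git a/pkg/util/loop/loop.go b/pkg/util/loop/loop.go index 445d7c80..ca6421d6 100644 --- a/pkg/util/loop/loop.go +++ b/pkg/util/loop/loop.go @@ -4,7 +4,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "k8s.io/klog" ) type LoopFunc func() error @@ -36,11 +36,11 @@ func (loop *Loop) run(quit <-chan struct{}, wg 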
*sync.WaitGroup) { case <-tick.C: err := loop.f() if err != nil { - glog.Errorf("Error executing %s Loop: %s", loop.name, err.Error()) + klog.Errorf("Error executing %s Loop: %s", loop.name, err.Error()) } case <-quit: tick.Stop() - glog.Infof("Exiting %s Loop", loop.name) + klog.V(2).Infof("Exiting %s Loop", loop.name) return } } diff --git a/pkg/util/naming.go b/pkg/util/naming.go index 725a7b50..f6176b37 100644 --- a/pkg/util/naming.go +++ b/pkg/util/naming.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "k8s.io/klog" ) const ( @@ -60,7 +60,7 @@ func CreateContainerId(podName, unitName string) string { func ContainerIdToPodAndUnitName(containerId string) (string, string) { parts := strings.Split(containerId, "..") if len(parts) != 2 { - glog.Errorf("Invalid container ID %s", containerId) + klog.Errorf("Invalid container ID %s", containerId) return "", "" } podName := parts[0] diff --git a/pkg/util/tarutil/tar.go b/pkg/util/tarutil/tar.go index 36b23182..49458d07 100644 --- a/pkg/util/tarutil/tar.go +++ b/pkg/util/tarutil/tar.go @@ -9,7 +9,7 @@ import ( "os" "path/filepath" - "github.com/golang/glog" + "k8s.io/klog" ) func CreatePackage(hostRootfs string, paths []string) (io.Reader, error) { @@ -39,10 +39,10 @@ } func AddFile(tw *tar.Writer, source, target string) error { - glog.Infof("Adding file %s->%s to package\n", source, target) + klog.V(2).Infof("Adding file %s->%s to package", source, target) fi, err := os.Lstat(source) if err != nil { - glog.Errorf("Error LStat()ing %s: %v", source, err) + klog.Errorf("Error LStat()ing %s: %v", source, err) return err } sldest := "" @@ -50,7 +50,7 @@ // Check what the symlink points to. sldest, err = os.Readlink(source) if err != nil { - glog.Errorf("Error Readlink() %s: %v", source, err) + klog.Errorf("Error Readlink() %s: %v", source, err) return err } } @@ -60,14 +60,14 @@ } header, err := tar.FileInfoHeader(fi, sldest) if err != nil { - glog.Errorf("Error creating tar header for %s: %v", source, err) + klog.Errorf("Error creating tar header for %s: %v", source, err) return err } // Files/directories are inside a top-level directory called "ROOTFS" // in Milpa packages. 
header.Name = filepath.Join(".", "ROOTFS", target) if err = tw.WriteHeader(header); err != nil { - glog.Errorf("Error writing tar header for %s->%s: %v", + klog.Errorf("Error writing tar header for %s->%s: %v", source, target, err) return err } @@ -77,17 +77,17 @@ func AddFile(tw *tar.Writer, source, target string) error { } file, err := os.Open(source) if err != nil { - glog.Errorf("Error trying to open %s: %v", source, err) + klog.Errorf("Error trying to open %s: %v", source, err) return err } defer file.Close() n, err := io.CopyN(tw, file, fi.Size()) if err != nil { - glog.Errorf("Error copying contents of %s->%s into tarball: %v", + klog.Errorf("Error copying contents of %s->%s into tarball: %v", source, target, err) return err } - glog.Infof("Copied %d bytes for %s->%s\n", n, source, target) + klog.V(2).Infof("Copied %d bytes for %s->%s", n, source, target) return nil } diff --git a/pkg/util/yaml/decoder.go b/pkg/util/yaml/decoder.go index 6ebfaea7..c11576e7 100644 --- a/pkg/util/yaml/decoder.go +++ b/pkg/util/yaml/decoder.go @@ -27,7 +27,7 @@ import ( "unicode" "github.com/ghodss/yaml" - "github.com/golang/glog" + "k8s.io/klog" ) // ToJSON converts a single YAML document into a JSON document @@ -217,11 +217,11 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if d.decoder == nil { buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) if isJSON { - glog.V(4).Infof("decoding stream as JSON") + klog.V(4).Infof("decoding stream as JSON") d.decoder = json.NewDecoder(buffer) d.rawData = origData } else { - glog.V(4).Infof("decoding stream as YAML") + klog.V(4).Infof("decoding stream as YAML") d.decoder = NewYAMLToJSONDecoder(buffer) } } @@ -230,7 +230,7 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if syntax, ok := err.(*json.SyntaxError); ok { data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) if readErr != nil { - glog.V(4).Infof("reading stream failed: %v", readErr) + klog.V(4).Infof("reading stream failed: %v", readErr) } js := string(data)