diff --git a/.gitignore b/.gitignore
index b3dacc9..29b867e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@ venv
 cmd/test
 configs/kafka_brokers.json
 kubernetes/.packaged
+*.swp
 
 # Prevent certificates from getting checked in
 *.ca
@@ -13,4 +14,4 @@ kubernetes/.packaged
 *.crt
 *.csr
 *.key
-*.pem
\ No newline at end of file
+*.pem
diff --git a/.version b/.version
index bafceb3..7cca401 100644
--- a/.version
+++ b/.version
@@ -1 +1 @@
-2.31.0
+2.32.0
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8300597..f030e12 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,12 @@ Removed - for now removed features
 Fixed - for any bug fixes
 Security - in case of vulnerabilities
 -->
+## [2.32.0] - 2024-05-07
+
+### Added
+
+- CASMHMS-6202 - Added custom logging features to log messages per xname
+
 ## [2.31.0] - 2024-04-19
 
 ### Changed
diff --git a/cmd/hmcollector/hmcollector.go b/cmd/hmcollector/hmcollector.go
index f9beda6..23d7621 100644
--- a/cmd/hmcollector/hmcollector.go
+++ b/cmd/hmcollector/hmcollector.go
@@ -1,6 +1,6 @@
 // MIT License
 //
-// (C) Copyright [2020-2021] Hewlett Packard Enterprise Development LP
+// (C) Copyright [2020-2021,2024] Hewlett Packard Enterprise Development LP
 //
 // Permission is hereby granted, free of charge, to any person obtaining a
 // copy of this software and associated documentation files (the "Software"),
@@ -83,6 +83,14 @@ var (
 	logInsecFailover = flag.Bool("hmcollector_log_insecure_failover", true,
 		"Log/don't log TLS insecure failovers.")
 	httpTimeout = flag.Int("http_timeout", 10, "Timeout in seconds for HTTP operations.")
+	// vars for custom logging configuration
+	logModesEnv = ""
+	logXnamesEnv = ""
+	shouldLogErrors = false
+	shouldLogForXnames = false
+	logXnames = make(map[string]struct{})
+	logAllowedModes = []string{"errors"}
+
 	// This is really a hacky option that should only be used when incoming timestamps can't be trusted.
 	// For example, if NTP isn't working and the controllers are reporting their time as from 1970.
 	IgnoreProvidedTimestamp = flag.Bool("ignore_provided_timestamp", false,
@@ -189,6 +197,15 @@ func doUpdateHSMEndpoints() {
 	logger.Info("HSM endpoint monitoring routine shutdown.")
 }
 
+func fillMap(m map[string]struct{}, values string) {
+	if values != "" {
+		for _, v := range strings.Split(values, ",") {
+			m[v] = struct{}{}
+		}
+	}
+
+}
+
 func SetupLogging() {
 	logLevel := os.Getenv("LOG_LEVEL")
 	logLevel = strings.ToUpper(logLevel)
@@ -218,6 +235,32 @@ func SetupLogging() {
 	default:
 		atomicLevel.SetLevel(zap.InfoLevel)
 	}
+
+	// setup the custom logging config
+	logModesEnv = os.Getenv("LOG_MODES")
+	logModes := make(map[string]struct{})
+	fillMap(logModes, logModesEnv)
+	_, shouldLogErrors = logModes["errors"]
+	logXnamesEnv = os.Getenv("LOG_XNAMES")
+	fillMap(logXnames, logXnamesEnv)
+	shouldLogForXnames = len(logXnames) > 0
+
+	// log the custom logging config
+	logger.Info("Extended logging config", zap.String("modes", logModesEnv), zap.String("xnames", logXnamesEnv), zap.Any("supported modes", logAllowedModes))
+	for mode, _ := range logModes {
+		validMode := false
+		for _, allowedMode := range logAllowedModes {
+			if mode == allowedMode {
+				validMode = true
+				break
+			}
+		}
+		if !validMode {
+			logger.Error("Invalid log mode in LOG_MODES environment variable",
+				zap.String("invalid mode", mode),
+				zap.Any("supported modes", logAllowedModes))
+		}
+	}
 }
 
 // This function is used to set up an HTTP validated/non-validated client
diff --git a/cmd/hmcollector/kafka.go b/cmd/hmcollector/kafka.go
index 61f33b8..d0f0ed3 100644
--- a/cmd/hmcollector/kafka.go
+++ b/cmd/hmcollector/kafka.go
@@ -1,6 +1,6 @@
 // MIT License
 //
-// (C) Copyright [2020-2021] Hewlett Packard Enterprise Development LP
+// (C) Copyright [2020-2021,2024] Hewlett Packard Enterprise Development LP
 //
 // Permission is hereby granted, free of charge, to any person obtaining a
 // copy of this software and associated documentation files (the "Software"),
@@ -49,8 +49,10 @@ func writeToKafka(topic, payload string, messageID *string) {
 		Timestamp: time.Now(),
 	}
 
+	mId := ""
 	if messageID != nil {
 		msg.Key = []byte(*messageID)
+		mId = *messageID
 	}
 
 	// Loop on array index to avoid copy overhead of range
@@ -60,6 +62,7 @@ func writeToKafka(topic, payload string, messageID *string) {
 		brokerLogger := logger.With(
 			zap.String("broker", thisBroker.BrokerAddress),
 			zap.String("topic", topic),
+			zap.String("id", mId),
 		)
 		if atomicLevel.Level() == zap.DebugLevel {
 			// If we're at debug level, then also include the message.
@@ -69,9 +72,16 @@ func writeToKafka(topic, payload string, messageID *string) {
 
 		if _, hasTopic := thisBroker.TopicsToPublish[topic]; hasTopic {
 			brokerLogger.Debug("Sent message.", zap.String("msg.Value", string(msg.Value)))
+			if shouldLogMessage(mId) {
+				brokerLogger.Info("message", zap.String("msg.Value", string(msg.Value)))
+			}
 			produceErr := thisBroker.KafkaProducer.Produce(&msg, nil)
 			if produceErr != nil {
-				brokerLogger.Error("Failed to produce message!")
+				if shouldLogErrors {
+					brokerLogger.Error("Failed to produce message!", zap.Error(produceErr))
+				} else {
+					brokerLogger.Error("Failed to produce message!")
+				}
 			}
 		} else {
 			brokerLogger.Debug("Not sending message to broker because topic not in list")
@@ -80,6 +90,16 @@ func writeToKafka(topic, payload string, messageID *string) {
 
 }
 
+func shouldLogMessage(id string) bool {
+	if shouldLogForXnames {
+		ids := strings.Split(id, ".")
+		xname := ids[0]
+		_, matches := logXnames[xname]
+		return matches
+	}
+	return false
+}
+
 func handleKafkaEvents(broker *hmcollector.KafkaBroker) {
 	for event := range broker.KafkaProducer.Events() {
 		switch ev := event.(type) {
@@ -90,7 +110,11 @@ func handleKafkaEvents(broker *hmcollector.KafkaBroker) {
 			)
 
 			if ev.TopicPartition.Error != nil {
-				eventLogger.Error("Failed to produce message!")
+				if shouldLogErrors {
+					eventLogger.Error("Failed to produce message!", zap.Error(ev.TopicPartition.Error))
+				} else {
+					eventLogger.Error("Failed to produce message!")
+				}
 			} else {
 				eventLogger.Debug("Produced message.")
 			}
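
For context, a minimal sketch (not part of the diff) of how the new LOG_MODES and LOG_XNAMES environment variables behave, assuming Kafka message IDs take the form "<xname>.<suffix>" as implied by shouldLogMessage. The helper names (fillSet, shouldLog) and the sample xnames and message IDs below are hypothetical illustrations, not values from the collector.

// Sketch of the LOG_MODES / LOG_XNAMES handling added in this change.
// Helper names and sample xnames are hypothetical.
package main

import (
	"fmt"
	"os"
	"strings"
)

// fillSet splits a comma-separated environment value into a set,
// mirroring fillMap in hmcollector.go.
func fillSet(values string) map[string]struct{} {
	set := make(map[string]struct{})
	if values != "" {
		for _, v := range strings.Split(values, ",") {
			set[v] = struct{}{}
		}
	}
	return set
}

// shouldLog mirrors shouldLogMessage in kafka.go: the xname is the part of
// the message ID before the first ".", and the full payload is logged at
// Info level only when that xname was listed in LOG_XNAMES.
func shouldLog(id string, xnames map[string]struct{}) bool {
	if len(xnames) == 0 {
		return false
	}
	xname := strings.Split(id, ".")[0]
	_, ok := xnames[xname]
	return ok
}

func main() {
	// Hypothetical settings; in the collector these come from the pod environment.
	os.Setenv("LOG_MODES", "errors")                     // "errors" adds zap.Error detail to produce failures
	os.Setenv("LOG_XNAMES", "x3000c0s17b1,x3000c0s19b2") // dump messages only for these xnames

	modes := fillSet(os.Getenv("LOG_MODES"))
	_, logErrors := modes["errors"]
	xnames := fillSet(os.Getenv("LOG_XNAMES"))

	fmt.Println(logErrors)                                // true
	fmt.Println(shouldLog("x3000c0s17b1.Events", xnames)) // true: xname is in LOG_XNAMES
	fmt.Println(shouldLog("x9000c1s0b0.Events", xnames))  // false: xname not listed
}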