Commit

chore: upgrade Go to v1.19.3 (1.x) (influxdata#23941)
chengshiwen committed Jul 13, 2024
1 parent ab4f7a9 commit cf343f7
Showing 29 changed files with 64 additions and 92 deletions.
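
Most of the changes below are mechanical doc-comment reformatting: gofmt in Go 1.19 rewrites list items as `-` bullets with a consistent indent, indents preformatted blocks, drops trailing empty `//` lines, and moves `//go:` directives to the end of the doc comment after a blank `//` line. A minimal sketch of the resulting style, with invented identifiers (this is not code from the commit):

```go
package main

import "fmt"

// pick returns the preferred candidate. Candidates are preferred in this order:
//
//   - the candidate with the earliest end time;
//   - (assuming identical end times) the one with the earliest start time.
//
// The directive below stays as the last line of the doc comment, separated
// from the prose by a blank // line, which is where gofmt now places it.
//
//go:noinline
func pick(candidates []string) string {
	if len(candidates) == 0 {
		return ""
	}
	return candidates[0]
}

func main() {
	fmt.Println(pick([]string{"sg1", "sg2"}))
}
```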
2 changes: 1 addition & 1 deletion cmd/influx_tools/internal/errlist/errlist.go
@@ -22,7 +22,7 @@ func (el *ErrorList) Add(err error) {
el.errs = append(el.errs, err)
}

//Err returns whether or not an error list is an error.
// Err returns whether or not an error list is an error.
func (el *ErrorList) Err() error {
if len(el.errs) == 0 {
return nil
10 changes: 5 additions & 5 deletions cmd/influxd/run/command.go
@@ -273,11 +273,11 @@ type Options struct {

// GetConfigPath returns the config path from the options.
// It will return a path by searching in this order:
// 1. The CLI option in ConfigPath
// 2. The environment variable INFLUXDB_CONFIG_PATH
// 3. The first influxdb.conf file on the path:
// - ~/.influxdb
// - /etc/influxdb
// 1. The CLI option in ConfigPath
// 2. The environment variable INFLUXDB_CONFIG_PATH
// 3. The first influxdb.conf file on the path:
// - ~/.influxdb
// - /etc/influxdb
func (opt *Options) GetConfigPath() string {
if opt.ConfigPath != "" {
if opt.ConfigPath == os.DevNull {
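
As an aside, a standalone sketch of the search order described in the comment above; the real GetConfigPath also handles os.DevNull and other edge cases not shown in this hunk:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// configPath mirrors the documented order: CLI flag, then the
// INFLUXDB_CONFIG_PATH environment variable, then the first influxdb.conf
// found under ~/.influxdb or /etc/influxdb.
func configPath(cliPath string) string {
	if cliPath != "" {
		return cliPath // 1. the CLI option
	}
	if p := os.Getenv("INFLUXDB_CONFIG_PATH"); p != "" {
		return p // 2. the environment variable
	}
	home, _ := os.UserHomeDir()
	for _, dir := range []string{filepath.Join(home, ".influxdb"), "/etc/influxdb"} {
		p := filepath.Join(dir, "influxdb.conf")
		if _, err := os.Stat(p); err == nil {
			return p // 3. the first influxdb.conf on the path
		}
	}
	return ""
}

func main() {
	fmt.Println(configPath(""))
}
```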
6 changes: 2 additions & 4 deletions coordinator/points_writer.go
@@ -269,8 +269,8 @@ func (l sgList) Covers(t time.Time) bool {
// to start time. Therefore, if there are multiple shard groups that match
// this point's time they will be preferred in this order:
//
// - a shard group with the earliest end time;
// - (assuming identical end times) the shard group with the earliest start time.
// - a shard group with the earliest end time;
// - (assuming identical end times) the shard group with the earliest start time.
func (l sgList) ShardGroupAt(t time.Time) *meta.ShardGroupInfo {
if l.items.Len() == 0 {
return nil
@@ -349,7 +349,6 @@ const (
)

// WritePointsWithContext writes data to the underlying storage. consistencyLevel and user are only used for clustered scenarios.
//
func (w *PointsWriter) WritePointsWithContext(ctx context.Context, database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, user meta.User, points []models.Point) error {
return w.WritePointsPrivilegedWithContext(ctx, database, retentionPolicy, consistencyLevel, points)
}
@@ -364,7 +363,6 @@ func (w *PointsWriter) WritePointsPrivileged(database, retentionPolicy string, c
// If a request for StatPointsWritten or StatValuesWritten of type ContextKey is
// sent via context values, this stores the total points and fields written in
// the memory pointed to by the associated int64 pointers.
//
func (w *PointsWriter) WritePointsPrivilegedWithContext(ctx context.Context, database, retentionPolicy string, consistencyLevel models.ConsistencyLevel, points []models.Point) error {
atomic.AddInt64(&w.stats.WriteReq, 1)
atomic.AddInt64(&w.stats.PointWriteReq, int64(len(points)))
6 changes: 3 additions & 3 deletions monitor/diagnostics/diagnostics.go
@@ -23,9 +23,9 @@ func (f ClientFunc) Diagnostics() (*Diagnostics, error) {
// the values for each column, by row. This information is never written to an InfluxDB
// system and is display-only. An example showing, say, connections follows:
//
// source_ip source_port dest_ip dest_port
// 182.1.0.2 2890 127.0.0.1 38901
// 174.33.1.2 2924 127.0.0.1 38902
// source_ip source_port dest_ip dest_port
// 182.1.0.2 2890 127.0.0.1 38901
// 174.33.1.2 2924 127.0.0.1 38902
type Diagnostics struct {
Columns []string
Rows [][]interface{}
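
A small usage sketch for the Columns/Rows layout described above, populated with the connection example from the comment. The struct is redeclared locally with the fields shown in this hunk so the snippet stays self-contained instead of importing monitor/diagnostics:

```go
package main

import "fmt"

// Diagnostics mirrors the Columns/Rows fields shown in the hunk above.
type Diagnostics struct {
	Columns []string
	Rows    [][]interface{}
}

func main() {
	d := Diagnostics{
		Columns: []string{"source_ip", "source_port", "dest_ip", "dest_port"},
		Rows: [][]interface{}{
			{"182.1.0.2", 2890, "127.0.0.1", 38901},
			{"174.33.1.2", 2924, "127.0.0.1", 38902},
		},
	}
	fmt.Println(d.Columns)
	for _, row := range d.Rows {
		fmt.Println(row...)
	}
}
```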
6 changes: 4 additions & 2 deletions pkg/encoding/simple8b/encoding.go
@@ -468,8 +468,9 @@ func Decode(dst *[240]uint64, v uint64) (n int, err error) {

// Decode writes the uncompressed values from src to dst. It returns the number
// of values written or an error.
//go:nocheckptr
// nocheckptr while the underlying struct layout doesn't change
//
//go:nocheckptr
func DecodeAll(dst, src []uint64) (value int, err error) {
j := 0
for _, v := range src {
@@ -482,8 +483,9 @@ func DecodeAll(dst, src []uint64) (value int, err error) {

// DecodeBytesBigEndian writes the compressed, big-endian values from src to dst. It returns the number
// of values written or an error.
//go:nocheckptr
// nocheckptr while the underlying struct layout doesn't change
//
//go:nocheckptr
func DecodeBytesBigEndian(dst []uint64, src []byte) (value int, err error) {
if len(src)&7 != 0 {
return 0, errors.New("src length is not multiple of 8")
8 changes: 4 additions & 4 deletions pkg/estimator/hll/hll.go
@@ -4,10 +4,10 @@
//
// The differences are that the implementation in this package:
//
// * uses an AMD64 optimised xxhash algorithm instead of murmur;
// * uses some AMD64 optimisations for things like clz;
// * works with []byte rather than a Hash64 interface, to reduce allocations;
// * implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
// - uses an AMD64 optimised xxhash algorithm instead of murmur;
// - uses some AMD64 optimisations for things like clz;
// - works with []byte rather than a Hash64 interface, to reduce allocations;
// - implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler
//
// Based on some rough benchmarking, this implementation of HyperLogLog++ is
// around twice as fast as the github.com/clarkduvall/hyperloglog implementation.
2 changes: 1 addition & 1 deletion pkg/tar/stream.go
@@ -62,7 +62,7 @@ func StreamFile(f os.FileInfo, shardRelativePath, fullPath string, tw *tar.Write
return StreamRenameFile(f, f.Name(), shardRelativePath, fullPath, tw)
}

/// Stream a single file to tw, using tarHeaderFileName instead of the actual filename
// Stream a single file to tw, using tarHeaderFileName instead of the actual filename
// e.g., when we want to write a *.tmp file using the original file's non-tmp name.
func StreamRenameFile(f os.FileInfo, tarHeaderFileName, relativePath, fullPath string, tw *tar.Writer) error {
h, err := tar.FileInfoHeader(f, f.Name())
3 changes: 1 addition & 2 deletions pkg/tracing/doc.go
@@ -3,7 +3,7 @@ Package tracing provides a way for capturing hierarchical traces.
To start a new trace with a root span named select
trace, span := tracing.NewTrace("select")
trace, span := tracing.NewTrace("select")
It is recommended that a span be forwarded to callees using the
context package. Firstly, create a new context with the span associated
@@ -21,6 +21,5 @@ Once the trace is complete, it may be converted to a graph with the Tree method.
The tree is intended to be used with the Walk function in order to generate
different presentations. The default Tree#String method returns a tree.
*/
package tracing
2 changes: 1 addition & 1 deletion pkg/tracing/fields/field.go
@@ -50,7 +50,7 @@ func Bool(key string, val bool) Field {
}
}

/// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
func Int64(key string, val int64) Field {
return Field{
key: key,
1 change: 0 additions & 1 deletion pkg/tracing/wire/binary.go
@@ -1,6 +1,5 @@
/*
Package wire is used to serialize a trace.
*/
package wire

4 changes: 2 additions & 2 deletions query/functions.go
@@ -1206,8 +1206,8 @@ func (r *UnsignedCumulativeSumReducer) Emit() []UnsignedPoint {

// FloatHoltWintersReducer forecasts a series into the future.
// This is done using the Holt-Winters damped method.
// 1. Using the series the initial values are calculated using a SSE.
// 2. The series is forecasted into the future using the iterative relations.
// 1. Using the series the initial values are calculated using a SSE.
// 2. The series is forecasted into the future using the iterative relations.
type FloatHoltWintersReducer struct {
// Season period
m int
18 changes: 10 additions & 8 deletions services/graphite/parser.go
@@ -396,16 +396,18 @@ type nodes []*node
// less than a non-wildcard value.
//
// For example, the filters:
// "*.*"
// "servers.*"
// "servers.localhost"
// "*.localhost"
//
// "*.*"
// "servers.*"
// "servers.localhost"
// "*.localhost"
//
// Would be sorted as:
// "servers.localhost"
// "servers.*"
// "*.localhost"
// "*.*"
//
// "servers.localhost"
// "servers.*"
// "*.localhost"
// "*.*"
func (n *nodes) Less(j, k int) bool {
if (*n)[j].value == "*" && (*n)[k].value != "*" {
return false
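
For illustration, a standalone sketch (not the parser's actual Less method) of the ordering described above: filters are compared segment by segment, and a `*` wildcard sorts after any literal value, so the most specific filters come first:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// lessFilter compares two dot-separated graphite filters segment by segment;
// a "*" segment sorts after any literal segment.
func lessFilter(a, b string) bool {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) && i < len(bs); i++ {
		if as[i] == bs[i] {
			continue
		}
		if as[i] == "*" {
			return false // wildcard segments sort last
		}
		if bs[i] == "*" {
			return true
		}
		return as[i] < bs[i]
	}
	return len(as) < len(bs)
}

func main() {
	filters := []string{"*.*", "servers.*", "servers.localhost", "*.localhost"}
	sort.Slice(filters, func(i, j int) bool { return lessFilter(filters[i], filters[j]) })
	fmt.Println(filters) // [servers.localhost servers.* *.localhost *.*]
}
```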
1 change: 0 additions & 1 deletion services/httpd/handler.go
@@ -814,7 +814,6 @@ func (h *Handler) async(q *influxql.Query, results <-chan *query.Result) {
// in the database URL query value. It is encoded using a forward slash like
// "database/retentionpolicy" and we should be able to simply split that string
// on the forward slash.
//
func bucket2dbrp(bucket string) (string, string, error) {
// test for a slash in our bucket name.
switch idx := strings.IndexByte(bucket, '/'); idx {
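
A tiny standalone sketch of the split described above, where a v2 bucket encodes "database/retentionpolicy"; the real bucket2dbrp also validates empty components, which this sketch omits:

```go
package main

import (
	"fmt"
	"strings"
)

// splitBucket splits a v2 bucket value on the first forward slash.
func splitBucket(bucket string) (db, rp string) {
	if idx := strings.IndexByte(bucket, '/'); idx >= 0 {
		return bucket[:idx], bucket[idx+1:]
	}
	return bucket, "" // no slash: treat the whole value as the database
}

func main() {
	db, rp := splitBucket("telegraf/autogen")
	fmt.Println(db, rp) // telegraf autogen
}
```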
17 changes: 8 additions & 9 deletions services/httpd/pprof.go
@@ -35,17 +35,17 @@ func (h *Handler) handleProfiles(w http.ResponseWriter, r *http.Request) {
}

// archiveProfilesAndQueries collects the following profiles:
// - goroutine profile
// - heap profile
// - blocking profile
// - mutex profile
// - (optionally) CPU profile
// - goroutine profile
// - heap profile
// - blocking profile
// - mutex profile
// - (optionally) CPU profile
//
// It also collects the following query results:
//
// - SHOW SHARDS
// - SHOW STATS
// - SHOW DIAGNOSTICS
// - SHOW SHARDS
// - SHOW STATS
// - SHOW DIAGNOSTICS
//
// All information is added to a tar archive and then compressed, before being
// returned to the requester as an archive file. Where profiles support debug
@@ -60,7 +60,6 @@ func (h *Handler) handleProfiles(w http.ResponseWriter, r *http.Request) {
//
// The value after the `cpu` query parameter is not actually important, as long
// as there is something there.
//
func (h *Handler) archiveProfilesAndQueries(w http.ResponseWriter, r *http.Request) {
// prof describes a profile name and a debug value, or in the case of a CPU
// profile, the number of seconds to collect the profile for.
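
For context, a client-side sketch for the handler described above. The URL is an assumption (the default 1.x bind address and the /debug/pprof/all route are not shown in this hunk); per the comment, any value after the `cpu` parameter enables the optional CPU profile:

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// The value of "cpu" does not matter, only its presence.
	resp, err := http.Get("http://localhost:8086/debug/pprof/all?cpu=true")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Save the compressed tar archive of profiles and query results to disk.
	out, err := os.Create("profiles.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, resp.Body); err != nil {
		log.Fatal(err)
	}
}
```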
3 changes: 2 additions & 1 deletion services/httpd/response_logger.go
@@ -78,7 +78,8 @@ func redactPassword(r *http.Request) {
// in addition to the common fields, we also append referrer, user agent,
// request ID and response time (microseconds)
// ie, in apache mod_log_config terms:
// %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" %L %D
//
// %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\"" %L %D
func buildLogLine(l *responseLogger, r *http.Request, start time.Time) string {

redactPassword(r)
1 change: 0 additions & 1 deletion services/httpd/v2_write_test.go
@@ -4,7 +4,6 @@ import "testing"

// test of how we extract the database and retention policy from the bucket in
// our v2 api endpoint.
//
func TestV2DatabaseRetentionPolicyMapper(t *testing.T) {
tests := map[string]struct {
input string
1 change: 0 additions & 1 deletion services/meta/client.go
@@ -209,7 +209,6 @@ func (c *Client) CreateDatabase(name string) (*DatabaseInfo, error) {
// This call is only idempotent when the caller provides the exact same
// retention policy, and that retention policy is already the default for the
// database.
//
func (c *Client) CreateDatabaseWithRetentionPolicy(name string, spec *RetentionPolicySpec) (*DatabaseInfo, error) {
if spec == nil {
return nil, errors.New("CreateDatabaseWithRetentionPolicy called with nil spec")
3 changes: 2 additions & 1 deletion services/opentsdb/service.go
@@ -346,7 +346,8 @@ func (s *Service) handleConn(conn net.Conn) {

// handleTelnetConn accepts OpenTSDB's telnet protocol.
// Each telnet command consists of a line of the form:
// put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0
//
// put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0
func (s *Service) handleTelnetConn(conn net.Conn) {
defer conn.Close()
defer atomic.AddInt64(&s.stats.ActiveTelnetConnections, -1)
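
A self-contained parsing sketch (not the service's actual parser) for the telnet line format quoted above, `put <metric> <timestamp> <value> <tag=value> ...`:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parsePut splits one OpenTSDB telnet "put" line into its parts.
func parsePut(line string) (metric string, ts int64, value float64, tags map[string]string, err error) {
	fields := strings.Fields(line)
	if len(fields) < 4 || fields[0] != "put" {
		return "", 0, 0, nil, fmt.Errorf("malformed line: %q", line)
	}
	metric = fields[1]
	if ts, err = strconv.ParseInt(fields[2], 10, 64); err != nil {
		return "", 0, 0, nil, err
	}
	if value, err = strconv.ParseFloat(fields[3], 64); err != nil {
		return "", 0, 0, nil, err
	}
	tags = make(map[string]string)
	for _, kv := range fields[4:] {
		if k, v, ok := strings.Cut(kv, "="); ok {
			tags[k] = v
		}
	}
	return metric, ts, value, tags, nil
}

func main() {
	fmt.Println(parsePut("put sys.cpu.user 1356998400 42.5 host=webserver01 cpu=0"))
}
```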
1 change: 0 additions & 1 deletion services/snapshotter/service.go
@@ -412,7 +412,6 @@ func (s *Service) writeRetentionPolicyInfo(conn net.Conn, database, retentionPol
// the json buffer and the conn.
//
// we return that buffer sans the newline at the beginning.
//
func (s *Service) readRequest(r io.Reader) (*Request, []byte, error) {
var req Request
d := json.NewDecoder(r)
7 changes: 3 additions & 4 deletions services/storage/predicate_influxql.go
@@ -33,10 +33,9 @@ func RewriteExprRemoveFieldKeyAndValue(expr influxql.Expr) influxql.Expr {
//
// This condition is determined when the following is true:
//
// * there is only one occurrence of the tag key `_measurement`.
// * there are no OR operators in the expression tree.
// * the operator for the `_measurement` binary expression is ==.
//
// - there is only one occurrence of the tag key `_measurement`.
// - there are no OR operators in the expression tree.
// - the operator for the `_measurement` binary expression is ==.
func HasSingleMeasurementNoOR(expr influxql.Expr) (string, bool) {
var lastMeasurement string
foundOnce := true
6 changes: 3 additions & 3 deletions stress/stress_test.go
@@ -60,7 +60,7 @@ func TestTimer_Elapsed(t *testing.T) {
}
}

/// basic.go
// basic.go

// Types are off
func Test_typeArr(t *testing.T) {
@@ -415,7 +415,7 @@ func TestBasicQueryClient_Query(t *testing.T) {

}

/// config.go
// config.go
func Test_NewConfigWithFile(t *testing.T) {
c, err := NewConfig("stress.toml")
if err != nil {
@@ -590,5 +590,5 @@ func Test_NewConfigWithoutFile(t *testing.T) {
}
}

/// run.go
// run.go
// TODO
2 changes: 1 addition & 1 deletion stress/v2/stress_client/stressTest.go
@@ -160,7 +160,7 @@ func (st *StressTest) GetStatementResults(sID, t string) (res []influx.Result) {
return st.queryTestResults(qryStr)
}

// Runs given qry on the test results database and returns the results or nil in case of error
// Runs given qry on the test results database and returns the results or nil in case of error
func (st *StressTest) queryTestResults(qry string) (res []influx.Result) {
response, err := st.ResultsClient.Query(influx.Query{Command: qry, Database: st.TestDB})
if err == nil {
2 changes: 0 additions & 2 deletions tsdb/engine/tsm1/engine.go
@@ -1289,7 +1289,6 @@ func (e *Engine) addToIndexFromKey(keys [][]byte, fieldTypes []influxql.DataType
//
// TODO: We should consider obsoleting and removing this function in favor of
// WritePointsWithContext()
//
func (e *Engine) WritePoints(points []models.Point) error {
return e.WritePointsWithContext(context.Background(), points)
}
@@ -1303,7 +1302,6 @@ func (e *Engine) WritePoints(points []models.Point) error {
//
// It expects int64 pointers to be stored in the tsdb.StatPointsWritten and
// tsdb.StatValuesWritten keys and will store the proper values if requested.
//
func (e *Engine) WritePointsWithContext(ctx context.Context, points []models.Point) error {
values := make(map[string][]Value, len(points))
var (
4 changes: 1 addition & 3 deletions tsdb/engine/tsm1/ring.go
@@ -30,7 +30,6 @@ const partitions = 16
//
// To determine the partition that a series key should be added to, the series
// key is hashed and the first 8 bits are used as an index to the ring.
//
type ring struct {
// Number of keys within the ring. This is used to provide a hint for
// allocating the return values in keys(). It will not be perfectly accurate
@@ -47,8 +46,7 @@ type ring struct {
// power of 2, and for performance reasons should be larger than the number of
// cores on the host. The supported set of values for n is:
//
// {1, 2, 4, 8, 16, 32, 64, 128, 256}.
//
// {1, 2, 4, 8, 16, 32, 64, 128, 256}.
func newring(n int) (*ring, error) {
if n <= 0 || n > partitions {
return nil, fmt.Errorf("invalid number of paritions: %d", n)
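
Loosely illustrating the two comments above, a sketch of hashing a series key and using the top 8 bits of the hash as a ring index that maps onto one of n partitions. hash/fnv and the slot-to-partition mapping are assumptions made to keep the example self-contained; the actual tsm1 code may differ in both:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

const slots = 256 // 8 bits of the hash index one of 256 ring slots

type ring struct {
	partitions [][]string // keys assigned to each partition
	slotToPart [slots]int // which partition owns each ring slot
}

func newRing(n int) *ring {
	r := &ring{partitions: make([][]string, n)}
	for i := 0; i < slots; i++ {
		r.slotToPart[i] = i * n / slots // contiguous slot ranges per partition (an assumption)
	}
	return r
}

func (r *ring) add(key string) int {
	h := fnv.New64a()
	h.Write([]byte(key))
	slot := h.Sum64() >> 56 // top 8 bits pick the ring slot
	p := r.slotToPart[slot]
	r.partitions[p] = append(r.partitions[p], key)
	return p
}

func main() {
	r := newRing(16)
	fmt.Println(r.add("cpu,host=server01"))
	fmt.Println(r.add("mem,host=server02"))
}
```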