From 01cc97f5063ef57e6e14733bb43636c341aad335 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Sun, 10 Mar 2024 18:37:13 +0100 Subject: [PATCH 1/8] Add fallback retry to daemon This change adds a fallback retry to the daemon service this retry has a larger interval with a shorter max retry run time --- client/server/server.go | 75 ++++++++++++++++++++++++++++++++--------- 1 file changed, 59 insertions(+), 16 deletions(-) diff --git a/client/server/server.go b/client/server/server.go index fc1e4cc2642..68edae381c3 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -8,6 +8,8 @@ import ( "sync" "time" + "github.com/cenkalti/backoff/v4" + "github.com/netbirdio/netbird/client/internal/auth" "github.com/netbirdio/netbird/client/system" @@ -43,11 +45,13 @@ type Server struct { statusRecorder *peer.Status sessionWatcher *internal.SessionWatcher - mgmProbe *internal.Probe - signalProbe *internal.Probe - relayProbe *internal.Probe - wgProbe *internal.Probe - lastProbe time.Time + mgmProbe *internal.Probe + signalProbe *internal.Probe + relayProbe *internal.Probe + wgProbe *internal.Probe + lastProbe time.Time + retryStartedOnce bool + cancelRetry bool } type oauthAuthFlow struct { @@ -125,16 +129,56 @@ func (s *Server) Start() error { } if !config.DisableAutoConnect { - go func() { - if err := internal.RunClientWithProbes(ctx, config, s.statusRecorder, s.mgmProbe, s.signalProbe, s.relayProbe, s.wgProbe); err != nil { - log.Errorf("init connections: %v", err) - } - }() + go s.connectWithRetryRuns(ctx, config, s.statusRecorder, s.mgmProbe, s.signalProbe, s.relayProbe, s.wgProbe) } return nil } +// connectWithRetryRuns runs the client connection with a backoff strategy where we retry the operation as additional +// mechanism to keep the client connected even when the connection is lost. +// we cancel retry if the client receive a stop or down command, or if disable auto connect is configured. +func (s *Server) connectWithRetryRuns(ctx context.Context, config *internal.Config, statusRecorder *peer.Status, + mgmProbe *internal.Probe, signalProbe *internal.Probe, relayProbe *internal.Probe, wgProbe *internal.Probe) { + backOff := getConnectWithBackoff(ctx) + + runOperation := func() error { + err := internal.RunClientWithProbes(ctx, config, statusRecorder, mgmProbe, signalProbe, relayProbe, wgProbe) + if err != nil { + log.Errorf("run client connection: %v", err) + } + + if s.cancelRetry || config.DisableAutoConnect { + return backoff.Permanent(err) + } + + if !s.retryStartedOnce { + s.retryStartedOnce = true + backOff.Reset() + } + + return fmt.Errorf("client connection exited") + } + + err := backoff.Retry(runOperation, backOff) + if err != nil { + log.Errorf("received an error when trying to connect: %v", err) + } +} + +// getConnectWithBackoff returns a backoff with exponential backoff strategy for connection retries +func getConnectWithBackoff(ctx context.Context) backoff.BackOff { + return backoff.WithContext(&backoff.ExponentialBackOff{ + InitialInterval: 5 * time.Minute, + RandomizationFactor: 1, + Multiplier: 1.7, + MaxInterval: 60 * time.Minute, + MaxElapsedTime: 7 * 24 * time.Hour, // 7 days + Stop: backoff.Stop, + Clock: backoff.SystemClock, + }, ctx) +} + // loginAttempt attempts to login using the provided information. 
it returns a status in case something fails func (s *Server) loginAttempt(ctx context.Context, setupKey, jwtToken string) (internal.StatusType, error) { var status internal.StatusType @@ -445,12 +489,9 @@ func (s *Server) Up(callerCtx context.Context, _ *proto.UpRequest) (*proto.UpRes s.statusRecorder.UpdateManagementAddress(s.config.ManagementURL.String()) s.statusRecorder.UpdateRosenpass(s.config.RosenpassEnabled, s.config.RosenpassPermissive) - go func() { - if err := internal.RunClientWithProbes(ctx, s.config, s.statusRecorder, s.mgmProbe, s.signalProbe, s.relayProbe, s.wgProbe); err != nil { - log.Errorf("run client connection: %v", err) - return - } - }() + s.cancelRetry = false + + go s.connectWithRetryRuns(ctx, s.config, s.statusRecorder, s.mgmProbe, s.signalProbe, s.relayProbe, s.wgProbe) return &proto.UpResponse{}, nil } @@ -467,6 +508,8 @@ func (s *Server) Down(_ context.Context, _ *proto.DownRequest) (*proto.DownRespo state := internal.CtxGetState(s.rootCtx) state.Set(internal.StatusIdle) + s.cancelRetry = true + return &proto.DownResponse{}, nil } From 8585311cba1ff450d8a0ff6bf0b78c24408cd44d Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Sun, 10 Mar 2024 19:02:48 +0100 Subject: [PATCH 2/8] add routine to update retry starter indicator --- client/server/server.go | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/client/server/server.go b/client/server/server.go index 68edae381c3..e6f51b5156d 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -45,13 +45,13 @@ type Server struct { statusRecorder *peer.Status sessionWatcher *internal.SessionWatcher - mgmProbe *internal.Probe - signalProbe *internal.Probe - relayProbe *internal.Probe - wgProbe *internal.Probe - lastProbe time.Time - retryStartedOnce bool - cancelRetry bool + mgmProbe *internal.Probe + signalProbe *internal.Probe + relayProbe *internal.Probe + wgProbe *internal.Probe + lastProbe time.Time + retryStarted bool + cancelRetry bool } type oauthAuthFlow struct { @@ -141,6 +141,26 @@ func (s *Server) Start() error { func (s *Server) connectWithRetryRuns(ctx context.Context, config *internal.Config, statusRecorder *peer.Status, mgmProbe *internal.Probe, signalProbe *internal.Probe, relayProbe *internal.Probe, wgProbe *internal.Probe) { backOff := getConnectWithBackoff(ctx) + updateRetryCounterChan := make(chan struct{}) + + go func() { + t := time.NewTicker(24 * time.Hour) + for { + select { + case <-ctx.Done(): + return + case <-updateRetryCounterChan: + return + case <-t.C: + if !s.cancelRetry && s.retryStarted { + mgmtState := statusRecorder.GetManagementState() + if mgmtState.Connected { + s.retryStarted = false + } + } + } + } + }() runOperation := func() error { err := internal.RunClientWithProbes(ctx, config, statusRecorder, mgmProbe, signalProbe, relayProbe, wgProbe) @@ -152,8 +172,8 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, config *internal.Conf return backoff.Permanent(err) } - if !s.retryStartedOnce { - s.retryStartedOnce = true + if !s.retryStarted { + s.retryStarted = true backOff.Reset() } @@ -164,6 +184,7 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, config *internal.Conf if err != nil { log.Errorf("received an error when trying to connect: %v", err) } + close(updateRetryCounterChan) } // getConnectWithBackoff returns a backoff with exponential backoff strategy for connection retries From ab208985dda81f7a88d0d57282997d32efd0eef1 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Mon, 11 Mar 
2024 19:30:48 +0100 Subject: [PATCH 3/8] cleanup, adjust times and log levels --- client/server/server.go | 49 +++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/client/server/server.go b/client/server/server.go index e6f51b5156d..ad693368e77 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -45,13 +45,11 @@ type Server struct { statusRecorder *peer.Status sessionWatcher *internal.SessionWatcher - mgmProbe *internal.Probe - signalProbe *internal.Probe - relayProbe *internal.Probe - wgProbe *internal.Probe - lastProbe time.Time - retryStarted bool - cancelRetry bool + mgmProbe *internal.Probe + signalProbe *internal.Probe + relayProbe *internal.Probe + wgProbe *internal.Probe + lastProbe time.Time } type oauthAuthFlow struct { @@ -141,21 +139,25 @@ func (s *Server) Start() error { func (s *Server) connectWithRetryRuns(ctx context.Context, config *internal.Config, statusRecorder *peer.Status, mgmProbe *internal.Probe, signalProbe *internal.Probe, relayProbe *internal.Probe, wgProbe *internal.Probe) { backOff := getConnectWithBackoff(ctx) - updateRetryCounterChan := make(chan struct{}) + retryStarted := false go func() { t := time.NewTicker(24 * time.Hour) for { select { case <-ctx.Done(): - return - case <-updateRetryCounterChan: + t.Stop() return case <-t.C: - if !s.cancelRetry && s.retryStarted { + if retryStarted { + mgmtState := statusRecorder.GetManagementState() - if mgmtState.Connected { - s.retryStarted = false + signalState := statusRecorder.GetSignalState() + if mgmtState.Connected && signalState.Connected { + log.Tracef("reseting status") + retryStarted = false + } else { + log.Tracef("not resetting status: mgmt: %v, signal: %v", mgmtState.Connected, signalState.Connected) } } } @@ -163,28 +165,31 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, config *internal.Conf }() runOperation := func() error { + log.Tracef("running client connection") err := internal.RunClientWithProbes(ctx, config, statusRecorder, mgmProbe, signalProbe, relayProbe, wgProbe) if err != nil { - log.Errorf("run client connection: %v", err) + log.Debugf("run client connection exited with error: %v. 
Will retry in the background", err) } - if s.cancelRetry || config.DisableAutoConnect { + if config.DisableAutoConnect { return backoff.Permanent(err) } - if !s.retryStarted { - s.retryStarted = true + if !retryStarted { + retryStarted = true backOff.Reset() } + log.Tracef("client connection exited") return fmt.Errorf("client connection exited") } err := backoff.Retry(runOperation, backOff) - if err != nil { + if s, ok := gstatus.FromError(err); ok && s.Code() != codes.Canceled { log.Errorf("received an error when trying to connect: %v", err) + } else { + log.Tracef("retry canceled") } - close(updateRetryCounterChan) } // getConnectWithBackoff returns a backoff with exponential backoff strategy for connection retries @@ -194,7 +199,7 @@ func getConnectWithBackoff(ctx context.Context) backoff.BackOff { RandomizationFactor: 1, Multiplier: 1.7, MaxInterval: 60 * time.Minute, - MaxElapsedTime: 7 * 24 * time.Hour, // 7 days + MaxElapsedTime: 14 * 24 * time.Hour, // 14 days Stop: backoff.Stop, Clock: backoff.SystemClock, }, ctx) @@ -510,8 +515,6 @@ func (s *Server) Up(callerCtx context.Context, _ *proto.UpRequest) (*proto.UpRes s.statusRecorder.UpdateManagementAddress(s.config.ManagementURL.String()) s.statusRecorder.UpdateRosenpass(s.config.RosenpassEnabled, s.config.RosenpassPermissive) - s.cancelRetry = false - go s.connectWithRetryRuns(ctx, s.config, s.statusRecorder, s.mgmProbe, s.signalProbe, s.relayProbe, s.wgProbe) return &proto.UpResponse{}, nil @@ -529,8 +532,6 @@ func (s *Server) Down(_ context.Context, _ *proto.DownRequest) (*proto.DownRespo state := internal.CtxGetState(s.rootCtx) state.Set(internal.StatusIdle) - s.cancelRetry = true - return &proto.DownResponse{}, nil } From 69f3dbca1d8b06e47bf49dc4d6c27b19a3e6b669 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Mon, 11 Mar 2024 19:32:17 +0100 Subject: [PATCH 4/8] resetting --- client/server/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/server/server.go b/client/server/server.go index ad693368e77..1d60ec8aef4 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -154,7 +154,7 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, config *internal.Conf mgmtState := statusRecorder.GetManagementState() signalState := statusRecorder.GetSignalState() if mgmtState.Connected && signalState.Connected { - log.Tracef("reseting status") + log.Tracef("resetting status") retryStarted = false } else { log.Tracef("not resetting status: mgmt: %v, signal: %v", mgmtState.Connected, signalState.Connected) From 5f134acaf51fd76fd32421f0ebebd2eeca7567f6 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Mon, 11 Mar 2024 19:44:33 +0100 Subject: [PATCH 5/8] adding environment variables support --- client/server/server.go | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/client/server/server.go b/client/server/server.go index 1d60ec8aef4..d013acef2f5 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -3,6 +3,7 @@ package server import ( "context" "fmt" + "os" "os/exec" "runtime" "sync" @@ -25,7 +26,15 @@ import ( "github.com/netbirdio/netbird/version" ) -const probeThreshold = time.Second * 5 +const ( + probeThreshold = time.Second * 5 + retryInitialIntervalVar = "NB_CONN_RETRY_INTERVAL_TIME" + maxRetryInterval = "NB_CONN_MAX_RETRY_INTERVAL_TIME" + maxRetryTime = "NB_CONN_MAX_RETRY_TIME_TIME" + defaultInitialRetryTime = 14 * 24 * time.Hour + defaultMaxRetryInterval = 60 * time.Minute + defaultMaxRetryTime = 14 * 24 * time.Hour 
+) // Server for service control. type Server struct { @@ -194,17 +203,31 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, config *internal.Conf // getConnectWithBackoff returns a backoff with exponential backoff strategy for connection retries func getConnectWithBackoff(ctx context.Context) backoff.BackOff { + initialInterval := parseEnvDuration(retryInitialIntervalVar, defaultInitialRetryTime) + maxInterval := parseEnvDuration(maxRetryInterval, defaultMaxRetryInterval) + maxElapsedTime := parseEnvDuration(maxRetryTime, defaultMaxRetryTime) return backoff.WithContext(&backoff.ExponentialBackOff{ - InitialInterval: 5 * time.Minute, + InitialInterval: initialInterval, RandomizationFactor: 1, Multiplier: 1.7, - MaxInterval: 60 * time.Minute, - MaxElapsedTime: 14 * 24 * time.Hour, // 14 days + MaxInterval: maxInterval, + MaxElapsedTime: maxElapsedTime, // 14 days Stop: backoff.Stop, Clock: backoff.SystemClock, }, ctx) } +// parseEnvDuration parses the environment variable and returns the duration +func parseEnvDuration(envVar string, defaultDuration time.Duration) time.Duration { + if envValue := os.Getenv(envVar); envValue != "" { + if duration, err := time.ParseDuration(envValue); err == nil { + return duration + } + log.Warnf("unable to parse environment variable %s: %s. using default: %s", envVar, envValue, defaultDuration) + } + return defaultDuration +} + // loginAttempt attempts to login using the provided information. it returns a status in case something fails func (s *Server) loginAttempt(ctx context.Context, setupKey, jwtToken string) (internal.StatusType, error) { var status internal.StatusType From 94a79f3d7fca243836aec37e4a8d3c73c8635034 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Mon, 11 Mar 2024 20:59:08 +0100 Subject: [PATCH 6/8] add tests --- client/server/server.go | 25 ++++-- client/server/server_test.go | 168 +++++++++++++++++++++++++++++++++++ 2 files changed, 188 insertions(+), 5 deletions(-) create mode 100644 client/server/server_test.go diff --git a/client/server/server.go b/client/server/server.go index d013acef2f5..90b5bcb642c 100644 --- a/client/server/server.go +++ b/client/server/server.go @@ -6,6 +6,7 @@ import ( "os" "os/exec" "runtime" + "strconv" "sync" "time" @@ -29,11 +30,13 @@ import ( const ( probeThreshold = time.Second * 5 retryInitialIntervalVar = "NB_CONN_RETRY_INTERVAL_TIME" - maxRetryInterval = "NB_CONN_MAX_RETRY_INTERVAL_TIME" - maxRetryTime = "NB_CONN_MAX_RETRY_TIME_TIME" + maxRetryIntervalVar = "NB_CONN_MAX_RETRY_INTERVAL_TIME" + maxRetryTimeVar = "NB_CONN_MAX_RETRY_TIME_TIME" + retryMultiplierVar = "NB_CONN_RETRY_MULTIPLIER" defaultInitialRetryTime = 14 * 24 * time.Hour defaultMaxRetryInterval = 60 * time.Minute defaultMaxRetryTime = 14 * 24 * time.Hour + defaultRetryMultiplier = 1.7 ) // Server for service control. 
@@ -204,12 +207,24 @@ func (s *Server) connectWithRetryRuns(ctx context.Context, config *internal.Conf // getConnectWithBackoff returns a backoff with exponential backoff strategy for connection retries func getConnectWithBackoff(ctx context.Context) backoff.BackOff { initialInterval := parseEnvDuration(retryInitialIntervalVar, defaultInitialRetryTime) - maxInterval := parseEnvDuration(maxRetryInterval, defaultMaxRetryInterval) - maxElapsedTime := parseEnvDuration(maxRetryTime, defaultMaxRetryTime) + maxInterval := parseEnvDuration(maxRetryIntervalVar, defaultMaxRetryInterval) + maxElapsedTime := parseEnvDuration(maxRetryTimeVar, defaultMaxRetryTime) + multiplier := defaultRetryMultiplier + + if envValue := os.Getenv(retryMultiplierVar); envValue != "" { + // parse the multiplier from the environment variable string value to float64 + value, err := strconv.ParseFloat(envValue, 64) + if err != nil { + log.Warnf("unable to parse environment variable %s: %s. using default: %f", retryMultiplierVar, envValue, multiplier) + } else { + multiplier = value + } + } + return backoff.WithContext(&backoff.ExponentialBackOff{ InitialInterval: initialInterval, RandomizationFactor: 1, - Multiplier: 1.7, + Multiplier: multiplier, MaxInterval: maxInterval, MaxElapsedTime: maxElapsedTime, // 14 days Stop: backoff.Stop, diff --git a/client/server/server_test.go b/client/server/server_test.go new file mode 100644 index 00000000000..3b928d0f935 --- /dev/null +++ b/client/server/server_test.go @@ -0,0 +1,168 @@ +package server + +import ( + "context" + "net" + "os" + "testing" + "time" + + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" + + "github.com/netbirdio/netbird/client/internal" + "github.com/netbirdio/netbird/client/internal/peer" + mgmtProto "github.com/netbirdio/netbird/management/proto" + "github.com/netbirdio/netbird/management/server" + "github.com/netbirdio/netbird/management/server/activity" + "github.com/netbirdio/netbird/signal/proto" + signalServer "github.com/netbirdio/netbird/signal/server" +) + +var ( + kaep = keepalive.EnforcementPolicy{ + MinTime: 15 * time.Second, + PermitWithoutStream: true, + } + + kasp = keepalive.ServerParameters{ + MaxConnectionIdle: 15 * time.Second, + MaxConnectionAgeGrace: 5 * time.Second, + Time: 5 * time.Second, + Timeout: 2 * time.Second, + } +) + +// TestConnectWithRetryRuns checks that the connectWithRetry function runs and runs the retries according to the times specified via environment variables +// we will use a management server started via to simulate the server and capture the number of retries +func TestConnectWithRetryRuns(t *testing.T) { + // start the signal server + _, signalAddr, err := startSignal() + if err != nil { + t.Fatalf("failed to start signal server: %v", err) + } + + counter := 0 + // start the management server + _, mgmtAddr, err := startManagement(t, signalAddr, &counter) + if err != nil { + t.Fatalf("failed to start management server: %v", err) + } + + ctx := internal.CtxInitState(context.Background()) + + ctx, cancel := context.WithDeadline(ctx, time.Now().Add(30*time.Second)) + defer cancel() + // create new server + s := New(ctx, t.TempDir()+"/config.json", "debug") + s.latestConfigInput.ManagementURL = "http://" + mgmtAddr + config, err := internal.UpdateOrCreateConfig(s.latestConfigInput) + if err != nil { + t.Fatalf("failed to create config: %v", err) + } + s.config = config + + s.statusRecorder = peer.NewRecorder(config.ManagementURL.String()) + err = 
os.Setenv(retryInitialIntervalVar, "1s") + if err != nil { + t.Fatalf("failed to set env var: %v", err) + } + err = os.Setenv(maxRetryIntervalVar, "2s") + if err != nil { + t.Fatalf("failed to set env var: %v", err) + } + err = os.Setenv(maxRetryTimeVar, "5s") + if err != nil { + t.Fatalf("failed to set env var: %v", err) + } + err = os.Setenv(retryMultiplierVar, "1") + if err != nil { + t.Fatalf("failed to set env var: %v", err) + } + s.connectWithRetryRuns(ctx, config, s.statusRecorder, s.mgmProbe, s.signalProbe, s.relayProbe, s.wgProbe) + if counter < 2 || counter > 6 { + t.Fatalf("expected 2 < counter < 6, got %d", counter) + } +} + +type mockServer struct { + mgmtProto.ManagementServiceServer + counter *int +} + +func (m *mockServer) Login(ctx context.Context, req *mgmtProto.EncryptedMessage) (*mgmtProto.EncryptedMessage, error) { + *m.counter++ + return m.ManagementServiceServer.Login(ctx, req) +} + +func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Server, string, error) { + dataDir := t.TempDir() + + config := &server.Config{ + Stuns: []*server.Host{}, + TURNConfig: &server.TURNConfig{}, + Signal: &server.Host{ + Proto: "http", + URI: signalAddr, + }, + Datadir: dataDir, + HttpConfig: nil, + } + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + return nil, "", err + } + s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp)) + store, err := server.NewStoreFromJson(config.Datadir, nil) + if err != nil { + return nil, "", err + } + + peersUpdateManager := server.NewPeersUpdateManager(nil) + eventStore := &activity.InMemoryEventStore{} + if err != nil { + return nil, "", err + } + accountManager, err := server.BuildManager(store, peersUpdateManager, nil, "", "", eventStore, nil, false) + if err != nil { + return nil, "", err + } + turnManager := server.NewTimeBasedAuthSecretsManager(peersUpdateManager, config.TURNConfig) + mgmtServer, err := server.NewServer(config, accountManager, peersUpdateManager, turnManager, nil, nil) + if err != nil { + return nil, "", err + } + mock := &mockServer{ + ManagementServiceServer: mgmtServer, + counter: counter, + } + mgmtProto.RegisterManagementServiceServer(s, mock) + go func() { + if err = s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } + }() + + return s, lis.Addr().String(), nil +} + +func startSignal() (*grpc.Server, string, error) { + s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp)) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatalf("failed to listen: %v", err) + } + + proto.RegisterSignalExchangeServer(s, signalServer.NewServer()) + + go func() { + if err = s.Serve(lis); err != nil { + log.Fatalf("failed to serve: %v", err) + } + }() + + return s, lis.Addr().String(), nil +} From c7552b5ea54c6d09813ba40c1133aecd832df144 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Tue, 12 Mar 2024 10:47:30 +0100 Subject: [PATCH 7/8] use t.Setenv and t.Helper --- client/server/server_test.go | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/client/server/server_test.go b/client/server/server_test.go index 3b928d0f935..f45e3abbc8a 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -3,7 +3,6 @@ package server import ( "context" "net" - "os" "testing" "time" @@ -64,22 +63,11 @@ func TestConnectWithRetryRuns(t *testing.T) { s.config = config s.statusRecorder = peer.NewRecorder(config.ManagementURL.String()) - err = 
os.Setenv(retryInitialIntervalVar, "1s") - if err != nil { - t.Fatalf("failed to set env var: %v", err) - } - err = os.Setenv(maxRetryIntervalVar, "2s") - if err != nil { - t.Fatalf("failed to set env var: %v", err) - } - err = os.Setenv(maxRetryTimeVar, "5s") - if err != nil { - t.Fatalf("failed to set env var: %v", err) - } - err = os.Setenv(retryMultiplierVar, "1") - if err != nil { - t.Fatalf("failed to set env var: %v", err) - } + t.Setenv(retryInitialIntervalVar, "1s") + t.Setenv(maxRetryIntervalVar, "2s") + t.Setenv(maxRetryTimeVar, "5s") + t.Setenv(retryMultiplierVar, "1") + s.connectWithRetryRuns(ctx, config, s.statusRecorder, s.mgmProbe, s.signalProbe, s.relayProbe, s.wgProbe) if counter < 2 || counter > 6 { t.Fatalf("expected 2 < counter < 6, got %d", counter) @@ -97,6 +85,7 @@ func (m *mockServer) Login(ctx context.Context, req *mgmtProto.EncryptedMessage) } func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Server, string, error) { + t.Helper() dataDir := t.TempDir() config := &server.Config{ From 3e376e289595342e52098bc281f8c8f73bd079f1 Mon Sep 17 00:00:00 2001 From: Maycon Santos Date: Tue, 12 Mar 2024 11:08:20 +0100 Subject: [PATCH 8/8] update counter check --- client/server/server_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/server/server_test.go b/client/server/server_test.go index f45e3abbc8a..79a22002311 100644 --- a/client/server/server_test.go +++ b/client/server/server_test.go @@ -69,8 +69,8 @@ func TestConnectWithRetryRuns(t *testing.T) { t.Setenv(retryMultiplierVar, "1") s.connectWithRetryRuns(ctx, config, s.statusRecorder, s.mgmProbe, s.signalProbe, s.relayProbe, s.wgProbe) - if counter < 2 || counter > 6 { - t.Fatalf("expected 2 < counter < 6, got %d", counter) + if counter < 3 { + t.Fatalf("expected counter > 2, got %d", counter) } }
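
For reference, the retry schedule that these patches configure can be previewed with a short standalone program. The sketch below is only illustrative and is not part of the change: the environment variable names, defaults, multiplier and randomization factor are taken from patches 5 and 6, built on the public github.com/cenkalti/backoff/v4 API the daemon already uses, while the main function, the envDuration helper and the printed preview loop are hypothetical additions for demonstration.

// Hypothetical preview program (not part of the patches). It reuses the
// environment variable names and defaults introduced above to show which
// retry intervals the daemon's backoff would produce.
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/cenkalti/backoff/v4"
)

const (
	retryInitialIntervalVar = "NB_CONN_RETRY_INTERVAL_TIME"
	maxRetryIntervalVar     = "NB_CONN_MAX_RETRY_INTERVAL_TIME"
	maxRetryTimeVar         = "NB_CONN_MAX_RETRY_TIME_TIME"

	defaultInitialRetryTime = 14 * 24 * time.Hour
	defaultMaxRetryInterval = 60 * time.Minute
	defaultMaxRetryTime     = 14 * 24 * time.Hour
)

// envDuration mirrors the parseEnvDuration helper from the patch: it falls
// back to the default when the variable is unset or not a valid duration.
func envDuration(name string, def time.Duration) time.Duration {
	if v := os.Getenv(name); v != "" {
		if d, err := time.ParseDuration(v); err == nil {
			return d
		}
	}
	return def
}

func main() {
	b := &backoff.ExponentialBackOff{
		InitialInterval:     envDuration(retryInitialIntervalVar, defaultInitialRetryTime),
		RandomizationFactor: 1, // same jitter as the daemon: each wait is randomized around the current interval
		Multiplier:          1.7,
		MaxInterval:         envDuration(maxRetryIntervalVar, defaultMaxRetryInterval),
		MaxElapsedTime:      envDuration(maxRetryTimeVar, defaultMaxRetryTime),
		Stop:                backoff.Stop,
		Clock:               backoff.SystemClock,
	}
	// Reset is required when the struct is built literally; backoff.Retry
	// performs the same call internally before its first attempt.
	b.Reset()

	for i := 0; i < 5; i++ {
		next := b.NextBackOff()
		if next == backoff.Stop {
			fmt.Println("retries exhausted")
			return
		}
		fmt.Printf("retry %d after ~%s\n", i+1, next)
	}
}

Setting NB_CONN_RETRY_INTERVAL_TIME=1s, NB_CONN_MAX_RETRY_INTERVAL_TIME=2s and NB_CONN_MAX_RETRY_TIME_TIME=5s, as the test does via t.Setenv, shortens this schedule enough that several Login attempts are observed within the 30-second test deadline.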