
Commit 89c0c53

fix: no retry if client ctx is done
rueian committed May 15, 2022
1 parent 4c68ce5 commit 89c0c53
Showing 2 changed files with 16 additions and 12 deletions.
cluster.go (18 changes: 11 additions & 7 deletions)
@@ -259,7 +259,7 @@ retry:
 	}
 	resp = cc.Do(ctx, cmd)
 process:
-	if c.shouldRefreshRetry(resp.NonRedisError()) {
+	if c.shouldRefreshRetry(resp.NonRedisError(), ctx) {
 		goto retry
 	}
 	if err := resp.RedisError(); err != nil {
@@ -289,7 +289,7 @@ retry:
 	}
 	resp = cc.DoCache(ctx, cmd, ttl)
 process:
-	if c.shouldRefreshRetry(resp.NonRedisError()) {
+	if c.shouldRefreshRetry(resp.NonRedisError(), ctx) {
 		goto retry
 	}
 	if err := resp.RedisError(); err != nil {
@@ -317,7 +317,7 @@ retry:
 	if err != nil {
 		goto ret
 	}
-	if err = cc.Receive(ctx, subscribe, fn); c.shouldRefreshRetry(err) {
+	if err = cc.Receive(ctx, subscribe, fn); c.shouldRefreshRetry(err, ctx) {
 		goto retry
 	}
 ret:
@@ -341,8 +341,12 @@ func (c *clusterClient) Close() {
 	c.mu.RUnlock()
 }
 
-func (c *clusterClient) shouldRefreshRetry(err error) (should bool) {
+func (c *clusterClient) shouldRefreshRetry(err error, ctx context.Context) (should bool) {
 	if should = (err == ErrClosing || err == context.DeadlineExceeded) && atomic.LoadUint32(&c.closed) == 0; should {
+		if ctx.Err() != nil {
+			go c.refresh()
+			return false
+		}
 		c.refresh()
 	}
 	return should
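
For illustration, a minimal caller-side sketch of what the new guard changes, assuming a cluster Client built with the public rueidis API; the address, key, and timeout below are placeholders, not part of this commit:

package main

import (
	"context"
	"log"
	"time"

	"github.com/rueian/rueidis"
)

func main() {
	// Hypothetical cluster endpoint; any reachable deployment behaves the same way.
	client, err := rueidis.NewClient(rueidis.ClientOption{InitAddress: []string{"127.0.0.1:7001"}})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// Previously, a context.DeadlineExceeded surfacing from the connection could
	// send Do back through the retry loop even though the caller's context was
	// already done. With this change, shouldRefreshRetry sees ctx.Err() != nil,
	// schedules refresh() on a goroutine, and reports false, so Do returns the
	// error to the caller instead of retrying.
	resp := client.Do(ctx, client.B().Get().Key("k").Build())
	if err := resp.Error(); err != nil {
		log.Println(err) // typically context.DeadlineExceeded once the deadline passes
	}
}
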
@@ -401,7 +405,7 @@ retry:
 		resp = newErrResult(err)
 	} else {
 		resp = wire.Do(ctx, cmd)
-		if c.client.shouldRefreshRetry(resp.NonRedisError()) {
+		if c.client.shouldRefreshRetry(resp.NonRedisError(), ctx) {
			goto retry
 		}
 	}
@@ -420,7 +424,7 @@ retry:
 	if wire, err := c.acquire(); err == nil {
 		resp = wire.DoMulti(ctx, multi...)
 		for _, resp := range resp {
-			if c.client.shouldRefreshRetry(resp.NonRedisError()) {
+			if c.client.shouldRefreshRetry(resp.NonRedisError(), ctx) {
 				goto retry
 			}
 		}
@@ -441,7 +445,7 @@ func (c *dedicatedClusterClient) Receive(ctx context.Context, subscribe cmds.Com
 	var wire wire
 retry:
 	if wire, err = c.acquire(); err == nil {
-		if err = wire.Receive(ctx, subscribe, fn); c.client.shouldRefreshRetry(err) {
+		if err = wire.Receive(ctx, subscribe, fn); c.client.shouldRefreshRetry(err, ctx) {
 			goto retry
 		}
 	}
sentinel.go (10 changes: 5 additions & 5 deletions)
@@ -58,7 +58,7 @@ func (c *sentinelClient) B() cmds.Builder {
 func (c *sentinelClient) Do(ctx context.Context, cmd cmds.Completed) (resp RedisResult) {
 retry:
 	resp = c.mConn.Load().(conn).Do(ctx, cmd)
-	if c.shouldRetry(resp.NonRedisError()) {
+	if c.shouldRetry(resp.NonRedisError(), ctx) {
 		goto retry
 	}
 	cmds.Put(cmd.CommandSlice())
@@ -68,7 +68,7 @@ retry:
 func (c *sentinelClient) DoCache(ctx context.Context, cmd cmds.Cacheable, ttl time.Duration) (resp RedisResult) {
 retry:
 	resp = c.mConn.Load().(conn).DoCache(ctx, cmd, ttl)
-	if c.shouldRetry(resp.NonRedisError()) {
+	if c.shouldRetry(resp.NonRedisError(), ctx) {
 		goto retry
 	}
 	cmds.Put(cmd.CommandSlice())
@@ -77,7 +77,7 @@ retry:
 
 func (c *sentinelClient) Receive(ctx context.Context, subscribe cmds.Completed, fn func(msg PubSubMessage)) (err error) {
 retry:
-	if err = c.mConn.Load().(conn).Receive(ctx, subscribe, fn); c.shouldRetry(err) {
+	if err = c.mConn.Load().(conn).Receive(ctx, subscribe, fn); c.shouldRetry(err, ctx) {
 		goto retry
 	}
 	cmds.Put(subscribe.CommandSlice())
@@ -104,8 +104,8 @@ func (c *sentinelClient) Close() {
 	c.mu.Unlock()
 }
 
-func (c *sentinelClient) shouldRetry(err error) (should bool) {
-	if should = err == ErrClosing && atomic.LoadUint32(&c.closed) == 0; should {
+func (c *sentinelClient) shouldRetry(err error, ctx context.Context) (should bool) {
+	if should = err == ErrClosing && atomic.LoadUint32(&c.closed) == 0 && ctx.Err() == nil; should {
 		runtime.Gosched()
 	}
 	return should
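
A sketch of the same idea against the sentinel path, assuming a hypothetical sentinel deployment; the address and master set name are placeholders, not part of this commit:

package main

import (
	"context"
	"log"

	"github.com/rueian/rueidis"
)

func main() {
	// Hypothetical sentinel setup; address and master set name are placeholders.
	client, err := rueidis.NewClient(rueidis.ClientOption{
		InitAddress: []string{"127.0.0.1:26379"},
		Sentinel:    rueidis.SentinelOption{MasterSet: "mymaster"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx, cancel := context.WithCancel(context.Background())
	cancel() // the caller gives up before (or while) subscribing

	// With ctx already done, shouldRetry now reports false even if the
	// underlying connection returns ErrClosing during a failover, so Receive
	// hands the error back instead of looping on goto retry.
	err = client.Receive(ctx, client.B().Subscribe().Channel("ch").Build(), func(m rueidis.PubSubMessage) {})
	log.Println(err)
}
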
