refactor NearLimitRatio to environment variable (envoyproxy#186)
Signed-off-by: zufardhiyaulhaq <zufardhiyaulhaq@gmail.com>
zufardhiyaulhaq authored and timcovar committed Jan 16, 2024
1 parent 096d0c2 commit 5fbea6d
Showing 6 changed files with 14 additions and 15 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -412,7 +412,7 @@ STAT:
* over_limit: Number of rule hits exceeding the threshold rate
* total_hits: Number of rule hits in total

These are examples of generated stats for some configured rate limit rules from the above examples:
To use a custom near_limit ratio threshold, you can specify it with the `NEAR_LIMIT_RATIO` environment variable. It defaults to `0.8` (on a 0 to 1 scale). These are examples of generated stats for some configured rate limit rules from the above examples:

```
ratelimit.service.rate_limit.mongo_cps.database_default.over_limit: 0
```
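As a minimal illustration of what the ratio controls (not part of this commit), the sketch below classifies a request counter against a rule using the same near-limit math as the cache implementation; the `classify` helper and the sample numbers are made up for this example.

```go
package main

import (
	"fmt"
	"math"
)

// classify is an illustrative helper, not service code: it reports which stat
// a given counter value would increment for a rule with the given limit.
func classify(hits, requestsPerUnit uint32, nearLimitRatio float32) string {
	nearLimitThreshold := uint32(math.Floor(float64(float32(requestsPerUnit) * nearLimitRatio)))
	switch {
	case hits > requestsPerUnit:
		return "over_limit"
	case hits > nearLimitThreshold:
		return "near_limit"
	default:
		return "ok"
	}
}

func main() {
	// With the default NEAR_LIMIT_RATIO of 0.8 and a rule of 100 requests per unit,
	// hits 81-100 count toward near_limit and hits above 100 toward over_limit.
	fmt.Println(classify(80, 100, 0.8))  // ok
	fmt.Println(classify(81, 100, 0.8))  // near_limit
	fmt.Println(classify(101, 100, 0.8)) // over_limit
}
```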
5 changes: 0 additions & 5 deletions src/config/config.go
@@ -7,11 +7,6 @@ import (
"golang.org/x/net/context"
)

// The NearLimitRatio constant defines the ratio of total_hits over
// the Limit's RequestsPerUnit that needs to be reached before triggering a near_limit
// stat increase
const NearLimitRatio = 0.8

// Errors that may be raised during config parsing.
type RateLimitConfigError string

9 changes: 6 additions & 3 deletions src/redis/cache_impl.go
@@ -29,6 +29,7 @@ type rateLimitCacheImpl struct {
expirationJitterMaxSeconds int64
cacheKeyGenerator limiter.CacheKeyGenerator
localCache *freecache.Cache
nearLimitRatio float32
}

func max(a uint32, b uint32) uint32 {
@@ -149,7 +150,7 @@ func (this *rateLimitCacheImpl) DoLimit(
overLimitThreshold := limits[i].Limit.RequestsPerUnit
// The nearLimitThreshold is the number of requests that can be made before hitting the NearLimitRatio.
// We need to know it in both the OK and OVER_LIMIT scenarios.
nearLimitThreshold := uint32(math.Floor(float64(float32(overLimitThreshold) * config.NearLimitRatio)))
nearLimitThreshold := uint32(math.Floor(float64(float32(overLimitThreshold) * this.nearLimitRatio)))

logger.Debugf("cache key: %s current: %d", cacheKey.Key, limitAfterIncrease)
if limitAfterIncrease > overLimitThreshold {
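As a hedged worked example of the threshold expression in this hunk (the numbers are illustrative only): the ratio is applied in float32 and the product is floored, so a fractional result rounds down.

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Same expression as in DoLimit, evaluated with example values:
	// a limit of 7 requests per unit and the default ratio of 0.8.
	var overLimitThreshold uint32 = 7
	var nearLimitRatio float32 = 0.8
	nearLimitThreshold := uint32(math.Floor(float64(float32(overLimitThreshold) * nearLimitRatio)))
	fmt.Println(nearLimitThreshold) // 5: float32(7)*0.8 = 5.6, floored to 5
}
```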
@@ -221,7 +222,7 @@ func CalculateReset(currentLimit *pb.RateLimitResponse_RateLimit, timeSource lim
return &duration.Duration{Seconds: sec - now%sec}
}

func NewRateLimitCacheImpl(client Client, perSecondClient Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache) limiter.RateLimitCache {
func NewRateLimitCacheImpl(client Client, perSecondClient Client, timeSource limiter.TimeSource, jitterRand *rand.Rand, expirationJitterMaxSeconds int64, localCache *freecache.Cache, nearLimitRatio float32) limiter.RateLimitCache {
return &rateLimitCacheImpl{
client: client,
perSecondClient: perSecondClient,
@@ -230,6 +231,7 @@ func NewRateLimitCacheImpl(client Client, perSecondClient Client, timeSource lim
expirationJitterMaxSeconds: expirationJitterMaxSeconds,
cacheKeyGenerator: limiter.NewCacheKeyGenerator(),
localCache: localCache,
nearLimitRatio: nearLimitRatio,
}
}

@@ -249,5 +251,6 @@ func NewRateLimiterCacheImplFromSettings(s settings.Settings, localCache *freeca
timeSource,
jitterRand,
expirationJitterMaxSeconds,
localCache)
localCache,
s.NearLimitRatio)
}
1 change: 1 addition & 0 deletions src/settings/settings.go
@@ -42,6 +42,7 @@ type Settings struct {
RedisPerSecondPipelineLimit int `envconfig:"REDIS_PERSECOND_PIPELINE_LIMIT" default:"0"`
ExpirationJitterMaxSeconds int64 `envconfig:"EXPIRATION_JITTER_MAX_SECONDS" default:"300"`
LocalCacheSizeInBytes int `envconfig:"LOCAL_CACHE_SIZE_IN_BYTES" default:"0"`
NearLimitRatio float32 `envconfig:"NEAR_LIMIT_RATIO" default:"0.8"`
}

type Option func(*Settings)
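For illustration, here is a small sketch of how the new struct tag is resolved, assuming the standard `github.com/kelseyhightower/envconfig` behavior that the surrounding tags rely on; the trimmed `settings` struct and the empty prefix are assumptions for this example, not the repository's actual setup code.

```go
package main

import (
	"fmt"
	"os"

	"github.com/kelseyhightower/envconfig"
)

// Illustrative struct containing only the new field; the real Settings struct has many more.
type settings struct {
	NearLimitRatio float32 `envconfig:"NEAR_LIMIT_RATIO" default:"0.8"`
}

func main() {
	var s settings

	// With NEAR_LIMIT_RATIO unset, the default from the struct tag applies.
	if err := envconfig.Process("", &s); err != nil {
		panic(err)
	}
	fmt.Println(s.NearLimitRatio) // 0.8

	// With NEAR_LIMIT_RATIO set, the environment value overrides the default.
	os.Setenv("NEAR_LIMIT_RATIO", "0.9")
	if err := envconfig.Process("", &s); err != nil {
		panic(err)
	}
	fmt.Println(s.NearLimitRatio) // 0.9
}
```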
2 changes: 1 addition & 1 deletion test/redis/bench_test.go
@@ -44,7 +44,7 @@ func BenchmarkParallelDoLimit(b *testing.B) {
client := redis.NewClientImpl(statsStore, false, "", "single", "127.0.0.1:6379", poolSize, pipelineWindow, pipelineLimit)
defer client.Close()

cache := redis.NewRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil)
cache := redis.NewRateLimitCacheImpl(client, nil, limiter.NewTimeSourceImpl(), rand.New(limiter.NewLockedSource(time.Now().Unix())), 10, nil, 0.8)
request := common.NewRateLimitRequest("domain", [][][2]string{{{"key", "value"}}}, 1)
limits := []*config.RateLimit{config.NewRateLimit(1000000000, pb.RateLimitResponse_RateLimit_SECOND, "key_value", statsStore)}

10 changes: 5 additions & 5 deletions test/redis/cache_impl_test.go
@@ -43,9 +43,9 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {
timeSource := mock_limiter.NewMockTimeSource(controller)
var cache limiter.RateLimitCache
if usePerSecondRedis {
cache = redis.NewRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil)
cache = redis.NewRateLimitCacheImpl(client, perSecondClient, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8)
} else {
cache = redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil)
cache = redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8)
}
statsStore := stats.NewStore(stats.NewNullSink(), false)

@@ -172,7 +172,7 @@ func TestOverLimitWithLocalCache(t *testing.T) {
client := mock_redis.NewMockClient(controller)
timeSource := mock_limiter.NewMockTimeSource(controller)
localCache := freecache.NewCache(100)
cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache)
cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache, 0.8)
sink := &common.TestStatSink{}
statsStore := stats.NewStore(sink, true)
localCacheStats := limiter.NewLocalCacheStats(localCache, statsStore.Scope("localcache"))
@@ -264,7 +264,7 @@ func TestNearLimit(t *testing.T) {

client := mock_redis.NewMockClient(controller)
timeSource := mock_limiter.NewMockTimeSource(controller)
cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil)
cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(rand.NewSource(1)), 0, nil, 0.8)
statsStore := stats.NewStore(stats.NewNullSink(), false)

// Test Near Limit Stats. Under Near Limit Ratio
@@ -424,7 +424,7 @@ func TestRedisWithJitter(t *testing.T) {
client := mock_redis.NewMockClient(controller)
timeSource := mock_limiter.NewMockTimeSource(controller)
jitterSource := mock_limiter.NewMockJitterRandSource(controller)
cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil)
cache := redis.NewRateLimitCacheImpl(client, nil, timeSource, rand.New(jitterSource), 3600, nil, 0.8)
statsStore := stats.NewStore(stats.NewNullSink(), false)

timeSource.EXPECT().UnixNow().Return(int64(1234)).MaxTimes(3)
