diff --git a/glide.lock b/glide.lock index cbe41b84..31cd7445 100644 --- a/glide.lock +++ b/glide.lock @@ -44,6 +44,8 @@ imports: version: 3d6c1e425f717ee59152524e73b904b67705eeb8 - name: github.com/kelseyhightower/envconfig version: ac12b1f15efba734211a556d8b125110dc538016 +- name: github.com/konsorten/go-windows-terminal-sequences + version: 5c8c8bd35d3832f5d134ae1e1e375b69a4d25242 - name: github.com/lyft/goruntime version: a0d6acf20fcfd48f53e623ed62b87ffb7fe17038 subpackages: @@ -57,7 +59,7 @@ imports: subpackages: - validate - name: github.com/mediocregopher/radix.v2 - version: 94360be262532d465b7e4760c7a67195d3319a87 + version: b67df6e626f993b64b3ca9f4b8630900e61002e3 subpackages: - pool - redis diff --git a/proto/ratelimit/ratelimit.pb.go b/proto/ratelimit/ratelimit.pb.go index 77f41dbb..1a0f25db 100644 --- a/proto/ratelimit/ratelimit.pb.go +++ b/proto/ratelimit/ratelimit.pb.go @@ -3,13 +3,12 @@ package ratelimit -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "golang.org/x/net/context" + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" grpc "google.golang.org/grpc" + math "math" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -40,6 +39,7 @@ var RateLimit_Unit_name = map[int32]string{ 3: "HOUR", 4: "DAY", } + var RateLimit_Unit_value = map[string]int32{ "UNKNOWN": 0, "SECOND": 1, @@ -51,8 +51,9 @@ var RateLimit_Unit_value = map[string]int32{ func (x RateLimit_Unit) String() string { return proto.EnumName(RateLimit_Unit_name, int32(x)) } + func (RateLimit_Unit) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{2, 0} + return fileDescriptor_5009d3be233d3ead, []int{2, 0} } type RateLimitResponse_Code int32 @@ -68,6 +69,7 @@ var RateLimitResponse_Code_name = map[int32]string{ 1: "OK", 2: "OVER_LIMIT", } + var RateLimitResponse_Code_value = map[string]int32{ "UNKNOWN": 0, "OK": 1, @@ -77,8 +79,9 @@ var RateLimitResponse_Code_value = map[string]int32{ func (x RateLimitResponse_Code) String() string { return proto.EnumName(RateLimitResponse_Code_name, int32(x)) } + func (RateLimitResponse_Code) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{3, 0} + return fileDescriptor_5009d3be233d3ead, []int{3, 0} } // Main message for a rate limit request. 
The rate limit service is designed to be fully generic @@ -108,16 +111,17 @@ func (m *RateLimitRequest) Reset() { *m = RateLimitRequest{} } func (m *RateLimitRequest) String() string { return proto.CompactTextString(m) } func (*RateLimitRequest) ProtoMessage() {} func (*RateLimitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{0} + return fileDescriptor_5009d3be233d3ead, []int{0} } + func (m *RateLimitRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RateLimitRequest.Unmarshal(m, b) } func (m *RateLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RateLimitRequest.Marshal(b, m, deterministic) } -func (dst *RateLimitRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitRequest.Merge(dst, src) +func (m *RateLimitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitRequest.Merge(m, src) } func (m *RateLimitRequest) XXX_Size() int { return xxx_messageInfo_RateLimitRequest.Size(m) @@ -181,16 +185,17 @@ func (m *RateLimitDescriptor) Reset() { *m = RateLimitDescriptor{} } func (m *RateLimitDescriptor) String() string { return proto.CompactTextString(m) } func (*RateLimitDescriptor) ProtoMessage() {} func (*RateLimitDescriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{1} + return fileDescriptor_5009d3be233d3ead, []int{1} } + func (m *RateLimitDescriptor) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RateLimitDescriptor.Unmarshal(m, b) } func (m *RateLimitDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RateLimitDescriptor.Marshal(b, m, deterministic) } -func (dst *RateLimitDescriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitDescriptor.Merge(dst, src) +func (m *RateLimitDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitDescriptor.Merge(m, src) } func (m *RateLimitDescriptor) XXX_Size() int { return 
xxx_messageInfo_RateLimitDescriptor.Size(m) @@ -220,16 +225,17 @@ func (m *RateLimitDescriptor_Entry) Reset() { *m = RateLimitDescriptor_E func (m *RateLimitDescriptor_Entry) String() string { return proto.CompactTextString(m) } func (*RateLimitDescriptor_Entry) ProtoMessage() {} func (*RateLimitDescriptor_Entry) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{1, 0} + return fileDescriptor_5009d3be233d3ead, []int{1, 0} } + func (m *RateLimitDescriptor_Entry) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RateLimitDescriptor_Entry.Unmarshal(m, b) } func (m *RateLimitDescriptor_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RateLimitDescriptor_Entry.Marshal(b, m, deterministic) } -func (dst *RateLimitDescriptor_Entry) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitDescriptor_Entry.Merge(dst, src) +func (m *RateLimitDescriptor_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitDescriptor_Entry.Merge(m, src) } func (m *RateLimitDescriptor_Entry) XXX_Size() int { return xxx_messageInfo_RateLimitDescriptor_Entry.Size(m) @@ -267,16 +273,17 @@ func (m *RateLimit) Reset() { *m = RateLimit{} } func (m *RateLimit) String() string { return proto.CompactTextString(m) } func (*RateLimit) ProtoMessage() {} func (*RateLimit) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{2} + return fileDescriptor_5009d3be233d3ead, []int{2} } + func (m *RateLimit) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RateLimit.Unmarshal(m, b) } func (m *RateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RateLimit.Marshal(b, m, deterministic) } -func (dst *RateLimit) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimit.Merge(dst, src) +func (m *RateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimit.Merge(m, src) } func (m *RateLimit) XXX_Size() int { return 
xxx_messageInfo_RateLimit.Size(m) @@ -319,16 +326,17 @@ func (m *RateLimitResponse) Reset() { *m = RateLimitResponse{} } func (m *RateLimitResponse) String() string { return proto.CompactTextString(m) } func (*RateLimitResponse) ProtoMessage() {} func (*RateLimitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{3} + return fileDescriptor_5009d3be233d3ead, []int{3} } + func (m *RateLimitResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RateLimitResponse.Unmarshal(m, b) } func (m *RateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RateLimitResponse.Marshal(b, m, deterministic) } -func (dst *RateLimitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitResponse.Merge(dst, src) +func (m *RateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitResponse.Merge(m, src) } func (m *RateLimitResponse) XXX_Size() int { return xxx_messageInfo_RateLimitResponse.Size(m) @@ -369,16 +377,17 @@ func (m *RateLimitResponse_DescriptorStatus) Reset() { *m = RateLimitRes func (m *RateLimitResponse_DescriptorStatus) String() string { return proto.CompactTextString(m) } func (*RateLimitResponse_DescriptorStatus) ProtoMessage() {} func (*RateLimitResponse_DescriptorStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_ratelimit_8ec600a45de499be, []int{3, 0} + return fileDescriptor_5009d3be233d3ead, []int{3, 0} } + func (m *RateLimitResponse_DescriptorStatus) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RateLimitResponse_DescriptorStatus.Unmarshal(m, b) } func (m *RateLimitResponse_DescriptorStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_RateLimitResponse_DescriptorStatus.Marshal(b, m, deterministic) } -func (dst *RateLimitResponse_DescriptorStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_RateLimitResponse_DescriptorStatus.Merge(dst, src) +func (m 
*RateLimitResponse_DescriptorStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitResponse_DescriptorStatus.Merge(m, src) } func (m *RateLimitResponse_DescriptorStatus) XXX_Size() int { return xxx_messageInfo_RateLimitResponse_DescriptorStatus.Size(m) @@ -411,14 +420,54 @@ func (m *RateLimitResponse_DescriptorStatus) GetLimitRemaining() uint32 { } func init() { + proto.RegisterEnum("pb.lyft.ratelimit.RateLimit_Unit", RateLimit_Unit_name, RateLimit_Unit_value) + proto.RegisterEnum("pb.lyft.ratelimit.RateLimitResponse_Code", RateLimitResponse_Code_name, RateLimitResponse_Code_value) proto.RegisterType((*RateLimitRequest)(nil), "pb.lyft.ratelimit.RateLimitRequest") proto.RegisterType((*RateLimitDescriptor)(nil), "pb.lyft.ratelimit.RateLimitDescriptor") proto.RegisterType((*RateLimitDescriptor_Entry)(nil), "pb.lyft.ratelimit.RateLimitDescriptor.Entry") proto.RegisterType((*RateLimit)(nil), "pb.lyft.ratelimit.RateLimit") proto.RegisterType((*RateLimitResponse)(nil), "pb.lyft.ratelimit.RateLimitResponse") proto.RegisterType((*RateLimitResponse_DescriptorStatus)(nil), "pb.lyft.ratelimit.RateLimitResponse.DescriptorStatus") - proto.RegisterEnum("pb.lyft.ratelimit.RateLimit_Unit", RateLimit_Unit_name, RateLimit_Unit_value) - proto.RegisterEnum("pb.lyft.ratelimit.RateLimitResponse_Code", RateLimitResponse_Code_name, RateLimitResponse_Code_value) +} + +func init() { proto.RegisterFile("proto/ratelimit/ratelimit.proto", fileDescriptor_5009d3be233d3ead) } + +var fileDescriptor_5009d3be233d3ead = []byte{ + // 532 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xdd, 0x8e, 0xd2, 0x40, + 0x14, 0xde, 0xa1, 0x5d, 0x58, 0x4e, 0x17, 0x28, 0xa3, 0x31, 0x84, 0x98, 0x2c, 0x56, 0xa3, 0xf8, + 0x93, 0x6e, 0x82, 0xd9, 0x4b, 0x4d, 0x70, 0xc1, 0x2c, 0x59, 0x16, 0x74, 0x58, 0x34, 0x7a, 0x61, + 0xd3, 0xa5, 0x47, 0xb7, 0xb1, 0xdb, 0xe2, 0xcc, 0x94, 0x84, 0x3b, 0x9f, 0xc0, 0x3b, 0x1f, 0xc0, + 0x17, 0xf0, 0x0d, 0x7c, 
0x37, 0xd3, 0xa1, 0x14, 0xfc, 0x09, 0x21, 0x7b, 0x77, 0xfe, 0xbe, 0xef, + 0x9c, 0x9e, 0xef, 0x4c, 0xe1, 0x60, 0xca, 0x23, 0x19, 0x1d, 0x72, 0x57, 0x62, 0xe0, 0x5f, 0xf9, + 0x72, 0x65, 0xd9, 0x2a, 0x43, 0xab, 0xd3, 0x0b, 0x3b, 0x98, 0x7f, 0x94, 0x76, 0x96, 0xb0, 0xbe, + 0x13, 0x30, 0x99, 0x2b, 0xb1, 0x9f, 0x78, 0x0c, 0xbf, 0xc4, 0x28, 0x24, 0xbd, 0x05, 0x79, 0x2f, + 0xba, 0x72, 0xfd, 0xb0, 0x46, 0x1a, 0xa4, 0x59, 0x64, 0xa9, 0x47, 0x4f, 0xc0, 0xf0, 0x50, 0x4c, + 0xb8, 0x3f, 0x95, 0x11, 0x17, 0xb5, 0x5c, 0x43, 0x6b, 0x1a, 0xad, 0xfb, 0xf6, 0x3f, 0xac, 0x76, + 0xc6, 0xd8, 0xc9, 0xca, 0xd9, 0x3a, 0x94, 0x1e, 0x80, 0x71, 0xe9, 0x4b, 0xe1, 0xb8, 0x9e, 0x87, + 0xa1, 0x57, 0xd3, 0x1a, 0xa4, 0x59, 0x62, 0x90, 0x84, 0xda, 0x2a, 0x62, 0x7d, 0x23, 0x70, 0xe3, + 0x3f, 0x2c, 0xf4, 0x25, 0x14, 0x30, 0x94, 0xdc, 0x47, 0x51, 0x23, 0xaa, 0xfd, 0x93, 0xed, 0xda, + 0xdb, 0xdd, 0x50, 0xf2, 0x39, 0x5b, 0x82, 0xeb, 0x87, 0xb0, 0xab, 0x22, 0xd4, 0x04, 0xed, 0x33, + 0xce, 0xd3, 0x0f, 0x4d, 0x4c, 0x7a, 0x13, 0x76, 0x67, 0x6e, 0x10, 0x63, 0x2d, 0xa7, 0x62, 0x0b, + 0xc7, 0xfa, 0x49, 0xa0, 0x98, 0xf1, 0xd2, 0x47, 0x50, 0xe5, 0x8b, 0x65, 0x09, 0x67, 0x8a, 0xdc, + 0x89, 0x43, 0x5f, 0x2a, 0x8e, 0x12, 0xab, 0x2c, 0x13, 0xaf, 0x90, 0x8f, 0x43, 0x5f, 0xd2, 0x23, + 0xd0, 0x55, 0x3a, 0xa1, 0x2b, 0xb7, 0xee, 0x6c, 0x9a, 0xd7, 0x4e, 0x00, 0x4c, 0x95, 0x5b, 0xcf, + 0x41, 0x57, 0x70, 0x03, 0x0a, 0xe3, 0xc1, 0xe9, 0x60, 0xf8, 0x76, 0x60, 0xee, 0x50, 0x80, 0xfc, + 0xa8, 0x7b, 0x3c, 0x1c, 0x74, 0x4c, 0x92, 0xd8, 0x67, 0xbd, 0xc1, 0xf8, 0xbc, 0x6b, 0xe6, 0xe8, + 0x1e, 0xe8, 0x27, 0xc3, 0x31, 0x33, 0x35, 0x5a, 0x00, 0xad, 0xd3, 0x7e, 0x67, 0xea, 0xd6, 0x0f, + 0x0d, 0xaa, 0x6b, 0xca, 0x8a, 0x69, 0x14, 0x0a, 0xa4, 0x7d, 0xd8, 0x8f, 0x66, 0xc8, 0xdd, 0x20, + 0x70, 0x26, 0x91, 0x87, 0x6a, 0xe6, 0x72, 0xeb, 0xe1, 0xa6, 0xa1, 0x96, 0x58, 0xfb, 0x38, 0xf2, + 0x90, 0x19, 0x29, 0x3c, 0x71, 0xe8, 0x6b, 0xd8, 0x13, 0xd2, 0x95, 0xb1, 0xc0, 0xe5, 0x35, 0x1c, + 0x6d, 0xc5, 0xb4, 0xd2, 0x65, 0xa4, 0xe0, 0x2c, 0xa3, 0xa9, 
0xff, 0x22, 0x60, 0xfe, 0x9d, 0xa6, + 0xcf, 0x40, 0xbf, 0xde, 0xb4, 0x0a, 0x46, 0xdb, 0x50, 0x9a, 0xc4, 0x9c, 0x63, 0x28, 0x1d, 0x55, + 0xad, 0xa4, 0x30, 0x5a, 0xb7, 0x37, 0xf2, 0xec, 0xa7, 0x90, 0x85, 0xe0, 0x0f, 0xa0, 0xa2, 0x0a, + 0x1c, 0x8e, 0xc9, 0x53, 0xf0, 0xc3, 0x4f, 0xe9, 0xd1, 0x96, 0x83, 0x45, 0xd7, 0x34, 0x6a, 0x3d, + 0x06, 0x5d, 0xad, 0xe6, 0x0f, 0xd9, 0xf2, 0x90, 0x1b, 0x9e, 0x9a, 0x84, 0x96, 0x01, 0x86, 0x6f, + 0xba, 0xcc, 0xe9, 0xf7, 0xce, 0x7a, 0xe7, 0x66, 0xae, 0xc5, 0xd7, 0x1e, 0xdf, 0x08, 0xf9, 0xcc, + 0x9f, 0x20, 0xfd, 0x00, 0x95, 0xd1, 0x65, 0x14, 0x07, 0xde, 0xea, 0xda, 0xee, 0x6e, 0xfe, 0x60, + 0x75, 0x6e, 0xf5, 0x7b, 0xdb, 0x6c, 0xc5, 0xda, 0x79, 0x51, 0x7e, 0x5f, 0xcc, 0x0a, 0xbe, 0x12, + 0x72, 0x91, 0x57, 0xff, 0x86, 0xa7, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x4d, 0x0e, 0xa0, 0x99, + 0x3e, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -494,45 +543,3 @@ var _RateLimitService_serviceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "proto/ratelimit/ratelimit.proto", } - -func init() { - proto.RegisterFile("proto/ratelimit/ratelimit.proto", fileDescriptor_ratelimit_8ec600a45de499be) -} - -var fileDescriptor_ratelimit_8ec600a45de499be = []byte{ - // 532 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xdd, 0x8e, 0xd2, 0x40, - 0x14, 0xde, 0xa1, 0x5d, 0x58, 0x4e, 0x17, 0x28, 0xa3, 0x31, 0x84, 0x98, 0x2c, 0x56, 0xa3, 0xf8, - 0x93, 0x6e, 0x82, 0xd9, 0x4b, 0x4d, 0x70, 0xc1, 0x2c, 0x59, 0x16, 0x74, 0x58, 0x34, 0x7a, 0x61, - 0xd3, 0xa5, 0x47, 0xb7, 0xb1, 0xdb, 0xe2, 0xcc, 0x94, 0x84, 0x3b, 0x9f, 0xc0, 0x3b, 0x1f, 0xc0, - 0x17, 0xf0, 0x0d, 0x7c, 0x37, 0xd3, 0xa1, 0x14, 0xfc, 0x09, 0x21, 0x7b, 0x77, 0xfe, 0xbe, 0xef, - 0x9c, 0x9e, 0xef, 0x4c, 0xe1, 0x60, 0xca, 0x23, 0x19, 0x1d, 0x72, 0x57, 0x62, 0xe0, 0x5f, 0xf9, - 0x72, 0x65, 0xd9, 0x2a, 0x43, 0xab, 0xd3, 0x0b, 0x3b, 0x98, 0x7f, 0x94, 0x76, 0x96, 0xb0, 0xbe, - 0x13, 0x30, 
0x99, 0x2b, 0xb1, 0x9f, 0x78, 0x0c, 0xbf, 0xc4, 0x28, 0x24, 0xbd, 0x05, 0x79, 0x2f, - 0xba, 0x72, 0xfd, 0xb0, 0x46, 0x1a, 0xa4, 0x59, 0x64, 0xa9, 0x47, 0x4f, 0xc0, 0xf0, 0x50, 0x4c, - 0xb8, 0x3f, 0x95, 0x11, 0x17, 0xb5, 0x5c, 0x43, 0x6b, 0x1a, 0xad, 0xfb, 0xf6, 0x3f, 0xac, 0x76, - 0xc6, 0xd8, 0xc9, 0xca, 0xd9, 0x3a, 0x94, 0x1e, 0x80, 0x71, 0xe9, 0x4b, 0xe1, 0xb8, 0x9e, 0x87, - 0xa1, 0x57, 0xd3, 0x1a, 0xa4, 0x59, 0x62, 0x90, 0x84, 0xda, 0x2a, 0x62, 0x7d, 0x23, 0x70, 0xe3, - 0x3f, 0x2c, 0xf4, 0x25, 0x14, 0x30, 0x94, 0xdc, 0x47, 0x51, 0x23, 0xaa, 0xfd, 0x93, 0xed, 0xda, - 0xdb, 0xdd, 0x50, 0xf2, 0x39, 0x5b, 0x82, 0xeb, 0x87, 0xb0, 0xab, 0x22, 0xd4, 0x04, 0xed, 0x33, - 0xce, 0xd3, 0x0f, 0x4d, 0x4c, 0x7a, 0x13, 0x76, 0x67, 0x6e, 0x10, 0x63, 0x2d, 0xa7, 0x62, 0x0b, - 0xc7, 0xfa, 0x49, 0xa0, 0x98, 0xf1, 0xd2, 0x47, 0x50, 0xe5, 0x8b, 0x65, 0x09, 0x67, 0x8a, 0xdc, - 0x89, 0x43, 0x5f, 0x2a, 0x8e, 0x12, 0xab, 0x2c, 0x13, 0xaf, 0x90, 0x8f, 0x43, 0x5f, 0xd2, 0x23, - 0xd0, 0x55, 0x3a, 0xa1, 0x2b, 0xb7, 0xee, 0x6c, 0x9a, 0xd7, 0x4e, 0x00, 0x4c, 0x95, 0x5b, 0xcf, - 0x41, 0x57, 0x70, 0x03, 0x0a, 0xe3, 0xc1, 0xe9, 0x60, 0xf8, 0x76, 0x60, 0xee, 0x50, 0x80, 0xfc, - 0xa8, 0x7b, 0x3c, 0x1c, 0x74, 0x4c, 0x92, 0xd8, 0x67, 0xbd, 0xc1, 0xf8, 0xbc, 0x6b, 0xe6, 0xe8, - 0x1e, 0xe8, 0x27, 0xc3, 0x31, 0x33, 0x35, 0x5a, 0x00, 0xad, 0xd3, 0x7e, 0x67, 0xea, 0xd6, 0x0f, - 0x0d, 0xaa, 0x6b, 0xca, 0x8a, 0x69, 0x14, 0x0a, 0xa4, 0x7d, 0xd8, 0x8f, 0x66, 0xc8, 0xdd, 0x20, - 0x70, 0x26, 0x91, 0x87, 0x6a, 0xe6, 0x72, 0xeb, 0xe1, 0xa6, 0xa1, 0x96, 0x58, 0xfb, 0x38, 0xf2, - 0x90, 0x19, 0x29, 0x3c, 0x71, 0xe8, 0x6b, 0xd8, 0x13, 0xd2, 0x95, 0xb1, 0xc0, 0xe5, 0x35, 0x1c, - 0x6d, 0xc5, 0xb4, 0xd2, 0x65, 0xa4, 0xe0, 0x2c, 0xa3, 0xa9, 0xff, 0x22, 0x60, 0xfe, 0x9d, 0xa6, - 0xcf, 0x40, 0xbf, 0xde, 0xb4, 0x0a, 0x46, 0xdb, 0x50, 0x9a, 0xc4, 0x9c, 0x63, 0x28, 0x1d, 0x55, - 0xad, 0xa4, 0x30, 0x5a, 0xb7, 0x37, 0xf2, 0xec, 0xa7, 0x90, 0x85, 0xe0, 0x0f, 0xa0, 0xa2, 0x0a, - 0x1c, 0x8e, 0xc9, 0x53, 0xf0, 0xc3, 0x4f, 0xe9, 
0xd1, 0x96, 0x83, 0x45, 0xd7, 0x34, 0x6a, 0x3d, - 0x06, 0x5d, 0xad, 0xe6, 0x0f, 0xd9, 0xf2, 0x90, 0x1b, 0x9e, 0x9a, 0x84, 0x96, 0x01, 0x86, 0x6f, - 0xba, 0xcc, 0xe9, 0xf7, 0xce, 0x7a, 0xe7, 0x66, 0xae, 0xc5, 0xd7, 0x1e, 0xdf, 0x08, 0xf9, 0xcc, - 0x9f, 0x20, 0xfd, 0x00, 0x95, 0xd1, 0x65, 0x14, 0x07, 0xde, 0xea, 0xda, 0xee, 0x6e, 0xfe, 0x60, - 0x75, 0x6e, 0xf5, 0x7b, 0xdb, 0x6c, 0xc5, 0xda, 0x79, 0x51, 0x7e, 0x5f, 0xcc, 0x0a, 0xbe, 0x12, - 0x72, 0x91, 0x57, 0xff, 0x86, 0xa7, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x4d, 0x0e, 0xa0, 0x99, - 0x3e, 0x04, 0x00, 0x00, -} diff --git a/src/service/header_util.go b/src/service/header_util.go new file mode 100644 index 00000000..bab085c4 --- /dev/null +++ b/src/service/header_util.go @@ -0,0 +1,51 @@ +package ratelimit + +import ( + "strconv" + + "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" + pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" +) + +func limitHeader(descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue { + return &core.HeaderValue{ + Key: "X-RateLimit-Limit", + Value: strconv.FormatUint(uint64(descriptor.CurrentLimit.RequestsPerUnit), 10), + } +} + +func remainingHeader(descriptor *pb.RateLimitResponse_DescriptorStatus) *core.HeaderValue { + return &core.HeaderValue{ + Key: "X-RateLimit-Remaining", + Value: strconv.FormatUint(uint64(descriptor.LimitRemaining), 10), + } +} + +func resetHeader( + descriptor *pb.RateLimitResponse_DescriptorStatus, now int64) *core.HeaderValue { + + return &core.HeaderValue{ + Key: "X-RateLimit-Reset", + Value: strconv.FormatInt(calculateReset(descriptor, now), 10), + } +} + +func calculateReset(descriptor *pb.RateLimitResponse_DescriptorStatus, now int64) int64 { + sec := unitInSeconds(descriptor.CurrentLimit.Unit) + return sec - now%sec +} + +func unitInSeconds(unit pb.RateLimitResponse_RateLimit_Unit) int64 { + switch unit { + case pb.RateLimitResponse_RateLimit_SECOND: + return 1 + case pb.RateLimitResponse_RateLimit_MINUTE: + 
return 60 + case pb.RateLimitResponse_RateLimit_HOUR: + return 60 * 60 + case pb.RateLimitResponse_RateLimit_DAY: + return 60 * 60 * 24 + default: + panic("unknown rate limit unit") + } +} diff --git a/src/service/ratelimit.go b/src/service/ratelimit.go index d9817721..46c9dcd8 100644 --- a/src/service/ratelimit.go +++ b/src/service/ratelimit.go @@ -3,13 +3,16 @@ package ratelimit import ( "strings" "sync" + "time" + "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/lyft/goruntime/loader" "github.com/lyft/gostats" "github.com/lyft/ratelimit/src/assert" "github.com/lyft/ratelimit/src/config" "github.com/lyft/ratelimit/src/redis" + "github.com/lyft/ratelimit/src/settings" logger "github.com/sirupsen/logrus" "golang.org/x/net/context" ) @@ -19,6 +22,16 @@ type shouldRateLimitStats struct { serviceError stats.Counter } +// Clock represents a wall clock for time based operations. +type Clock interface { + Now() time.Time +} + +// StdClock returns system time. 
+type StdClock struct{} + +func (c StdClock) Now() time.Time { return time.Now() } + func newShouldRateLimitStats(scope stats.Scope) shouldRateLimitStats { ret := shouldRateLimitStats{} ret.redisError = scope.NewCounter("redis_error") @@ -47,15 +60,17 @@ type RateLimitServiceServer interface { } type service struct { - runtime loader.IFace - configLock sync.RWMutex - configLoader config.RateLimitConfigLoader - config config.RateLimitConfig - runtimeUpdateEvent chan int - cache redis.RateLimitCache - stats serviceStats - rlStatsScope stats.Scope - legacy *legacyService + runtime loader.IFace + configLock sync.RWMutex + configLoader config.RateLimitConfigLoader + config config.RateLimitConfig + runtimeUpdateEvent chan int + cache redis.RateLimitCache + stats serviceStats + rlStatsScope stats.Scope + legacy *legacyService + responseHeadersEnabled bool + clock Clock } func (this *service) reloadConfig() { @@ -126,6 +141,37 @@ func (this *service) shouldRateLimitWorker( finalCode = descriptorStatus.Code } } + if this.responseHeadersEnabled { + now := this.clock.Now().Unix() + var limitingDescriptor *pb.RateLimitResponse_DescriptorStatus + limitCount := 0 + for _, descriptor := range responseDescriptorStatuses { + if descriptor.CurrentLimit == nil { + continue + } + limitCount++ + if limitingDescriptor == nil || + descriptor.LimitRemaining < limitingDescriptor.LimitRemaining || + descriptor.LimitRemaining == limitingDescriptor.LimitRemaining && + calculateReset(descriptor, now) > calculateReset(limitingDescriptor, now) { + limitingDescriptor = descriptor + } + } + if limitCount == 1 { + response.Headers = []*core.HeaderValue{ + limitHeader(limitingDescriptor), + remainingHeader(limitingDescriptor), + resetHeader(limitingDescriptor, now), + } + } else if limitCount > 1 { + // If there is more than one limit, then picking one of them for the "X-RateLimit-Limit" + // header value would be arbitrary, so we omit it completely. 
+ response.Headers = []*core.HeaderValue{ + remainingHeader(limitingDescriptor), + resetHeader(limitingDescriptor, now), + } + } + } response.OverallCode = finalCode return response @@ -175,17 +221,20 @@ func (this *service) GetCurrentConfig() config.RateLimitConfig { } func NewService(runtime loader.IFace, cache redis.RateLimitCache, - configLoader config.RateLimitConfigLoader, stats stats.Scope) RateLimitServiceServer { + configLoader config.RateLimitConfigLoader, stats stats.Scope, + clock Clock, settings settings.Settings) RateLimitServiceServer { newService := &service{ - runtime: runtime, - configLock: sync.RWMutex{}, - configLoader: configLoader, - config: nil, - runtimeUpdateEvent: make(chan int), - cache: cache, - stats: newServiceStats(stats), - rlStatsScope: stats.Scope("rate_limit"), + runtime: runtime, + configLock: sync.RWMutex{}, + configLoader: configLoader, + config: nil, + runtimeUpdateEvent: make(chan int), + cache: cache, + stats: newServiceStats(stats), + rlStatsScope: stats.Scope("rate_limit"), + responseHeadersEnabled: settings.ResponseHeadersEnabled, + clock: clock, } newService.legacy = &legacyService{ s: newService, diff --git a/src/service/ratelimit_legacy.go b/src/service/ratelimit_legacy.go index a8218279..3af5c882 100644 --- a/src/service/ratelimit_legacy.go +++ b/src/service/ratelimit_legacy.go @@ -6,6 +6,7 @@ import ( "github.com/lyft/gostats" pb_legacy "github.com/lyft/ratelimit/proto/ratelimit" "golang.org/x/net/context" + "strings" ) type RateLimitLegacyServiceServer interface { @@ -90,7 +91,8 @@ func ConvertResponse(response *pb.RateLimitResponse) (*pb_legacy.RateLimitRespon } resp := &pb_legacy.RateLimitResponse{} - err = jsonpb.UnmarshalString(s, resp) + u := jsonpb.Unmarshaler{AllowUnknownFields: true} + err = u.Unmarshal(strings.NewReader(s), resp) if err != nil { return nil, err } diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go index cd853517..8f4be146 100644 --- 
a/src/service_cmd/runner/runner.go +++ b/src/service_cmd/runner/runner.go @@ -44,8 +44,10 @@ func Run() { rand.New(redis.NewLockedSource(time.Now().Unix())), s.ExpirationJitterMaxSeconds), config.NewRateLimitConfigLoaderImpl(), - srv.Scope().Scope("service")) - + srv.Scope().Scope("service"), + ratelimit.StdClock{}, + s, + ) srv.AddDebugHttpEndpoint( "/rlconfig", "print out the currently loaded configuration for debugging", diff --git a/src/settings/settings.go b/src/settings/settings.go index 89095547..547b81ed 100644 --- a/src/settings/settings.go +++ b/src/settings/settings.go @@ -27,6 +27,7 @@ type Settings struct { RedisPerSecondUrl string `envconfig:"REDIS_PERSECOND_URL" default:"/var/run/nutcracker/ratelimitpersecond.sock"` RedisPerSecondPoolSize int `envconfig:"REDIS_PERSECOND_POOL_SIZE" default:"10"` ExpirationJitterMaxSeconds int64 `envconfig:"EXPIRATION_JITTER_MAX_SECONDS" default:"300"` + ResponseHeadersEnabled bool `envconfig:"RESPONSE_HEADERS_ENABLED" default:"false"` } type Option func(*Settings) diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index c48387f3..3e88994e 100644 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" pb_legacy "github.com/lyft/ratelimit/proto/ratelimit" "github.com/lyft/ratelimit/src/service_cmd/runner" @@ -244,3 +245,45 @@ func TestBasicConfigLegacy(t *testing.T) { assert.NoError(err) } } + +func TestBasicConfigWithHeaders(t *testing.T) { + os.Setenv("RESPONSE_HEADERS_ENABLED", "true") + os.Setenv("REDIS_PERSECOND", "false") + os.Setenv("PORT", "8082") + os.Setenv("GRPC_PORT", "8086") + os.Setenv("DEBUG_PORT", "8084") + os.Setenv("RUNTIME_ROOT", "runtime/current") + os.Setenv("RUNTIME_SUBDIRECTORY", "ratelimit") + os.Setenv("REDIS_PERSECOND_SOCKET_TYPE", "tcp") + 
os.Setenv("REDIS_SOCKET_TYPE", "tcp") + os.Setenv("REDIS_URL", "localhost:6379") + + go func() { + runner.Run() + }() + + // HACK: Wait for the server to come up. Make a hook that we can wait on. + time.Sleep(100 * time.Millisecond) + + assert := assert.New(t) + conn, err := grpc.Dial("localhost:8086", grpc.WithInsecure()) + assert.NoError(err) + defer conn.Close() + c := pb.NewRateLimitServiceClient(conn) + + response, err := c.ShouldRateLimit( + context.Background(), + common.NewRateLimitRequest("basic_headers", [][][2]string{{{"key1", "foo"}}}, 1)) + assert.Equal( + &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OK, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{newDescriptorStatus( + pb.RateLimitResponse_OK, 50, pb.RateLimitResponse_RateLimit_SECOND, 49)}, + Headers: []*core.HeaderValue{ + {Key: "X-RateLimit-Limit", Value: "50"}, + {Key: "X-RateLimit-Remaining", Value: "49"}, + {Key: "X-RateLimit-Reset", Value: "1"}, + }}, + response) + assert.NoError(err) +} diff --git a/test/integration/runtime/current/ratelimit/config/basic_headers.yaml b/test/integration/runtime/current/ratelimit/config/basic_headers.yaml new file mode 100644 index 00000000..7f1b5f10 --- /dev/null +++ b/test/integration/runtime/current/ratelimit/config/basic_headers.yaml @@ -0,0 +1,6 @@ +domain: basic_headers +descriptors: + - key: key1 + rate_limit: + unit: second + requests_per_unit: 50 diff --git a/test/service/ratelimit_legacy_test.go b/test/service/ratelimit_legacy_test.go index f0510f3d..b3ae738d 100644 --- a/test/service/ratelimit_legacy_test.go +++ b/test/service/ratelimit_legacy_test.go @@ -3,6 +3,7 @@ package ratelimit_test import ( "testing" + "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/golang/mock/gomock" @@ -214,6 +215,10 @@ func TestInitialLoadErrorLegacy(test *testing.T) { t := 
commonSetup(test) defer t.controller.Finish() + t.settings.ResponseHeadersEnabled = false + var currentTime int64 = 0 + var c ratelimit.Clock = stubClock{now: &currentTime} + t.runtime.EXPECT().AddUpdateCallback(gomock.Any()).Do( func(callback chan<- int) { t.runtimeUpdateCallback = callback }) t.runtime.EXPECT().Snapshot().Return(t.snapshot).MinTimes(1) @@ -224,7 +229,7 @@ func TestInitialLoadErrorLegacy(test *testing.T) { func([]config.RateLimitConfigToLoad, stats.Scope) { panic(config.RateLimitConfigError("load error")) }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore) + service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, c, t.settings) request := common.NewRateLimitRequestLegacy("test-domain", [][][2]string{{{"hello", "world"}}}, 1) response, err := service.GetLegacyService().ShouldRateLimit(nil, request) @@ -406,3 +411,31 @@ func TestConvertResponse(test *testing.T) { assert.Equal(test, expectedResponse, resp) } + +func TestConvertResponseWithHeaders(t *testing.T) { + response := &pb.RateLimitResponse{ + OverallCode: pb.RateLimitResponse_OVER_LIMIT, + Statuses: []*pb.RateLimitResponse_DescriptorStatus{{ + Code: pb.RateLimitResponse_OK, + CurrentLimit: nil, + LimitRemaining: 9, + }}, + Headers: []*core.HeaderValue{ + {Key: "X-RateLimit-Limit", Value: "5"}, + {Key: "X-RateLimit-Remaining", Value: "4"}, + {Key: "X-RateLimit-Reset", Value: "38"}, + }, + } + legacyResponse, err := ratelimit.ConvertResponse(response) + if err != nil { + assert.FailNow(t, err.Error()) + } + assert.Equal(t, &pb_legacy.RateLimitResponse{ + OverallCode: pb_legacy.RateLimitResponse_OVER_LIMIT, + Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{ + Code: pb_legacy.RateLimitResponse_OK, + CurrentLimit: nil, + LimitRemaining: 9, + }}, + }, legacyResponse) +} diff --git a/test/service/ratelimit_test.go b/test/service/ratelimit_test.go index df5de3eb..97add903 100644 --- a/test/service/ratelimit_test.go +++ 
b/test/service/ratelimit_test.go @@ -3,13 +3,16 @@ package ratelimit_test import ( "sync" "testing" + "time" + "github.com/envoyproxy/go-control-plane/envoy/api/v2/core" pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2" "github.com/golang/mock/gomock" "github.com/lyft/gostats" "github.com/lyft/ratelimit/src/config" "github.com/lyft/ratelimit/src/redis" "github.com/lyft/ratelimit/src/service" + "github.com/lyft/ratelimit/src/settings" "github.com/lyft/ratelimit/test/common" "github.com/lyft/ratelimit/test/mocks/config" "github.com/lyft/ratelimit/test/mocks/redis" @@ -24,6 +27,13 @@ type barrier struct { event *sync.Cond } +// stubClock can be used to easily test time-based logic. +type stubClock struct { + now *int64 +} + +func (c stubClock) Now() time.Time { return time.Unix(*c.now, 0) } + func (this *barrier) signal() { this.event.L.Lock() defer this.event.L.Unlock() @@ -56,6 +66,8 @@ type rateLimitServiceTestSuite struct { config *mock_config.MockRateLimitConfig runtimeUpdateCallback chan<- int statStore stats.Store + clock ratelimit.Clock + settings settings.Settings } func commonSetup(t *testing.T) rateLimitServiceTestSuite { @@ -68,6 +80,7 @@ func commonSetup(t *testing.T) rateLimitServiceTestSuite { ret.configLoader = mock_config.NewMockRateLimitConfigLoader(ret.controller) ret.config = mock_config.NewMockRateLimitConfig(ret.controller) ret.statStore = stats.NewStore(stats.NewNullSink(), false) + ret.settings = settings.Settings{} return ret } @@ -82,7 +95,7 @@ func (this *rateLimitServiceTestSuite) setupBasicService() ratelimit.RateLimitSe this.configLoader.EXPECT().Load( []config.RateLimitConfigToLoad{{"config.basic_config", "fake_yaml"}}, gomock.Any()).Return(this.config) - return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statStore) + return ratelimit.NewService(this.runtime, this.cache, this.configLoader, this.statStore, this.clock, this.settings) } func TestService(test *testing.T) { @@ -225,7 +238,8 @@ 
func TestInitialLoadError(test *testing.T) { func([]config.RateLimitConfigToLoad, stats.Scope) { panic(config.RateLimitConfigError("load error")) }) - service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore) + service := ratelimit.NewService(t.runtime, t.cache, t.configLoader, t.statStore, + t.clock, t.settings) request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) response, err := service.ShouldRateLimit(nil, request) @@ -233,3 +247,231 @@ func TestInitialLoadError(test *testing.T) { t.assert.Equal("no rate limit configuration loaded", err.Error()) t.assert.EqualValues(1, t.statStore.NewCounter("call.should_rate_limit.service_error").Value()) } + +func TestHeaders(test *testing.T) { + t := commonSetup(test) + var currentTime int64 = 123 + t.settings.ResponseHeadersEnabled = true + t.clock = stubClock{now: &currentTime} + defer t.controller.Finish() + + service := t.setupBasicService() + + request := common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) + limits := []*config.RateLimit{ + config.NewRateLimit(10, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore), + } + + // Under limit + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(limits[0]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, + CurrentLimit: limits[0].Limit, LimitRemaining: 6}, + }) + response, err := service.ShouldRateLimit(nil, request) + t.assert.Equal([]*core.HeaderValue{ + {Key: "X-RateLimit-Limit", Value: "10"}, + {Key: "X-RateLimit-Remaining", Value: "6"}, + {Key: "X-RateLimit-Reset", Value: "57"}, + }, + response.Headers) + t.assert.Nil(err) + + // Last request under limit + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(limits[0]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: 
pb.RateLimitResponse_OK, + CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + }) + response, err = service.ShouldRateLimit(nil, request) + t.assert.Equal([]*core.HeaderValue{ + {Key: "X-RateLimit-Limit", Value: "10"}, + {Key: "X-RateLimit-Remaining", Value: "0"}, + {Key: "X-RateLimit-Reset", Value: "57"}, + }, + response.Headers) + t.assert.Nil(err) + + // Over limit + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(limits[0]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + }) + response, err = service.ShouldRateLimit(nil, request) + t.assert.Equal([]*core.HeaderValue{ + {Key: "X-RateLimit-Limit", Value: "10"}, + {Key: "X-RateLimit-Remaining", Value: "0"}, + {Key: "X-RateLimit-Reset", Value: "57"}, + }, + response.Headers) + t.assert.Nil(err) + + // After time passes, the reset header should decrement + currentTime = 124 + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(limits[0]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + }) + response, err = service.ShouldRateLimit(nil, request) + t.assert.Equal([]*core.HeaderValue{ + {Key: "X-RateLimit-Limit", Value: "10"}, + {Key: "X-RateLimit-Remaining", Value: "0"}, + {Key: "X-RateLimit-Reset", Value: "56"}, + }, response.Headers) + t.assert.Nil(err) + + // Last second before reset + currentTime = 179 + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(limits[0]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OVER_LIMIT, + CurrentLimit: limits[0].Limit, LimitRemaining: 0}, + }) + response, err = service.ShouldRateLimit(nil, request) + 
t.assert.Equal([]*core.HeaderValue{ + {Key: "X-RateLimit-Limit", Value: "10"}, + {Key: "X-RateLimit-Remaining", Value: "0"}, + {Key: "X-RateLimit-Reset", Value: "1"}, + }, response.Headers) + t.assert.Nil(err) + + // Exact second when reset occurs + currentTime = 180 + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(limits[0]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{{Code: pb.RateLimitResponse_OK, + CurrentLimit: limits[0].Limit, LimitRemaining: 9}, + }) + response, err = service.ShouldRateLimit(nil, request) + t.assert.Equal([]*core.HeaderValue{ + {Key: "X-RateLimit-Limit", Value: "10"}, + {Key: "X-RateLimit-Remaining", Value: "9"}, + {Key: "X-RateLimit-Reset", Value: "60"}, + }, response.Headers) + t.assert.Nil(err) + + // Multiple descriptors + // (X-RateLimit-Limit omitted because choosing the limit of one descriptor would be arbitrary) + + currentTime = 200 + request = common.NewRateLimitRequest("test-domain", [][][2]string{ + {{"a", "b"}}, {{"c", "d"}}, {{"e", "f"}}, + }, 1) + limits = []*config.RateLimit{ + config.NewRateLimit(1000, pb.RateLimitResponse_RateLimit_HOUR, "key", t.statStore), + config.NewRateLimit(75, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore), + config.NewRateLimit(50, pb.RateLimitResponse_RateLimit_MINUTE, "key", t.statStore), + } + + // First descriptor is limiting factor + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[1]).Return(limits[1]) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[2]).Return(limits[2]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 3}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 4}, + {Code: 
pb.RateLimitResponse_OK, CurrentLimit: limits[2].Limit, LimitRemaining: 5}, + }) + response, err = service.ShouldRateLimit(nil, request) + t.assert.Equal([]*core.HeaderValue{ + {Key: "X-RateLimit-Remaining", Value: "3"}, + {Key: "X-RateLimit-Reset", Value: "3400"}, + }, response.Headers) + t.assert.Nil(err) + + // Second descriptor is limiting factor + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[1]).Return(limits[1]) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[2]).Return(limits[2]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 6}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 4}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[2].Limit, LimitRemaining: 5}, + }) + response, err = service.ShouldRateLimit(nil, request) + t.assert.Equal([]*core.HeaderValue{ + {Key: "X-RateLimit-Remaining", Value: "4"}, + {Key: "X-RateLimit-Reset", Value: "40"}, + }, response.Headers) + t.assert.Nil(err) + + // Third descriptor is limiting factor + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[1]).Return(limits[1]) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[2]).Return(limits[2]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 6}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 7}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[2].Limit, LimitRemaining: 5}, + }) + response, err = service.ShouldRateLimit(nil, request) + t.assert.Equal([]*core.HeaderValue{ + {Key: 
"X-RateLimit-Remaining", Value: "5"}, + {Key: "X-RateLimit-Reset", Value: "40"}, + }, response.Headers) + t.assert.Nil(err) + + // If there's a LimitRemaining tie, the highest Reset is returned + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[1]).Return(limits[1]) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[2]).Return(limits[2]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 6}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 6}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[2].Limit, LimitRemaining: 7}, + }) + response, err = service.ShouldRateLimit(nil, request) + t.assert.Equal([]*core.HeaderValue{ + {Key: "X-RateLimit-Remaining", Value: "6"}, + {Key: "X-RateLimit-Reset", Value: "3400"}, + }, response.Headers) + t.assert.Nil(err) + + // Same test with same expected result, but inverse descriptor order from cache + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(limits[0]) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[1]).Return(limits[1]) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[2]).Return(limits[2]) + t.cache.EXPECT().DoLimit(nil, request, limits).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[2].Limit, LimitRemaining: 7}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[1].Limit, LimitRemaining: 6}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: limits[0].Limit, LimitRemaining: 6}, + }) + response, err = service.ShouldRateLimit(nil, request) + t.assert.Equal([]*core.HeaderValue{ + {Key: "X-RateLimit-Remaining", Value: "6"}, + {Key: "X-RateLimit-Reset", Value: "3400"}, + }, response.Headers) + 
t.assert.Nil(err) + + // No headers if no limit, one descriptor + request = common.NewRateLimitRequest("test-domain", [][][2]string{{{"hello", "world"}}}, 1) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(nil) + t.cache.EXPECT().DoLimit(nil, request, []*config.RateLimit{nil}).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }) + response, err = service.ShouldRateLimit(nil, request) + t.assert.Nil(response.Headers) + t.assert.Nil(err) + + // No headers if no limit, two descriptors + request = common.NewRateLimitRequest("test-domain", [][][2]string{ + {{"foo", "bar"}}, {{"hello", "world"}}}, 1) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[0]).Return(nil) + t.config.EXPECT().GetLimit(nil, "test-domain", request.Descriptors[1]).Return(nil) + t.cache.EXPECT().DoLimit(nil, request, []*config.RateLimit{nil, nil}).Return( + []*pb.RateLimitResponse_DescriptorStatus{ + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + {Code: pb.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}, + }) + response, err = service.ShouldRateLimit(nil, request) + t.assert.Nil(response.Headers) + t.assert.Nil(err) +}