diff --git a/bpf/process/types/basic.h b/bpf/process/types/basic.h
index 084d9797ac4..3c234768e5f 100644
--- a/bpf/process/types/basic.h
+++ b/bpf/process/types/basic.h
@@ -1910,7 +1910,7 @@ struct ratelimit_value {
 
 struct {
         __uint(type, BPF_MAP_TYPE_LRU_HASH);
-        __uint(max_entries, 32768);
+        __uint(max_entries, 1); // the agent resizes this map during kprobe load if the feature is needed
         __type(key, struct ratelimit_key);
         __type(value, struct ratelimit_value);
 } ratelimit_map SEC(".maps");
diff --git a/pkg/sensors/tracing/generickprobe.go b/pkg/sensors/tracing/generickprobe.go
index 785d280b4df..8a625b5cf41 100644
--- a/pkg/sensors/tracing/generickprobe.go
+++ b/pkg/sensors/tracing/generickprobe.go
@@ -58,7 +58,10 @@ const (
         CharBufErrorTooLarge    = -3
         CharBufSavedForRetprobe = -4
 
-        stackTraceMapMaxEntries = 32768 // this value could be fine tuned
+        // The following values could be fine-tuned if either of these features
+        // uses too much kernel memory when enabled.
+        stackTraceMapMaxEntries = 32768
+        ratelimitMapMaxEntries  = 32768
 )
 
 func kprobeCharBufErrorToString(e int32) string {
@@ -138,6 +141,9 @@ type genericKprobe struct {
         // for each kprobe when using single kprobes.
         hasStackTrace bool
 
+        // is there a rate limit action defined in the kprobe
+        hasRatelimit bool
+
         customHandler eventhandler.Handler
 }
 
@@ -272,6 +278,7 @@ func createMultiKprobeSensor(sensorPath, policyName string, multiIDs []idtable.E
         data := &genericKprobeData{}
 
         oneKprobeHasStackTrace := false
+        oneKprobeHasRatelimit := false
 
         for _, id := range multiIDs {
                 gk, err := genericKprobeTableGet(id)
@@ -282,6 +289,7 @@ func createMultiKprobeSensor(sensorPath, policyName string, multiIDs []idtable.E
                         multiRetIDs = append(multiRetIDs, id)
                 }
                 oneKprobeHasStackTrace = oneKprobeHasStackTrace || gk.hasStackTrace
+                oneKprobeHasRatelimit = oneKprobeHasRatelimit || gk.hasRatelimit
                 gk.data = data
         }
 
@@ -346,6 +354,9 @@ func createMultiKprobeSensor(sensorPath, policyName string, multiIDs []idtable.E
 
         if kernels.EnableLargeProgs() {
                 ratelimitMap := program.MapBuilderPin("ratelimit_map", sensors.PathJoin(sensorPath, "ratelimit_map"), load)
+                if oneKprobeHasRatelimit {
+                        ratelimitMap.SetMaxEntries(ratelimitMapMaxEntries)
+                }
                 maps = append(maps, ratelimitMap)
         }
 
@@ -796,6 +807,7 @@ func addKprobe(funcName string, f *v1alpha1.KProbeSpec, in *addKprobeIn) (id idt
                 message:       msgField,
                 tags:          tagsField,
                 hasStackTrace: hasStackTrace,
+                hasRatelimit:  selectorsHaveRateLimit(f.Selectors),
         }
 
         // Parse Filters into kernel filter logic
@@ -907,6 +919,11 @@ func createKprobeSensorFromEntry(kprobeEntry *genericKprobe, sensorPath string,
 
         if kernels.EnableLargeProgs() {
                 ratelimitMap := program.MapBuilderPin("ratelimit_map", sensors.PathJoin(sensorPath, "ratelimit_map"), load)
+                if kprobeEntry.hasRatelimit {
+                        // similarly to the stacktrace map, we expand the max size only
+                        // if needed, to reduce the memory footprint when unused
+                        ratelimitMap.SetMaxEntries(ratelimitMapMaxEntries)
+                }
                 maps = append(maps, ratelimitMap)
         }
 
@@ -1284,3 +1301,14 @@ func retprobeMerge(prev pendingEvent, curr pendingEvent) *tracing.MsgGenericKpro
 func (k *observerKprobeSensor) LoadProbe(args sensors.LoadProbeArgs) error {
         return loadGenericKprobeSensor(args.BPFDir, args.Load, args.Verbose)
 }
+
+func selectorsHaveRateLimit(selectors []v1alpha1.KProbeSelector) bool {
+        for _, selector := range selectors {
+                for _, matchAction := range selector.MatchActions {
+                        if len(matchAction.RateLimit) > 0 {
+                                return true
+                        }
+                }
+        }
+        return false
+}
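
For reviewers, here is a minimal standalone sketch of the agent-side detection the patch relies on. The stand-in ActionSelector and KProbeSelector types only mirror the fields the new helper reads and are an assumption about the shape of the v1alpha1 API (the real definitions live in Tetragon's v1alpha1 package); the "Post" action name and the "1m" rate-limit value are purely illustrative.

package main

import "fmt"

// Stand-in types mirroring only the fields used by the patch; the real
// definitions come from Tetragon's v1alpha1 API package.
type ActionSelector struct {
        Action    string
        RateLimit string
}

type KProbeSelector struct {
        MatchActions []ActionSelector
}

// selectorsHaveRateLimit mirrors the helper added in the patch: it reports
// whether any selector carries a rateLimit option on one of its actions.
func selectorsHaveRateLimit(selectors []KProbeSelector) bool {
        for _, selector := range selectors {
                for _, matchAction := range selector.MatchActions {
                        if len(matchAction.RateLimit) > 0 {
                                return true
                        }
                }
        }
        return false
}

func main() {
        withLimit := []KProbeSelector{
                {MatchActions: []ActionSelector{{Action: "Post", RateLimit: "1m"}}},
        }
        withoutLimit := []KProbeSelector{
                {MatchActions: []ActionSelector{{Action: "Post"}}},
        }
        fmt.Println(selectorsHaveRateLimit(withLimit))    // true
        fmt.Println(selectorsHaveRateLimit(withoutLimit)) // false
}

With a selector like withLimit, addKprobe sets hasRatelimit and the loader grows ratelimit_map to ratelimitMapMaxEntries; otherwise the map keeps its one-entry placeholder size and the kernel memory stays minimal.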