From 39a545d17e670daf479e5d9b84434ef45d258eac Mon Sep 17 00:00:00 2001 From: Samuele Date: Thu, 12 Oct 2023 06:33:26 +0200 Subject: [PATCH] feat(tools): add request aware table to help detect concurrent requests accessing the same shared table (#11017) KAG-1570 --------- Co-authored-by: Datong Sun Co-authored-by: Datong Sun --- .../unreleased/kong/request-aware-table.yml | 3 + kong-3.5.0-0.rockspec | 1 + kong/plugins/rate-limiting/policies/init.lua | 41 +++--- kong/templates/nginx_kong.lua | 1 + kong/tools/request_aware_table.lua | 123 +++++++++++++++++ .../05-proxy/33-request-aware-table_spec.lua | 125 ++++++++++++++++++ .../plugins/request-aware-table/handler.lua | 41 ++++++ .../plugins/request-aware-table/schema.lua | 11 ++ 8 files changed, 327 insertions(+), 19 deletions(-) create mode 100644 changelog/unreleased/kong/request-aware-table.yml create mode 100644 kong/tools/request_aware_table.lua create mode 100644 spec/02-integration/05-proxy/33-request-aware-table_spec.lua create mode 100644 spec/fixtures/custom_plugins/kong/plugins/request-aware-table/handler.lua create mode 100644 spec/fixtures/custom_plugins/kong/plugins/request-aware-table/schema.lua diff --git a/changelog/unreleased/kong/request-aware-table.yml b/changelog/unreleased/kong/request-aware-table.yml new file mode 100644 index 000000000000..96cf070b5d0d --- /dev/null +++ b/changelog/unreleased/kong/request-aware-table.yml @@ -0,0 +1,3 @@ +message: Add a request-aware table able to detect accesses from different requests. +type: feature +scope: Core diff --git a/kong-3.5.0-0.rockspec b/kong-3.5.0-0.rockspec index cf357401cf6b..c02b73864a35 100644 --- a/kong-3.5.0-0.rockspec +++ b/kong-3.5.0-0.rockspec @@ -163,6 +163,7 @@ build = { ["kong.tools.kong-lua-sandbox"] = "kong/tools/kong-lua-sandbox.lua", ["kong.tools.protobuf"] = "kong/tools/protobuf.lua", ["kong.tools.mime_type"] = "kong/tools/mime_type.lua", + ["kong.tools.request_aware_table"] = "kong/tools/request_aware_table.lua", ["kong.runloop.handler"] = "kong/runloop/handler.lua", ["kong.runloop.events"] = "kong/runloop/events.lua", diff --git a/kong/plugins/rate-limiting/policies/init.lua b/kong/plugins/rate-limiting/policies/init.lua index 12f9f32983e8..80731cb0648b 100644 --- a/kong/plugins/rate-limiting/policies/init.lua +++ b/kong/plugins/rate-limiting/policies/init.lua @@ -2,7 +2,7 @@ local policy_cluster = require "kong.plugins.rate-limiting.policies.cluster" local timestamp = require "kong.tools.timestamp" local reports = require "kong.reports" local redis = require "resty.redis" -local table_clear = require "table.clear" +local request_aware_table = require "kong.tools.request_aware_table" local kong = kong local pairs = pairs @@ -18,23 +18,26 @@ local EMPTY_UUID = "00000000-0000-0000-0000-000000000000" -- for `conf.sync_rate > 0` local auto_sync_timer -local cur_usage = { - --[[ - [cache_key] = - --]] -} +local cur_usage = request_aware_table.new() +-- { +-- [[ +-- [cache_key] = +-- ]] +-- } -local cur_usage_expire_at = { - --[[ - [cache_key] = - --]] -} +local cur_usage_expire_at = request_aware_table.new() +-- { +-- [[ +-- [cache_key] = +-- ]] +-- } -local cur_delta = { - --[[ - [cache_key] = - --]] -} +local cur_delta = request_aware_table.new() +-- { +-- [[ +-- [cache_key] = +-- ]] +-- } local function is_present(str) @@ -138,9 +141,9 @@ local function get_redis_connection(conf) end local function clear_local_counter() - table_clear(cur_usage) - table_clear(cur_usage_expire_at) - table_clear(cur_delta) + cur_usage:clear() + cur_usage_expire_at:clear() + 
cur_delta:clear() end local function sync_to_redis(premature, conf) diff --git a/kong/templates/nginx_kong.lua b/kong/templates/nginx_kong.lua index 2da98f9213d6..efa41d2b8239 100644 --- a/kong/templates/nginx_kong.lua +++ b/kong/templates/nginx_kong.lua @@ -58,6 +58,7 @@ log_format kong_log_format '$remote_addr - $remote_user [$time_local] ' # Load variable indexes lua_kong_load_var_index default; +lua_kong_load_var_index $request_id; upstream kong_upstream { server 0.0.0.1; diff --git a/kong/tools/request_aware_table.lua b/kong/tools/request_aware_table.lua new file mode 100644 index 000000000000..fd9f2ad90356 --- /dev/null +++ b/kong/tools/request_aware_table.lua @@ -0,0 +1,123 @@ +--- NOTE: tool is designed to assist with **detecting** request contamination +-- issues on CI, during test runs. It does not offer security safeguards. + +local table_new = require "table.new" +local table_clear = require "table.clear" + +local debug_mode = (kong.configuration.log_level == "debug") +local error = error +local rawset = rawset +local setmetatable = setmetatable +local get_phase = ngx.get_phase +local var = ngx.var + + +local NGX_VAR_PHASES = { + set = true, + rewrite = true, + access = true, + content = true, + header_filter = true, + body_filter = true, + log = true, + balancer = true, +} +local ALLOWED_REQUEST_ID_K = "__allowed_request_id" + + +-- Check if access is allowed for table, based on the request ID +local function enforce_sequential_access(table) + if not NGX_VAR_PHASES[get_phase()] then + -- allow access and reset allowed request ID + rawset(table, ALLOWED_REQUEST_ID_K, nil) + return + end + + local curr_request_id = var.request_id + local allowed_request_id = rawget(table, ALLOWED_REQUEST_ID_K) + if not allowed_request_id then + -- first access. Set allowed request ID and allow access + rawset(table, ALLOWED_REQUEST_ID_K, curr_request_id) + return + end + + if curr_request_id ~= table[ALLOWED_REQUEST_ID_K] then + error("concurrent access from different request to shared table detected", 2) + end +end + + +local function clear_table(self) + if not debug_mode then + table_clear(self) + return + end + + table_clear(self.__data) + rawset(self, ALLOWED_REQUEST_ID_K, nil) +end + + +local __proxy_mt = { + __index = function(t, k) + if k == "clear" then + return clear_table + end + + enforce_sequential_access(t) + return t.__data[k] + end, + + __newindex = function(t, k, v) + if k == "clear" then + error("cannot set the 'clear' method of request aware table", 2) + end + + enforce_sequential_access(t) + t.__data[k] = v + end, +} + + +local __direct_mt = { + __index = { clear = clear_table }, + + __newindex = function(t, k, v) + if k == "clear" then + error("cannot set the 'clear' method of request aware table", 2) + end + + rawset(t, k, v) + end, +} + + +-- Request aware table constructor +-- +-- Creates a new table with request-aware access logic to protect the +-- underlying data from race conditions. +-- The table includes a :clear() method to delete all elements. +-- +-- The request-aware access logic is turned off when `debug_mode` is disabled. 
+-- +-- @param narr (optional) pre allocated array elements +-- @param nrec (optional) pre allocated hash elements +-- @return The newly created table with request-aware access +local function new(narr, nrec) + if not narr then narr = 0 end + if not nrec then nrec = 0 end + + local data = table_new(narr, nrec) + + -- return table without proxy when debug_mode is disabled + if not debug_mode then + return setmetatable(data, __direct_mt) + end + + -- wrap table in proxy (for access checks) when debug_mode is enabled + return setmetatable({ __data = data }, __proxy_mt) +end + +return { + new = new, +} diff --git a/spec/02-integration/05-proxy/33-request-aware-table_spec.lua b/spec/02-integration/05-proxy/33-request-aware-table_spec.lua new file mode 100644 index 000000000000..bbce6e3079ca --- /dev/null +++ b/spec/02-integration/05-proxy/33-request-aware-table_spec.lua @@ -0,0 +1,125 @@ +local helpers = require "spec.helpers" + +local LOG_LEVELS = { + "debug", + "info", + -- any other log level behaves the same as "info" +} + + +local function trigger_plugin_new_table() + local client = helpers.proxy_client() + local res = client:get("/", { + query = { + new_tab = true, + } + }) + assert.response(res).has.status(200) + assert.logfile().has.no.line("[error]", true) + client:close() +end + +local function trigger_plugin_clear_table() + local client = helpers.proxy_client() + local res = client:get("/", { + query = { + clear = true, + } + }) + assert.response(res).has.status(200) + assert.logfile().has.no.line("[error]", true) + client:close() +end + +for _, log_level in ipairs(LOG_LEVELS) do + local concurrency_checks = log_level == "debug" + + for _, strategy in helpers.each_strategy() do + describe("request aware table tests [#" .. strategy .. "] concurrency checks: " .. 
tostring(concurrency_checks), function() + local client + lazy_setup(function() + local bp = helpers.get_db_utils(strategy, { + "plugins", + "routes", + "services", + }, { + "request-aware-table" + }) + + local service = assert(bp.services:insert({ + url = helpers.mock_upstream_url + })) + + local route = bp.routes:insert({ + service = service, + paths = { "/" } + }) + + bp.plugins:insert({ + name = "request-aware-table", + route = { id = route.id }, + }) + + helpers.start_kong({ + database = strategy, + plugins = "bundled, request-aware-table", + nginx_conf = "spec/fixtures/custom_nginx.template", + log_level = log_level, + }) + end) + + lazy_teardown(function() + helpers.stop_kong() + end) + + before_each(function() + helpers.clean_logfile() + trigger_plugin_new_table() + client = helpers.proxy_client() + end) + + after_each(function() + if client then + client:close() + end + end) + + it("allows access when there are no race conditions", function() + local res = client:get("/") + assert.response(res).has.status(200) + assert.logfile().has.no.line("[error]", true) + end) + + it("denies access when there are race conditions and checks are enabled (else allows)", function() + -- access from request 1 (don't clear) + local r = client:get("/") + assert.response(r).has.status(200) + + -- access from request 2 + r = client:get("/") + if concurrency_checks then + assert.logfile().has.line("concurrent access from different request to shared table detected", true) + else + assert.response(r).has.status(200) + end + end) + + it("allows access when table is cleared between requests", function() + -- access from request 1 (clear) + local r = client:get("/") + assert.response(r).has.status(200) + trigger_plugin_clear_table() + + -- access from request 2 (clear) + r = client:get("/") + assert.response(r).has.status(200) + trigger_plugin_clear_table() + + -- access from request 3 + r = client:get("/") + assert.response(r).has.status(200) + assert.logfile().has.no.line("[error]", true) + end) + end) + end +end diff --git a/spec/fixtures/custom_plugins/kong/plugins/request-aware-table/handler.lua b/spec/fixtures/custom_plugins/kong/plugins/request-aware-table/handler.lua new file mode 100644 index 000000000000..a3c640cfeca8 --- /dev/null +++ b/spec/fixtures/custom_plugins/kong/plugins/request-aware-table/handler.lua @@ -0,0 +1,41 @@ +local RAT = require "kong.tools.request_aware_table" + +local kong = kong +local tab + +local _M = { + PRIORITY = 1001, + VERSION = "1.0", +} + +local function access_table() + -- write access + tab.foo = "bar" + tab.bar = "baz" + -- read/write access + tab.baz = tab.foo .. tab.bar +end + + +function _M:access(conf) + local query = kong.request.get_query() + if query.new_tab == "true" then + -- new table + tab = RAT.new() + ngx.exit(200) + end + + if query.clear == "true" then + -- clear table + tab:clear() + ngx.exit(200) + end + + -- access multiple times during same request + for _ = 1, 3 do + access_table() + end +end + + +return _M diff --git a/spec/fixtures/custom_plugins/kong/plugins/request-aware-table/schema.lua b/spec/fixtures/custom_plugins/kong/plugins/request-aware-table/schema.lua new file mode 100644 index 000000000000..2881307755a5 --- /dev/null +++ b/spec/fixtures/custom_plugins/kong/plugins/request-aware-table/schema.lua @@ -0,0 +1,11 @@ +return { + name = "request-aware-table", + fields = { + { + config = { + type = "record", + fields = { } + } + } + } +}
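
For reference (not part of the patch): a minimal sketch of how a plugin running inside Kong might use the new kong.tools.request_aware_table module from its access phase, modeled on the test fixture above. The `counters` variable name, the `access()` wrapper, and the `(0, 4)` pre-allocation hints are illustrative and not taken from the patch.

local request_aware_table = require "kong.tools.request_aware_table"

-- narr/nrec are optional pre-allocation hints passed through to table.new()
local counters = request_aware_table.new(0, 4)

local function access()
  -- Reads and writes go through the proxy metatable; when Kong runs with
  -- log_level = debug, each access is checked against ngx.var.request_id and
  -- raises "concurrent access from different request to shared table detected"
  -- if a different in-flight request already touched the table.
  counters.cache_key = (counters.cache_key or 0) + 1

  -- Calling :clear() empties the table and resets its request-ID ownership,
  -- so the next request is allowed to access it again.
  counters:clear()
end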