From 4a11569ec92d1aafe656104dc91b2fbe74704d90 Mon Sep 17 00:00:00 2001 From: Vinicius Mignot Date: Thu, 9 Apr 2020 18:35:31 -0300 Subject: [PATCH 01/31] fix(declarative) purge cache before loading The cache must be purged before loading to prevent that removed entries are kept in the database after a reload. Fix #5705 --- kong/db/declarative/init.lua | 3 + spec/02-integration/02-cmd/03-reload_spec.lua | 177 ++++++++++++++++-- 2 files changed, 168 insertions(+), 12 deletions(-) diff --git a/kong/db/declarative/init.lua b/kong/db/declarative/init.lua index c80ea9de2077..f21a9b99ce2c 100644 --- a/kong/db/declarative/init.lua +++ b/kong/db/declarative/init.lua @@ -360,6 +360,9 @@ function declarative.load_into_cache(entities, hash, shadow_page) -- but filtered for a given tag local tags_by_name = {} + kong.core_cache:purge() + kong.cache:purge() + for entity_name, items in pairs(entities) do local dao = kong.db[entity_name] local schema = dao.schema diff --git a/spec/02-integration/02-cmd/03-reload_spec.lua b/spec/02-integration/02-cmd/03-reload_spec.lua index 4eebbca84b9e..6d2769956b4e 100644 --- a/spec/02-integration/02-cmd/03-reload_spec.lua +++ b/spec/02-integration/02-cmd/03-reload_spec.lua @@ -2,9 +2,6 @@ local helpers = require "spec.helpers" local cjson = require "cjson" -for _, strategy in helpers.each_strategy() do - - local function get_kong_workers() local workers helpers.wait_until(function() @@ -40,7 +37,7 @@ local function assert_wait_call(fn, ...) end -local function wait_until_no_common_workers(workers, expected_total) +local function wait_until_no_common_workers(workers, expected_total, strategy) if strategy == "cassandra" then ngx.sleep(0.5) end @@ -71,16 +68,18 @@ local function wait_until_no_common_workers(workers, expected_total) end -local function kong_reload(...) +local function kong_reload(strategy, ...) local workers = get_kong_workers() local ok, err = helpers.kong_exec(...) if ok then - wait_until_no_common_workers(workers) + wait_until_no_common_workers(workers, nil, strategy) end return ok, err end +for _, strategy in helpers.each_strategy() do + describe("kong reload #" .. strategy, function() lazy_setup(function() helpers.get_db_utils(nil, {}) -- runs migrations @@ -99,7 +98,7 @@ describe("kong reload #" .. strategy, function() local nginx_pid = assert_wait_call(helpers.file.read, helpers.test_conf.nginx_pid) -- kong_exec uses test conf too, so same prefix - assert(kong_reload("reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) local nginx_pid_after = assert_wait_call(helpers.file.read, helpers.test_conf.nginx_pid) @@ -182,7 +181,7 @@ describe("kong reload #" .. strategy, function() local prng_seeds_1 = json.prng_seeds client:close() - assert(kong_reload("reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) client = helpers.admin_client() local res = assert(client:get("/")) @@ -215,7 +214,7 @@ describe("kong reload #" .. strategy, function() local node_id_1 = json.node_id client:close() - assert(kong_reload("reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) client = helpers.admin_client() local res = assert(client:get("/")) @@ -291,7 +290,7 @@ describe("kong reload #" .. strategy, function() - example.test ]], yaml_file) - assert(kong_reload("reload --prefix " .. 
helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) helpers.wait_until(function() pok, admin_client = pcall(helpers.admin_client) @@ -361,7 +360,7 @@ describe("kong reload #" .. strategy, function() admin_client = assert(helpers.admin_client()) - assert(kong_reload("reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) admin_client = assert(helpers.admin_client()) local res = assert(admin_client:send { @@ -452,7 +451,7 @@ describe("kong reload #" .. strategy, function() weight: 100 ]], yaml_file) - assert(kong_reload("reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) helpers.wait_until(function() pok, admin_client = pcall(helpers.admin_client) @@ -492,3 +491,157 @@ describe("kong reload #" .. strategy, function() end) end + + +describe("key-auth plugin invalidation on dbless reload", function() + it("(regression - issue 5705)", function() + local admin_client + local proxy_client + local yaml_file = helpers.make_yaml_file([[ + _format_version: "1.1" + services: + - name: my-service + url: https://example.com + plugins: + - name: key-auth + routes: + - name: my-route + paths: + - / + consumers: + - username: my-user + keyauth_credentials: + - key: my-key + ]]) + + finally(function() + os.remove(yaml_file) + helpers.stop_kong(helpers.test_conf.prefix, true) + if admin_client then + admin_client:close() + end + if proxy_client then + proxy_client:close() + end + end) + + assert(helpers.start_kong({ + database = "off", + declarative_config = yaml_file, + nginx_worker_processes = 1, + nginx_conf = "spec/fixtures/custom_nginx.template", + })) + + proxy_client = helpers.proxy_client() + local res = assert(proxy_client:send { + method = "GET", + path = "/", + headers = { + ["apikey"] = "my-key" + } + }) + assert.res_status(200, res) + + res = assert(proxy_client:send { + method = "GET", + path = "/", + headers = { + ["apikey"] = "my-new-key" + } + }) + assert.res_status(401, res) + + proxy_client:close() + + admin_client = assert(helpers.admin_client()) + local res = assert(admin_client:send { + method = "GET", + path = "/key-auths", + }) + assert.res_status(200, res) + + local body = assert.res_status(200, res) + local json = cjson.decode(body) + assert.same(1, #json.data) + assert.same("my-key", json.data[1].key) + admin_client:close() + + helpers.make_yaml_file([[ + _format_version: "1.1" + services: + - name: my-service + url: https://example.com + plugins: + - name: key-auth + routes: + - name: my-route + paths: + - / + consumers: + - username: my-user + keyauth_credentials: + - key: my-new-key + ]], yaml_file) + assert(kong_reload("off", "reload --prefix " .. 
helpers.test_conf.prefix)) + + + local res + + helpers.wait_until(function() + admin_client = assert(helpers.admin_client()) + + res = assert(admin_client:send { + method = "GET", + path = "/key-auths", + }) + assert.res_status(200, res) + local body = assert.res_status(200, res) + local json = cjson.decode(body) + admin_client:close() + assert.same(1, #json.data) + return "my-new-key" == json.data[1].key + end, 5) + + helpers.wait_until(function() + proxy_client = helpers.proxy_client() + res = assert(proxy_client:send { + method = "GET", + path = "/", + headers = { + ["apikey"] = "my-key" + } + }) + proxy_client:close() + return res.status == 401 + end, 5) + + helpers.wait_until(function() + proxy_client = helpers.proxy_client() + res = assert(proxy_client:send { + method = "GET", + path = "/", + headers = { + ["apikey"] = "my-new-key" + } + }) + local body = res:read_body() + proxy_client:close() + return body ~= [[{"message":"Invalid authentication credentials"}]] + end, 5) + + admin_client = assert(helpers.admin_client()) + local res = assert(admin_client:send { + method = "GET", + path = "/key-auths", + }) + assert.res_status(200, res) + + local body = assert.res_status(200, res) + local json = cjson.decode(body) + assert.same(1, #json.data) + assert.same("my-new-key", json.data[1].key) + admin_client:close() + + end) +end) + From 828a898b3211eb1b4aef74c94e9db1e94ce19036 Mon Sep 17 00:00:00 2001 From: Travis Raines Date: Thu, 16 Apr 2020 03:01:00 -0700 Subject: [PATCH 02/31] feat(config) add "kic" configuration for reporting (#5765) * feat(config) add "kic" setting Add a "kic" setting to kong.conf. This is a boolean indicating whether the instance is managed by the Kong Ingress Controller. It is not documented and not included in kong.conf.default, as users should not set it directly. Kubernetes tooling will set it automatically if appropriate. * feat(reports) send KIC usage Add ingress controller usage setting to anonymous reports. 
--- kong/conf_loader.lua | 1 + kong/reports.lua | 1 + kong/templates/kong_defaults.lua | 1 + spec/01-unit/11-reports_spec.lua | 26 ++++++++++++++++++++++++++ 4 files changed, 29 insertions(+) diff --git a/kong/conf_loader.lua b/kong/conf_loader.lua index bcafc1193b16..12a3300936fe 100644 --- a/kong/conf_loader.lua +++ b/kong/conf_loader.lua @@ -397,6 +397,7 @@ local CONF_INFERENCES = { cluster_control_plane = { typ = "string", }, cluster_cert = { typ = "string" }, cluster_cert_key = { typ = "string" }, + kic = { typ = "boolean" }, } diff --git a/kong/reports.lua b/kong/reports.lua index 00223bb90c5b..afc513ec8088 100644 --- a/kong/reports.lua +++ b/kong/reports.lua @@ -336,6 +336,7 @@ local function configure_ping(kong_conf) add_immutable_value("database", kong_conf.database) add_immutable_value("role", kong_conf.role) + add_immutable_value("kic", kong_conf.kic) add_immutable_value("_admin", #kong_conf.admin_listeners > 0 and 1 or 0) add_immutable_value("_proxy", #kong_conf.proxy_listeners > 0 and 1 or 0) add_immutable_value("_stream", #kong_conf.stream_listeners > 0 and 1 or 0) diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index 34b13eb6a07f..d6fdb740aa75 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -120,4 +120,5 @@ lua_package_path = ./?.lua;./?/init.lua; lua_package_cpath = NONE role = traditional +kic = off ]] diff --git a/spec/01-unit/11-reports_spec.lua b/spec/01-unit/11-reports_spec.lua index ee59d0212729..96eab5fd7169 100644 --- a/spec/01-unit/11-reports_spec.lua +++ b/spec/01-unit/11-reports_spec.lua @@ -170,6 +170,32 @@ describe("reports", function() end) end) + describe("sends 'kic'", function() + it("default (off)", function() + local conf = assert(conf_loader(nil)) + reports.configure_ping(conf) + + local thread = helpers.tcp_server(8189) + reports.send_ping("127.0.0.1", 8189) + + local _, res = assert(thread:join()) + assert._matches("kic=false", res, nil, true) + end) + + it("enabled", function() + local conf = assert(conf_loader(nil, { + kic = "on", + })) + reports.configure_ping(conf) + + local thread = helpers.tcp_server(8189) + reports.send_ping("127.0.0.1", 8189) + + local _, res = assert(thread:join()) + assert.matches("kic=true", res, nil, true) + end) + end) + describe("sends '_admin' for 'admin_listen'", function() it("off", function() local conf = assert(conf_loader(nil, { From 9753915a4a14c9839919bb89efa89d69ab786b06 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Thu, 9 Apr 2020 13:18:32 -0700 Subject: [PATCH 03/31] tests(helpers) implement the 'errlog' and 'line' assertion and modifier --- spec/helpers.lua | 98 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/spec/helpers.lua b/spec/helpers.lua index ff9542a25e8b..6fe5052ad838 100644 --- a/spec/helpers.lua +++ b/spec/helpers.lua @@ -1772,6 +1772,104 @@ luassert:register("assertion", "cn", assert_cn, "assertion.cn.positive") +do + --- Generic modifier "errlog" + -- Will set an "errlog_path" value in the assertion state. + -- @name errlog + -- @param path A path to the errlog file (defaults to the test prefix's + -- errlog). + local function modifier_errlog(state, args) + local errlog_path = args[1] or conf.nginx_err_logs + + assert(type(errlog_path) == "string", "errlog modifier expects nil, or " .. + "a string as argument, got: " .. 
+ type(errlog_path)) + + rawset(state, "errlog_path", errlog_path) + + return state + end + + luassert:register("modifier", "errlog", modifier_errlog) + + + --- Assertion checking is any line from a file matches the given regex or + -- substring. + -- @name line + -- @param regex The regex to evaluate against each line. + -- @param plain If true, the regex argument will be considered as a plain + -- string. + -- @param timeout An optional timeout after which the assertion will fail if + -- reached. + -- @param fpath An optional path to the file (defaults to the errlog + -- modifier) + -- @see errlog + -- @usage + -- assert.not_line("[error]", true) + -- assert.errlog().not_has.line("[error]", true) + local function match_line(state, args) + local regex = args[1] + local plain = args[2] + local timeout = args[3] or 2 + local fpath = args[4] or rawget(state, "errlog_path") + + assert(type(regex) == "string", + "Expected the regex argument to be a string") + assert(type(fpath) == "string", + "Expected the file path argument to be a string") + assert(type(timeout) == "number" and timeout > 0, + "Expected the timeout argument to be a positive number") + + local pok = pcall(wait_until, function() + local logs = pl_file.read(fpath) + local from, _, err + + for line in logs:gmatch("[^\r\n]+") do + if plain then + from = string.find(line, regex, nil, true) + + else + from, _, err = ngx.re.find(line, regex) + if err then + error(err) + end + end + + if from then + table.insert(args, 1, line) + table.insert(args, 1, regex) + args.n = 2 + return true + end + end + end, timeout) + + table.insert(args, 1, fpath) + args.n = args.n + 1 + + return pok + end + + say:set("assertion.match_line.negative", unindent [[ + Expected file at: + %s + To match: + %s + ]]) + say:set("assertion.match_line.positive", unindent [[ + Expected file at: + %s + To not match: + %s + But matched line: + %s + ]]) + luassert:register("assertion", "line", match_line, + "assertion.match_line.negative", + "assertion.match_line.positive") +end + + ---------------- -- DNS-record mocking. 
-- These function allow to create mock dns records that the test Kong instance From e4ad990412fe4b676885c86a30b68fd31b530a72 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Tue, 14 Apr 2020 12:55:19 -0700 Subject: [PATCH 04/31] fix(reports) avoid incrementing log counters on unexpected errors E.g: https://discuss.konghq.com/t/unknown-request-scheme-log-spam-in-proxy-container/5983 --- kong/reports.lua | 27 ++++++++++--------- .../05-proxy/22-reports_spec.lua | 17 ++++++++++++ 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/kong/reports.lua b/kong/reports.lua index afc513ec8088..522b601e2b52 100644 --- a/kong/reports.lua +++ b/kong/reports.lua @@ -205,7 +205,7 @@ end -- returns a string indicating the "kind" of the current request/stream: -- "http", "https", "h2c", "h2", "grpc", "grpcs", "ws", "wss", "tcp", "tls" -- or nil + error message if the suffix could not be determined -local function get_current_suffix() +local function get_current_suffix(ctx) if subsystem == "stream" then if var.ssl_protocol then return "tls" @@ -215,8 +215,8 @@ local function get_current_suffix() end local scheme = var.scheme + local proxy_mode = var.kong_proxy_mode if scheme == "http" or scheme == "https" then - local proxy_mode = var.kong_proxy_mode if proxy_mode == "http" then local http_upgrade = var.http_upgrade if http_upgrade and lower(http_upgrade) == "websocket" then @@ -235,7 +235,7 @@ local function get_current_suffix() return "h2" end - return scheme + return scheme -- http/https end if proxy_mode == "grpc" then @@ -249,7 +249,12 @@ local function get_current_suffix() end end - return nil, "unknown request scheme: " .. tostring(scheme) + if ctx.KONG_UNEXPECTED then + return nil + end + + log(WARN, "could not determine log suffix (scheme=", tostring(scheme), + ", proxy_mode=", tostring(proxy_mode), ")") end @@ -405,17 +410,15 @@ return { local count_key = subsystem == "stream" and STREAM_COUNT_KEY or REQUEST_COUNT_KEY - incr_counter(count_key) - local suffix, err = get_current_suffix() + + if ctx.ran_go_plugin then + incr_counter(GO_PLUGINS_REQUEST_COUNT_KEY) + end + + local suffix = get_current_suffix(ctx) if suffix then incr_counter(count_key .. ":" .. 
suffix) - - if ctx.ran_go_plugin then - incr_counter(GO_PLUGINS_REQUEST_COUNT_KEY) - end - else - log(WARN, err) end end, diff --git a/spec/02-integration/05-proxy/22-reports_spec.lua b/spec/02-integration/05-proxy/22-reports_spec.lua index f930cca5ef42..7aa17dd95b9a 100644 --- a/spec/02-integration/05-proxy/22-reports_spec.lua +++ b/spec/02-integration/05-proxy/22-reports_spec.lua @@ -420,5 +420,22 @@ for _, strategy in helpers.each_strategy() do assert.match("tcp_streams=1", reports_data[1]) -- it counts the stream request for the ping assert.match("tls_streams=1", reports_data[1]) end) + + it("does not log NGINX-produced errors", function() + local proxy_client = assert(helpers.proxy_client()) + local res = assert(proxy_client:send { + method = "GET", + path = "/", + headers = { + ["X-Large"] = string.rep("a", 2^10 * 10), -- default large_client_header_buffers is 8k + } + }) + assert.res_status(494, res) + proxy_client:close() + + assert.errlog() + .not_has + .line([[could not determine log suffix]], true) + end) end) end From e72408a823322af472072e325570b77c95fba6bc Mon Sep 17 00:00:00 2001 From: Javier Date: Mon, 20 Apr 2020 09:31:14 -0500 Subject: [PATCH 05/31] fix(cache) disable JIT mlcache:get_bulk() on ARM64 (#5797) The loop inside this function can trigger a trace restart that results in a wrong value in the local index variable. (kong#5748) until this is fixed in LuaJIT, disabling JIT for this function avoids the problem. --- kong/runloop/certificate.lua | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kong/runloop/certificate.lua b/kong/runloop/certificate.lua index 549ff04ac521..594212d475ce 100644 --- a/kong/runloop/certificate.lua +++ b/kong/runloop/certificate.lua @@ -3,6 +3,11 @@ local ngx_ssl = require "ngx.ssl" local pl_utils = require "pl.utils" local mlcache = require "resty.mlcache" +if jit.arch == 'arm64' then + jit.off(mlcache.get_bulk) -- "temporary" workaround for issue #5748 on ARM +end + + local ngx_log = ngx.log local ERR = ngx.ERR From 5c8fd0cc317b7f684da4013f89de9b8d678085c8 Mon Sep 17 00:00:00 2001 From: Colin Hutchinson Date: Wed, 22 Apr 2020 07:11:58 -0700 Subject: [PATCH 06/31] chore(dependency) bump the kong-build-tools dependency (#5803) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 31f1c67b5dec..fbaf5378e72a 100644 --- a/Makefile +++ b/Makefile @@ -28,7 +28,7 @@ RESTY_VERSION ?= `grep RESTY_VERSION $(KONG_SOURCE_LOCATION)/.requirements | awk RESTY_LUAROCKS_VERSION ?= `grep RESTY_LUAROCKS_VERSION $(KONG_SOURCE_LOCATION)/.requirements | awk -F"=" '{print $$2}'` RESTY_OPENSSL_VERSION ?= `grep RESTY_OPENSSL_VERSION $(KONG_SOURCE_LOCATION)/.requirements | awk -F"=" '{print $$2}'` RESTY_PCRE_VERSION ?= `grep RESTY_PCRE_VERSION $(KONG_SOURCE_LOCATION)/.requirements | awk -F"=" '{print $$2}'` -KONG_BUILD_TOOLS ?= '4.2.2' +KONG_BUILD_TOOLS ?= '4.3.1' KONG_VERSION ?= `cat $(KONG_SOURCE_LOCATION)/kong-*.rockspec | grep tag | awk '{print $$3}' | sed 's/"//g'` OPENRESTY_PATCHES_BRANCH ?= master KONG_NGINX_MODULE_BRANCH ?= master From a524aa4cde5ecfcadbd84465c34d524cbc041ca6 Mon Sep 17 00:00:00 2001 From: Datong Sun Date: Wed, 22 Apr 2020 23:13:12 +0800 Subject: [PATCH 07/31] fix(declarative) send config updates to stream subsystem via Unix domain socket MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, the stream subsystem receives the declarative config from the filesystem only. 
Once Kong is running, updating the declarative config through `/config` admin API no longer propagates to the stream subsystem. In the long term we will have shared shdict between `http` and `stream` subsystem which solves the problem. However this PR provides a temporary and clean solution for getting around the issue without introducing new OpenResty patches, and can be reverted in the future easily once situation changes. Fixes #5656 Co-Authored-By: Enrique GarcĂ­a Cota --- kong/db/declarative/init.lua | 27 ++++++++++ kong/init.lua | 49 ++++++++++++++++++- kong/templates/nginx_kong_stream.lua | 12 +++++ .../04-admin_api/15-off_spec.lua | 38 ++++++++++++++ .../02-integration/05-proxy/01-proxy_spec.lua | 31 +++++++++--- spec/fixtures/custom_nginx.template | 12 +++++ 6 files changed, 159 insertions(+), 10 deletions(-) diff --git a/kong/db/declarative/init.lua b/kong/db/declarative/init.lua index f21a9b99ce2c..bd5f44b613ee 100644 --- a/kong/db/declarative/init.lua +++ b/kong/db/declarative/init.lua @@ -10,7 +10,10 @@ local deepcopy = tablex.deepcopy local null = ngx.null local SHADOW = true local md5 = ngx.md5 +local ngx_socket_tcp = ngx.socket.tcp local REMOVE_FIRST_LINE_PATTERN = "^[^\n]+\n(.+)$" +local PREFIX = ngx.config.prefix() +local SUBSYS = ngx.config.subsystem local declarative = {} @@ -521,6 +524,30 @@ function declarative.load_into_cache_with_events(entities, hash) return nil, err end + if SUBSYS == "http" and #kong.configuration.stream_listeners > 0 and + ngx.get_phase() ~= "init_worker" + then + -- update stream if necessary + -- TODO: remove this once shdict can be shared between subsystems + + local sock = ngx_socket_tcp() + ok, err = sock:connect("unix:" .. PREFIX .. "/stream_config.sock") + if not ok then + return nil, err + end + + local json = cjson.encode({ entities, hash, }) + local bytes + bytes, err = sock:send(json) + sock:close() + + if not bytes then + return nil, err + end + + assert(bytes == #json, "incomplete config sent to the stream subsystem") + end + ok, err = kong.worker_events.post("balancer", "upstreams", { operation = "delete_all", entity = { id = "all", name = "all" } diff --git a/kong/init.lua b/kong/init.lua index d2b3e10995e6..6d0c732e8b19 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -419,7 +419,9 @@ function Kong.init() certificate.init() end - clustering.init(config) + if subsystem == "http" then + clustering.init(config) + end -- Load plugins as late as possible so that everything is set up assert(db.plugins:load_plugin_schemas(config.loaded_plugins)) @@ -550,7 +552,9 @@ function Kong.init_worker() go.manage_pluginserver() end - clustering.init_worker(kong.configuration) + if subsystem == "http" then + clustering.init_worker(kong.configuration) + end end @@ -1155,4 +1159,45 @@ function Kong.serve_cluster_listener(options) end +do + local declarative = require("kong.db.declarative") + local cjson = require("cjson.safe") + + function Kong.stream_config_listener() + local sock, err = ngx.req.socket() + if not sock then + kong.log.crit("unable to obtain request socket: ", err) + return + end + + local data, err = sock:receive("*a") + if not data then + ngx_log(ngx_CRIT, "unable to receive new config: ", err) + return + end + + local parsed + parsed, err = cjson.decode(data) + if not parsed then + kong.log.err("unable to parse received declarative config: ", err) + return + end + + local ok, err = concurrency.with_worker_mutex({ name = "dbless-worker" }, function() + return declarative.load_into_cache_with_events(parsed[1], parsed[2]) + end) + + 
if not ok then + if err == "no memory" then + kong.log.err("not enough cache space for declarative config, " .. + "consider raising the \"mem_cache_size\" Kong config") + + else + kong.log.err("failed loading declarative config into cache: ", err) + end + end + end +end + + return Kong diff --git a/kong/templates/nginx_kong_stream.lua b/kong/templates/nginx_kong_stream.lua index c70a38f030f3..c9c6e33eac83 100644 --- a/kong/templates/nginx_kong_stream.lua +++ b/kong/templates/nginx_kong_stream.lua @@ -119,5 +119,17 @@ server { Kong.log() } } + +> if database == "off" then +server { + listen unix:${{PREFIX}}/stream_config.sock; + + error_log ${{ADMIN_ERROR_LOG}} ${{LOG_LEVEL}}; + + content_by_lua_block { + Kong.stream_config_listener() + } +} +> end -- database == "off" > end -- #stream_listeners > 0 ]] diff --git a/spec/02-integration/04-admin_api/15-off_spec.lua b/spec/02-integration/04-admin_api/15-off_spec.lua index 0fd0e9d71642..fa8c681d4aa0 100644 --- a/spec/02-integration/04-admin_api/15-off_spec.lua +++ b/spec/02-integration/04-admin_api/15-off_spec.lua @@ -25,6 +25,8 @@ describe("Admin API #off", function() assert(helpers.start_kong({ database = "off", mem_cache_size = "10m", + stream_listen = "127.0.0.1:9011", + nginx_conf = "spec/fixtures/custom_nginx.template", })) end) @@ -206,6 +208,7 @@ describe("Admin API #off", function() assert.response(res).has.status(201) end) + it("accepts configuration as a JSON string", function() local res = assert(client:send { method = "POST", @@ -551,6 +554,41 @@ describe("Admin API #off", function() assert.response(res).has.status(201) end) + + it("updates stream subsystem config", function() + local res = assert(client:send { + method = "POST", + path = "/config", + body = { + config = [[ + _format_version: "1.1" + services: + - connect_timeout: 60000 + host: 127.0.0.1 + name: mock + port: 15557 + protocol: tcp + routes: + - name: mock_route + protocols: + - tcp + destinations: + - port: 9011 + ]], + }, + headers = { + ["Content-Type"] = "application/json" + } + }) + + assert.response(res).has.status(201) + + local sock = ngx.socket.tcp() + assert(sock:connect("127.0.0.1", 9011)) + assert(sock:send("hi\n")) + assert.equals(sock:receive(), "hi") + sock:close() + end) end) describe("/upstreams", function() diff --git a/spec/02-integration/05-proxy/01-proxy_spec.lua b/spec/02-integration/05-proxy/01-proxy_spec.lua index 92c55349d189..594cb4778b18 100644 --- a/spec/02-integration/05-proxy/01-proxy_spec.lua +++ b/spec/02-integration/05-proxy/01-proxy_spec.lua @@ -15,9 +15,9 @@ local function get_listeners(filename) local file = assert(utils.readfile(filename)) local result = {} for block in file:gmatch("[%\n%s]+server%s+(%b{})") do - local server = {} local server_name = block:match("[%\n%s]server_name%s(.-);") server_name = server_name and stringx.strip(server_name) or "stream" + local server = result[server_name] or {} result[server_name] = server for listen in block:gmatch("[%\n%s]listen%s(.-);") do listen = stringx.strip(listen) @@ -98,13 +98,28 @@ describe("#stream proxy interface listeners", function() stream_listen = "127.0.0.1:9011, 127.0.0.1:9012", })) - assert.equals(1, count_server_blocks(helpers.test_conf.nginx_kong_stream_conf)) - assert.same({ - ["127.0.0.1:9011"] = 1, - ["127.0.0.1:9012"] = 2, - [1] = "127.0.0.1:9011", - [2] = "127.0.0.1:9012", - }, get_listeners(helpers.test_conf.nginx_kong_stream_conf).stream) + if helpers.test_conf.database == "off" then + local stream_config_sock_path = "unix:" .. helpers.test_conf.prefix .. 
"/stream_config.sock" + + assert.equals(2, count_server_blocks(helpers.test_conf.nginx_kong_stream_conf)) + assert.same({ + ["127.0.0.1:9011"] = 1, + ["127.0.0.1:9012"] = 2, + [stream_config_sock_path] = 3, + [1] = "127.0.0.1:9011", + [2] = "127.0.0.1:9012", + [3] = stream_config_sock_path, + }, get_listeners(helpers.test_conf.nginx_kong_stream_conf).stream) + + else + assert.equals(1, count_server_blocks(helpers.test_conf.nginx_kong_stream_conf)) + assert.same({ + ["127.0.0.1:9011"] = 1, + ["127.0.0.1:9012"] = 2, + [1] = "127.0.0.1:9011", + [2] = "127.0.0.1:9012", + }, get_listeners(helpers.test_conf.nginx_kong_stream_conf).stream) + end for i = 9011, 9012 do local sock = ngx.socket.tcp() diff --git a/spec/fixtures/custom_nginx.template b/spec/fixtures/custom_nginx.template index 804e150892d6..d06309c22335 100644 --- a/spec/fixtures/custom_nginx.template +++ b/spec/fixtures/custom_nginx.template @@ -728,6 +728,18 @@ stream { Kong.log() } } + +> if database == "off" then + server { + listen unix:${{PREFIX}}/stream_config.sock; + + error_log ${{PROXY_ERROR_LOG}} ${{LOG_LEVEL}}; + + content_by_lua_block { + Kong.stream_config_listener() + } + } +> end -- database == "off" > end -- #stream_listeners > 0 server { From a253da73606db0b0e894a13a869a73ff3bd961d3 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Tue, 21 Apr 2020 19:27:00 -0700 Subject: [PATCH 08/31] tests(balancer) add a test case covering removal/addition of an identical target This test case covers the issue fixed by 5d8c87959cad18ae82518003199df010bc2c6a5c and recently investigated in FTI-1458. In the investigated issue, this invalid balancer state was reproduced in a node receiving updates from the cluster events. In this test case, we reproduce this state directly by querying the proxy port of the same node (i.e. the node receiving Admin API requests). --- .../10-balancer/01-ring-balancer_spec.lua | 56 +++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/spec/02-integration/05-proxy/10-balancer/01-ring-balancer_spec.lua b/spec/02-integration/05-proxy/10-balancer/01-ring-balancer_spec.lua index 6a6be7cf0301..7654383c35d5 100644 --- a/spec/02-integration/05-proxy/10-balancer/01-ring-balancer_spec.lua +++ b/spec/02-integration/05-proxy/10-balancer/01-ring-balancer_spec.lua @@ -2790,6 +2790,62 @@ for _, strategy in helpers.each_strategy() do end) end) + + it("#db removing and adding the same target", function() + begin_testcase_setup(strategy, bp) + local upstream_name, upstream_id = add_upstream(bp) + local port = add_target(bp, upstream_id, localhost, nil, { weight = 100 }) + local api_host = add_api(bp, upstream_name) + end_testcase_setup(strategy, bp) + + local requests = 20 + + local server = http_server(localhost, port, { requests }) + local oks = client_requests(requests, api_host) + local _, count = server:done() + assert.equal(requests, oks) + assert.equal(requests, count) + + -- remove the target + local api_client = helpers.admin_client() + local res = assert(api_client:send { + method = "POST", + path = "/upstreams/" .. upstream_id .. "/targets", + headers = { + ["Content-Type"] = "application/json", + }, body = { + target = localhost .. ":" .. 
port, + weight = 0, + } + }) + assert.status(201, res) + + local server = http_server(localhost, port, { requests }) + local oks = client_requests(requests, api_host) + local _, count = server:done() + assert.equal(0, oks) + assert.equal(0, count) + + -- add the target back with same weight as initial weight + local api_client = helpers.admin_client() + local res = assert(api_client:send { + method = "POST", + path = "/upstreams/" .. upstream_id .. "/targets", + headers = { + ["Content-Type"] = "application/json", + }, body = { + target = localhost .. ":" .. port, + weight = 100, + } + }) + assert.status(201, res) + + local server = http_server(localhost, port, { requests }) + local oks = client_requests(requests, api_host) + local _, count = server:done() + assert.equal(requests, oks) + assert.equal(requests, count) + end) end) end) end -- for 'localhost' From 9f4ce86de598db816329dc4cc8e3be0fc7422d7b Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Tue, 21 Apr 2020 19:40:32 -0700 Subject: [PATCH 09/31] fix(cluster_events) broadcast all events with nbf delay if needed Some cluster events such as `"balancer:post_health"` and `"balancer:targets"` were not properly passing along the `db_update_propagation` configuration property as the event's delay. Now, `kong.cache` is not aware of this property anymore, which has been fully encapsulated within `kong.cluster_events`, and systematically passed along to the cluster events' strategy during broadcast. --- kong/cache.lua | 15 ++------- kong/cluster_events/init.lua | 9 +++++ kong/global.lua | 9 +++-- .../01-cluster_events_spec.lua | 33 +++++++++++++++++++ 4 files changed, 48 insertions(+), 18 deletions(-) diff --git a/kong/cache.lua b/kong/cache.lua index e669d6c1ecdc..041a1ef47c7e 100644 --- a/kong/cache.lua +++ b/kong/cache.lua @@ -131,10 +131,6 @@ function _M.new(opts) error("opts.worker_events is required", 2) end - if opts.propagation_delay and type(opts.propagation_delay) ~= "number" then - error("opts.propagation_delay must be a number", 2) - end - if opts.ttl and type(opts.ttl) ~= "number" then error("opts.ttl must be a number", 2) end @@ -203,7 +199,6 @@ function _M.new(opts) end local self = { - propagation_delay = max(opts.propagation_delay or 0, 0), cluster_events = opts.cluster_events, mlcache = mlcaches[1], mlcaches = mlcaches, @@ -313,15 +308,9 @@ function _M:invalidate(key) self:invalidate_local(key) - local delay - if self.propagation_delay > 0 then - delay = self.propagation_delay - end - - log(DEBUG, "broadcasting (cluster) invalidation for key: '", key, "' ", - "with delay: '", delay or "none", "'") + log(DEBUG, "broadcasting (cluster) invalidation for key: '", key, "'") - local ok, err = self.cluster_events:broadcast("invalidations", key, delay) + local ok, err = self.cluster_events:broadcast("invalidations", key) if not ok then log(ERR, "failed to broadcast cached entity invalidation: ", err) end diff --git a/kong/cluster_events/init.lua b/kong/cluster_events/init.lua index 42bb76921178..0de12ade4a3d 100644 --- a/kong/cluster_events/init.lua +++ b/kong/cluster_events/init.lua @@ -69,6 +69,10 @@ function _M.new(opts) return error("opts.poll_offset must be a number") end + if opts.poll_delay and type(opts.poll_delay) ~= "number" then + return error("opts.poll_delay must be a number") + end + if not opts.db then return error("opts.db is required") end @@ -78,6 +82,7 @@ function _M.new(opts) local strategy local poll_interval = max(opts.poll_interval or 5, 0) local poll_offset = max(opts.poll_offset or 0, 0) + local 
poll_delay = max(opts.poll_delay or 0, 0) do local db_strategy @@ -109,6 +114,7 @@ function _M.new(opts) strategy = strategy, poll_interval = poll_interval, poll_offset = poll_offset, + poll_delay = poll_delay, node_id = nil, polling = false, channels = {}, @@ -152,6 +158,9 @@ function _M:broadcast(channel, data, delay) if delay and type(delay) ~= "number" then return nil, "delay must be a number" + + elseif self.poll_delay > 0 then + delay = self.poll_delay end -- insert event row diff --git a/kong/global.lua b/kong/global.lua index 14668e036710..b3d98cccc1e8 100644 --- a/kong/global.lua +++ b/kong/global.lua @@ -142,9 +142,10 @@ do function _GLOBAL.init_cluster_events(kong_config, db) return kong_cluster_events.new({ - db = db, - poll_interval = kong_config.db_update_frequency, - poll_offset = kong_config.db_update_propagation, + db = db, + poll_interval = kong_config.db_update_frequency, + poll_offset = kong_config.db_update_propagation, + poll_delay = kong_config.db_update_propagation, }) end @@ -161,7 +162,6 @@ do shm_name = "kong_db_cache", cluster_events = cluster_events, worker_events = worker_events, - propagation_delay = kong_config.db_update_propagation, ttl = db_cache_ttl, neg_ttl = db_cache_ttl, resurrect_ttl = kong_config.resurrect_ttl, @@ -186,7 +186,6 @@ do shm_name = "kong_core_db_cache", cluster_events = cluster_events, worker_events = worker_events, - propagation_delay = kong_config.db_update_propagation, ttl = db_cache_ttl, neg_ttl = db_cache_ttl, resurrect_ttl = kong_config.resurrect_ttl, diff --git a/spec/02-integration/06-invalidations/01-cluster_events_spec.lua b/spec/02-integration/06-invalidations/01-cluster_events_spec.lua index a2f9482e0553..b2831e7ebfbb 100644 --- a/spec/02-integration/06-invalidations/01-cluster_events_spec.lua +++ b/spec/02-integration/06-invalidations/01-cluster_events_spec.lua @@ -314,6 +314,39 @@ for _, strategy in helpers.each_strategy() do assert(cluster_events_1:poll()) assert.spy(spy_func).was_called(1) -- called end) + + it("broadcasts an event with a polling delay for subscribers", function() + local delay = 1 + + local cluster_events_1 = assert(kong_cluster_events.new { + db = db, + node_id = uuid_1, + poll_delay = delay, + }) + + local cluster_events_2 = assert(kong_cluster_events.new { + db = db, + node_id = uuid_2, + poll_delay = delay, + }) + + assert(cluster_events_1:subscribe("nbf_channel", cb, false)) -- false to not start auto polling + + assert(cluster_events_2:broadcast("nbf_channel", "hello world")) + + assert(cluster_events_1:poll()) + assert.spy(spy_func).was_not_called() -- not called yet + + ngx.sleep(0.001) -- still yield in case our timer is set to 0 + + assert(cluster_events_1:poll()) + assert.spy(spy_func).was_not_called() -- still not called + + ngx.sleep(delay) -- go past our desired `nbf` delay + + assert(cluster_events_1:poll()) + assert.spy(spy_func).was_called(1) -- called + end) end) end) end From e122c9a91941263c7eb4870be3b6d82fc554d9a0 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Tue, 21 Apr 2020 19:56:34 -0700 Subject: [PATCH 10/31] perf(cluster_events) avoid running unnecessary nbf timers Prior to this fix, it would be frequent that events would run with a `0` nbf delay timer (quite unnecessarily and wasting available timer resources). This could happen when the nbf was short and when some latency adds to the cluster events DB reads, causing events to arrive in the polling loop fairly late and causing a negative nbf (which is canceled out by the surrounding `math.max()` call). 
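A condensed sketch of the new callback dispatch (simplified from the hunk
below; module-local aliases such as max and timer_at are spelled out here):

    for j = 1, #cbs do
      local delay
      if row.nbf and row.now then
        local now = row.now + math.max(ngx.now() - local_start_time, 0)
        delay = math.max(row.nbf - now, 0)
      end

      if delay and delay > 0 then
        -- nbf still lies in the future: spend a timer and run the callback later
        ngx.timer.at(delay, nbf_cb_handler, cbs[j], row.data)
      else
        -- no nbf, or nbf already reached (delay clamped to 0): run inline,
        -- without consuming a timer slot
        pcall(cbs[j], row.data)
      end
    end
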
--- kong/cluster_events/init.lua | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/kong/cluster_events/init.lua b/kong/cluster_events/init.lua index 0de12ade4a3d..c87e761afcfa 100644 --- a/kong/cluster_events/init.lua +++ b/kong/cluster_events/init.lua @@ -245,24 +245,26 @@ local function process_event(self, row, local_start_time) end for j = 1, #cbs do - if not row.nbf then - -- unique callback run without delay - local ok, err = pcall(cbs[j], row.data) - if not ok and not ngx_debug then - log(ERR, "callback threw an error: ", err) - end + local delay - else - -- unique callback run after some delay + if row.nbf and row.now then local now = row.now + max(ngx_now() - local_start_time, 0) - local delay = max(row.nbf - now, 0) + delay = max(row.nbf - now, 0) + end + if delay and delay > 0 then log(DEBUG, "delaying nbf event by ", delay, "s") local ok, err = timer_at(delay, nbf_cb_handler, cbs[j], row.data) if not ok then log(ERR, "failed to schedule nbf event timer: ", err) end + + else + local ok, err = pcall(cbs[j], row.data) + if not ok and not ngx_debug then + log(ERR, "callback threw an error: ", err) + end end end From 69afbba7e69a2be978b94bd065d098294e265089 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Tue, 21 Apr 2020 20:08:32 -0700 Subject: [PATCH 11/31] fix(cluster_events) avoid potentially replaying events When cluster events are rapidly inserted into the database, and then polled by other nodes, those nodes will set a marker in the shm to ensure that if the event is polled again (e.g. because of polling offset or because of the ms grace period). This marker has an exptime that, if too short, could cause events that are polled again to be executed again. This increases the shm exptime to ensure that the execution marker lingers in the shm for at least 2 polling cycles. We also update the time before polling and before executing each event, in case a long running, non-yielding event would cause subsequent polled events to not trigger because their nbf isn't reached. --- kong/cluster_events/init.lua | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/kong/cluster_events/init.lua b/kong/cluster_events/init.lua index c87e761afcfa..f896d0684163 100644 --- a/kong/cluster_events/init.lua +++ b/kong/cluster_events/init.lua @@ -10,6 +10,7 @@ local insert = table.insert local ngx_log = ngx.log local ngx_now = ngx.now local timer_at = ngx.timer.at +local ngx_update_time = ngx.update_time local knode = (kong and kong.node) and kong.node or require "kong.pdk.node".new() @@ -115,6 +116,7 @@ function _M.new(opts) poll_interval = poll_interval, poll_offset = poll_offset, poll_delay = poll_delay, + event_ttl_shm = poll_interval * 2 + poll_offset, node_id = nil, polling = false, channels = {}, @@ -229,12 +231,11 @@ local function process_event(self, row, local_start_time) end log(DEBUG, "new event (channel: '", row.channel, "') data: '", row.data, - "' nbf: '", row.nbf or "none", "'") - - local exptime = self.poll_interval + self.poll_offset + "' nbf: '", row.nbf or "none", "' shm exptime: ", + self.event_ttl_shm) -- mark as ran before running in case of long-running callbacks - local ok, err = self.events_shm:set(row.id, true, exptime) + local ok, err = self.events_shm:set(row.id, true, self.event_ttl_shm) if not ok then return nil, "failed to mark event as ran: " .. 
err end @@ -248,6 +249,7 @@ local function process_event(self, row, local_start_time) local delay if row.nbf and row.now then + ngx_update_time() local now = row.now + max(ngx_now() - local_start_time, 0) delay = max(row.nbf - now, 0) end @@ -304,6 +306,7 @@ local function poll(self) end end + ngx_update_time() local local_start_time = ngx_now() for i = 1, count do local ok, err = process_event(self, rows[i], local_start_time) From fde4a2a7f083ccc09d216598cebef094abfa88be Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Tue, 21 Apr 2020 22:43:17 -0700 Subject: [PATCH 12/31] feat(cluster_event) be resilient when 'at' has been evicted from shm --- kong/cluster_events/init.lua | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/kong/cluster_events/init.lua b/kong/cluster_events/init.lua index f896d0684163..950261601072 100644 --- a/kong/cluster_events/init.lua +++ b/kong/cluster_events/init.lua @@ -282,15 +282,21 @@ local function poll(self) return nil, "failed to retrieve 'at' in shm: " .. err end - if not min_at then - return nil, "no 'at' in shm" - end - - -- apply grace period - - min_at = min_at - self.poll_offset - 0.001 + if min_at then + -- apply grace period + min_at = min_at - self.poll_offset - 0.001 + log(DEBUG, "polling events from: ", min_at) - log(DEBUG, "polling events from: ", min_at) + else + -- 'at' was evicted from 'kong' shm - safest is to resume fetching events + -- that may still be in the shm to ensure that we do not replay them + -- This is far from normal behavior, since the 'at' value should never + -- be evicted from the 'kong' shm (which should be frozen and never subject + -- to eviction, unless misused). + local now = self.strategy:server_time() or ngx_now() + min_at = now - self.event_ttl_shm + log(CRIT, "no 'at' in shm, polling events from: ", min_at) + end for rows, err, page in self.strategy:select_interval(self.channels, min_at) do if err then From fbe98f1e9b715ba195b833500be4b34515e4d958 Mon Sep 17 00:00:00 2001 From: Thijs Schreijer Date: Wed, 22 Apr 2020 12:37:48 +0200 Subject: [PATCH 13/31] perf(balancer) do not compare history after a cleanup when the old-history is longer than the new history, then a cleanup happened, and comparing entry-by-entry is not needed --- kong/runloop/balancer.lua | 49 ++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 21 deletions(-) diff --git a/kong/runloop/balancer.lua b/kong/runloop/balancer.lua index 1cd3a86e9b63..7d5a9798c2a3 100644 --- a/kong/runloop/balancer.lua +++ b/kong/runloop/balancer.lua @@ -495,31 +495,38 @@ local function check_target_history(upstream, balancer) local old_size = #old_history local new_size = #new_history - -- compare balancer history with db-loaded history - local last_equal_index = 0 -- last index where history is the same - for i, entry in ipairs(old_history) do - local new_entry = new_history[i] - if new_entry and - new_entry.name == entry.name and - new_entry.port == entry.port and - new_entry.weight == entry.weight - then - last_equal_index = i - else - break + if new_size >= old_size then + -- compare balancer history with db-loaded history + local last_equal_index = 0 -- last index where history is the same + for i, entry in ipairs(old_history) do + local new_entry = new_history[i] + if new_entry and + new_entry.name == entry.name and + new_entry.port == entry.port and + new_entry.weight == entry.weight + then + last_equal_index = i + else + break + end end - end - if last_equal_index == new_size and new_size == 
old_size then - -- No history update is necessary in the balancer object. - return true - elseif last_equal_index == old_size then - -- history is the same, so we only need to add new entries - apply_history(balancer, new_history, last_equal_index + 1) - return true + if last_equal_index == old_size then + -- The history from which our balancer was build is still identical + if new_size == old_size then + -- No new targets, so no update is necessary in the balancer object + return true + end + + -- new_size > old_size in this case + -- history is the same, but we now have additional entries, apply them + apply_history(balancer, new_history, last_equal_index + 1) + return true + end end - -- history not the same. + -- History not the same. Either a history-cleanup happened, or due to + -- eventual-consistency a target showed up "in the past". -- TODO: ideally we would undo the last ones until we're equal again -- and can replay changes, but not supported by ring-balancer yet. -- for now; create a new balancer from scratch From cd9d2206620017271d1023736b6a45fae84a91de Mon Sep 17 00:00:00 2001 From: Thijs Schreijer Date: Wed, 22 Apr 2020 12:51:24 +0200 Subject: [PATCH 14/31] fix(balancer) delete oldest target entries first This is not a bug-fix, just a safeguard. If the cleanup was interrupted, the newer entries could have been deleted, so old entries might reappear. Consider history: now-10: target=localhost:80, weight=10 now-5 : target=localhost:80, weight=0 now : target=httpbin.org:80, weight=100 A cleanup would remove 'now-5' and 'now-10' in that order, so if something failed and only 'now-5' was deleted, then the target at 'now-10' would reappear. By deleting in the reverse order, a failure would not affect the end result. --- kong/db/dao/targets.lua | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kong/db/dao/targets.lua b/kong/db/dao/targets.lua index ec95d7de1029..f6cb66f272dc 100644 --- a/kong/db/dao/targets.lua +++ b/kong/db/dao/targets.lua @@ -65,7 +65,10 @@ local function clean_history(self, upstream_pk) ngx.log(ngx.NOTICE, "[Target DAO] Starting cleanup of target table for upstream ", tostring(upstream_pk.id)) local cnt = 0 - for _, entry in ipairs(delete) do + -- reverse again; so deleting oldest entries first + for i = #delete, 1, -1 do + local entry = delete[i] + -- notice super - this is real delete (not creating a new entity with weight = 0) self.super.delete(self, { id = entry.id }) -- ignoring errors here, deleted by id, so should not matter From 081518918fca19c12ad3b2b799c1abf33fa8b78d Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Tue, 21 Apr 2020 23:15:45 -0700 Subject: [PATCH 15/31] chore(deps) bump proxy-cache plugin to 1.3 https://github.com/Kong/kong-plugin-proxy-cache/releases/tag/1.3.1 --- kong-2.0.3-0.rockspec | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kong-2.0.3-0.rockspec b/kong-2.0.3-0.rockspec index 352b98e1adc9..c72cb591c429 100644 --- a/kong-2.0.3-0.rockspec +++ b/kong-2.0.3-0.rockspec @@ -42,7 +42,7 @@ dependencies = { "kong-plugin-zipkin ~> 0.2", "kong-plugin-serverless-functions ~> 0.3", "kong-prometheus-plugin ~> 0.7", - "kong-proxy-cache-plugin ~> 1.2", + "kong-proxy-cache-plugin ~> 1.3", "kong-plugin-request-transformer ~> 1.2", "kong-plugin-session ~> 2.2", "kong-plugin-aws-lambda ~> 3.1", From dbfdbb661d0a69944251d3f14dcf72f949698914 Mon Sep 17 00:00:00 2001 From: Colin Hutchinson Date: Wed, 22 Apr 2020 16:04:05 +0000 Subject: [PATCH 16/31] docs(changelog) modify 2.0.3 CHANGELOG.md formatting 
--- CHANGELOG.md | 76 +++++++++++++++++++++++++++++++++++----------------- 1 file changed, 51 insertions(+), 25 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6098fa2b4cc7..6fb1bfdb8f32 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ # Table of Contents +- [2.0.4](#204) - [2.0.3](#203) - [2.0.2](#202) - [2.0.1](#201) @@ -42,6 +43,30 @@ - [0.9.9 and prior](#099---20170202) +## [2.0.4] + +> Released 2020/04/22 + +<<< TODO Introduction, plus any sections below >>> + +### Fixes + +##### Core + +##### CLI + +##### Configuration + +##### Admin API + +##### PDK + +##### Plugins + + +[Back to TOC](#table-of-contents) + + ## [2.0.3] > Released 2020/04/06 @@ -54,40 +79,40 @@ breaking changes. ##### Core - - Setting the target weight to 0 does not automatically remove the upstream. - [#5710](https://github.com/Kong/kong/pull/5710). - - The plugins iterator is now always fully built, even if the initialization - of any of them fails. - [#5692](https://github.com/Kong/kong/pull/5692). - - Fixed the load of `dns_not_found_ttl` and `dns_error_ttl` configuration - options. - [#5684](https://github.com/Kong/kong/pull/5684). - - Consumers and tags are properly warmed-up from the plugins' perspective, - i.e. they are loaded to the cache space that plugins access. - [#5669](https://github.com/Kong/kong/pull/5669). - - Customized error messages don't affect subsequent default error responses - now. - [#5673](https://github.com/Kong/kong/pull/5673). + - Setting the target weight to 0 does not automatically remove the upstream. + [#5710](https://github.com/Kong/kong/pull/5710). + - The plugins iterator is now always fully built, even if the initialization + of any of them fails. + [#5692](https://github.com/Kong/kong/pull/5692). + - Fixed the load of `dns_not_found_ttl` and `dns_error_ttl` configuration + options. + [#5684](https://github.com/Kong/kong/pull/5684). + - Consumers and tags are properly warmed-up from the plugins' perspective, + i.e. they are loaded to the cache space that plugins access. + [#5669](https://github.com/Kong/kong/pull/5669). + - Customized error messages don't affect subsequent default error responses + now. + [#5673](https://github.com/Kong/kong/pull/5673). ##### CLI - - Fixed the `lua_package_path` option precedence over `LUA_PATH` environment - variable. - [#5729](https://github.com/Kong/kong/pull/5729). - - Support to Nginx binary upgrade by correctly handling the `USR2` signal. - [#5657](https://github.com/Kong/kong/pull/5657). + - Fixed the `lua_package_path` option precedence over `LUA_PATH` environment + variable. + [#5729](https://github.com/Kong/kong/pull/5729). + - Support to Nginx binary upgrade by correctly handling the `USR2` signal. + [#5657](https://github.com/Kong/kong/pull/5657). ##### Configuration - - Fixed the logrotate configuration file with the right line terminators. - [#243](https://github.com/Kong/kong-build-tools/pull/243). - Thanks, [WALL-E](https://github.com/WALL-E) + - Fixed the logrotate configuration file with the right line terminators. + [#243](https://github.com/Kong/kong-build-tools/pull/243). + Thanks, [WALL-E](https://github.com/WALL-E) ##### Admin API - - Fixed the `sni is duplicated` error when sending multiple `SNIs` as body - arguments and an `SNI` on URL that matched one from the body. - [#5660](https://github.com/Kong/kong/pull/5660). + - Fixed the `sni is duplicated` error when sending multiple `SNIs` as body + arguments and an `SNI` on URL that matched one from the body. 
+ [#5660](https://github.com/Kong/kong/pull/5660). [Back to TOC](#table-of-contents) @@ -4747,6 +4772,7 @@ First version running with Cassandra. [Back to TOC](#table-of-contents) +[2.0.4]: https://github.com/Kong/kong/compare/2.0.3...2.0.4 [2.0.3]: https://github.com/Kong/kong/compare/2.0.2...2.0.3 [2.0.2]: https://github.com/Kong/kong/compare/2.0.1...2.0.2 [2.0.1]: https://github.com/Kong/kong/compare/2.0.0...2.0.1 From 9083519438dbc96dfe88c951028a54deea56b64d Mon Sep 17 00:00:00 2001 From: Colin Hutchinson Date: Wed, 22 Apr 2020 16:19:12 +0000 Subject: [PATCH 17/31] docs(changelog) add 2.0.4 changes --- CHANGELOG.md | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6fb1bfdb8f32..522bc9ace26a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,21 +47,29 @@ > Released 2020/04/22 -<<< TODO Introduction, plus any sections below >>> - ### Fixes ##### Core -##### CLI + - Disable JIT mlcache:get_bulk() on ARM64 + [#5797](https://github.com/Kong/kong/pull/5797) + - Don't incrementing log counters on unexpected errors + [#5783](https://github.com/Kong/kong/pull/5783) + - Invalidate target history at cleanup so balancers stay synced + [#5775](https://github.com/Kong/kong/pull/5775) + - Set a log prefix with the upstream name + [#5773](https://github.com/Kong/kong/pull/5773) + - Fix memory leaks when loading a declarative config that fails schema validation + [#5766](https://github.com/Kong/kong/pull/5766) + - Fix some balancer and cluster_events issues + [#5804](https://github.com/Kong/kong/pull/5804) ##### Configuration -##### Admin API - -##### PDK - -##### Plugins + - Send declarative config updates to stream subsystem via Unix domain + [#5797](https://github.com/Kong/kong/pull/5797) + - Now when using declarative configurations the cache is purged on reload, cleaning any references to removed entries + [#5769](https://github.com/Kong/kong/pull/5769) [Back to TOC](#table-of-contents) From 7be7ab18644a1b5c48dea1bc62aec7b4db434d18 Mon Sep 17 00:00:00 2001 From: Colin Hutchinson Date: Wed, 22 Apr 2020 16:20:04 +0000 Subject: [PATCH 18/31] release: 2.0.4 --- kong-2.0.3-0.rockspec => kong-2.0.4-0.rockspec | 4 ++-- kong/meta.lua | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) rename kong-2.0.3-0.rockspec => kong-2.0.4-0.rockspec (99%) diff --git a/kong-2.0.3-0.rockspec b/kong-2.0.4-0.rockspec similarity index 99% rename from kong-2.0.3-0.rockspec rename to kong-2.0.4-0.rockspec index c72cb591c429..f58db6c67f83 100644 --- a/kong-2.0.3-0.rockspec +++ b/kong-2.0.4-0.rockspec @@ -1,9 +1,9 @@ package = "kong" -version = "2.0.3-0" +version = "2.0.4-0" supported_platforms = {"linux", "macosx"} source = { url = "git://github.com/Kong/kong", - tag = "2.0.3" + tag = "2.0.4" } description = { summary = "Kong is a scalable and customizable API Management Layer built on top of Nginx.", diff --git a/kong/meta.lua b/kong/meta.lua index 26014ff1be99..cfc88b8e869a 100644 --- a/kong/meta.lua +++ b/kong/meta.lua @@ -1,7 +1,7 @@ local version = setmetatable({ major = 2, minor = 0, - patch = 3, + patch = 4, -- suffix = "" }, { __tostring = function(t) From 85b262b3a7f7124e4a8a1a297f8ec6e11352ae88 Mon Sep 17 00:00:00 2001 From: Colin Hutchinson Date: Fri, 24 Apr 2020 03:31:33 -0700 Subject: [PATCH 19/31] fix(release) let the release continue even if parts of it fail (#5815) --- Jenkinsfile | 1 - 1 file changed, 1 deletion(-) diff --git a/Jenkinsfile b/Jenkinsfile index 231b1f1d14dd..c1f5d56f732a 100644 --- a/Jenkinsfile +++ b/Jenkinsfile 
@@ -5,7 +5,6 @@ pipeline { } options { retry(1) - parallelsAlwaysFailFast() timeout(time: 2, unit: 'HOURS') } environment { From 8906d9415e5c117a1b92a33360fc896e0cbb4eda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Enrique=20Garc=C3=ADa=20Cota?= Date: Fri, 24 Apr 2020 17:25:06 +0200 Subject: [PATCH 20/31] tests(fixtures) replace expired SSL cert (#5818) --- spec/fixtures/ssl.lua | 71 +++++++++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 29 deletions(-) diff --git a/spec/fixtures/ssl.lua b/spec/fixtures/ssl.lua index 72e0065a5aef..955b49d0cab4 100644 --- a/spec/fixtures/ssl.lua +++ b/spec/fixtures/ssl.lua @@ -1,34 +1,47 @@ return { + -- Version: 1 (0x0) + -- ... + -- Issuer: C = US, ST = California, L = San Francisco, O = Kong, OU = Core, CN = ssl-example.com + -- Validity + -- Not Before: Apr 24 14:36:29 2020 GMT + -- Not After : Feb 7 14:36:29 2294 GMT + -- ... + -- + -- Note: Version 1 was accomplished by using a openssl.cnf file + -- with the x509_extensions line commented out. + -- See https://stackoverflow.com/questions/26788244/how-to-create-a-legacy-v1-or-v2-x-509-cert-for-testing + -- and this line's commit message for more info cert = [[-----BEGIN CERTIFICATE----- -MIIFZjCCA04CCQCKonzfLctlLjANBgkqhkiG9w0BAQsFADB1MQswCQYDVQQGEwJV -UzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEQ -MA4GA1UEChMHTWFzaGFwZTENMAsGA1UECxMES29uZzEYMBYGA1UEAxMPc3NsLWV4 -YW1wbGUuY29tMB4XDTE3MDUwOTIyMDI0OFoXDTIwMDQyMzIyMDI0OFowdTELMAkG -A1UEBhMCVVMxEzARBgNVBAgTCkNhbGlmb3JuaWExFjAUBgNVBAcTDVNhbiBGcmFu -Y2lzY28xEDAOBgNVBAoTB01hc2hhcGUxDTALBgNVBAsTBEtvbmcxGDAWBgNVBAMT -D3NzbC1leGFtcGxlLmNvbTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB -AMN3chlEaB0vfhFjUyIATYi2ck1tkY9AP1cTfYP8jVwMIBeMj8xCu+eJImGedz/w -XbLv+3MnYZkiTQLwXPsxFYNccIG31vI4dTQmGmmP9xHJaqUjsau/JnJACLI6H/3t -k3nbHBeqIUNFcJ3jc8iA+GJ7qtwmR1hummDwRPAxuNlyyVxfoRQnMtSr8jQGxypu -p+IBry+InRrVtJYg0AEpZivkqqmJF8taxMmYy2FaCHmRou0xlLeb8AwiusokljaS -W/I7pN5eQe5nCbV1rLg3GEifFmXkfw01suAfP5b/yDq+CjY5p/LRysCFLuIrSglk -yArbmntUNuLwFd/2Spl6rXtVyP24HwnELihOPLpdP0cZMAnfInRO0s+nx1rIm3p9 -s2By4cqSJkkAl+Dp5yXaNfFhbto72ipwgDdu2yXVMlufOkwrzN5ArUcDDuvJDyp2 -EpuhlR7T2LBzKPHLPGJ7KZLfzv87pK2e7dReg+eKD17HHzM6CRxl8VrKGu0RtLvi -XBzJbCdKaOr+1TDX5BvOhMQkN4zIWdmM4lPkZtq4Vp0Xuj8ZJsK6tZvKHYb+zlLq -/7FrvPDvF07wQIRQpfCzgiIZK6Letpla34Hnu4EVCpR58NXofox68+TxoxMLd/1+ -GnbyVtuN9kEjcmLC1pVJXXWQl/CbM5yPX3yeZmVEei0xAgMBAAEwDQYJKoZIhvcN -AQELBQADggIBALuKWzXiU/gHWJ40D69SsHjiG/iLZC4Zf0f0Y7OuSqa5KyZgJJi2 -D4A1ccR87aqqPsQ1qpdXiLGKm/s52lfzt6d8sqyJhOmg6KWXeWizq6jvGYcRT+XD -cxHm7N4KdHPH6ditTh8RGwwGRe3J+TzBXATdrDMRUC5q+A8Rf7j2gZ9MiRl4Kpgd -W6a5SS4bFNvdLIYMrdAIE/m6N9zE6Vom6aF4O71sqR+Id9IMExio5nmJRw08gpPU -nwu+psVr+iZvxIfuQCkRv+PXpdE28O5R0Wst3pmXqMeuz7iXBG2RGhGVo4Z+F8A9 -d/NmTt8W9HBgBdpMiwrqsMefuOYjNRea41kM5dBC4AaOlzA70LMmvgPo5LbeW4OP -fcxShQdxolP+UvML8jTh0CT4sQKO7gOz9p76Fmp+wb7rDA95MWSSjkRNwis0J+Ts -pA5kiMJRiwhWc//u3F/EibyOukMhg00CEAVnvTGIA3l1Ouk+x5iNcZc2ZEfgApt+ -kz2kyFVKDiziQvNx2CMA1677YdCWr/3ip2OxInd/EsfdksTDLgtq0fvADC7oCk0w -c0DZnnzGsBHsAMXootF4eI58q78O5Xb8FXd8TTLS9adkO5MEjmtPVJxe1A8UTxLL -QmTSCLTR0z09a85HeoWvEgp0jsmCmEI269U+2N9N6RtFkFyFCYROC/1b +MIIFbTCCA1UCFGjFyapVZYpvpKuYDJbLA1YJip++MA0GCSqGSIb3DQEBCwUAMHIx +CzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4g +RnJhbmNpc2NvMQ0wCwYDVQQKEwRLb25nMQ0wCwYDVQQLEwRDb3JlMRgwFgYDVQQD +Ew9zc2wtZXhhbXBsZS5jb20wIBcNMjAwNDI0MTQzNjI5WhgPMjI5NDAyMDcxNDM2 +MjlaMHIxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQH +Ew1TYW4gRnJhbmNpc2NvMQ0wCwYDVQQKEwRLb25nMQ0wCwYDVQQLEwRDb3JlMRgw 
+FgYDVQQDEw9zc2wtZXhhbXBsZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw +ggIKAoICAQDDd3IZRGgdL34RY1MiAE2ItnJNbZGPQD9XE32D/I1cDCAXjI/MQrvn +iSJhnnc/8F2y7/tzJ2GZIk0C8Fz7MRWDXHCBt9byOHU0Jhppj/cRyWqlI7GrvyZy +QAiyOh/97ZN52xwXqiFDRXCd43PIgPhie6rcJkdYbppg8ETwMbjZcslcX6EUJzLU +q/I0BscqbqfiAa8viJ0a1bSWINABKWYr5KqpiRfLWsTJmMthWgh5kaLtMZS3m/AM +IrrKJJY2klvyO6TeXkHuZwm1day4NxhInxZl5H8NNbLgHz+W/8g6vgo2Oafy0crA +hS7iK0oJZMgK25p7VDbi8BXf9kqZeq17Vcj9uB8JxC4oTjy6XT9HGTAJ3yJ0TtLP +p8dayJt6fbNgcuHKkiZJAJfg6ecl2jXxYW7aO9oqcIA3btsl1TJbnzpMK8zeQK1H +Aw7ryQ8qdhKboZUe09iwcyjxyzxieymS387/O6Stnu3UXoPnig9exx8zOgkcZfFa +yhrtEbS74lwcyWwnSmjq/tUw1+QbzoTEJDeMyFnZjOJT5GbauFadF7o/GSbCurWb +yh2G/s5S6v+xa7zw7xdO8ECEUKXws4IiGSui3raZWt+B57uBFQqUefDV6H6MevPk +8aMTC3f9fhp28lbbjfZBI3JiwtaVSV11kJfwmzOcj198nmZlRHotMQIDAQABMA0G +CSqGSIb3DQEBCwUAA4ICAQC51hH6cZrn+n8LiHlDaT/JFys3kKOQ4OpdpCUyUYzI +VYFnG/espH8LKzAiui4/LQjwygTkmNdp12GzIUsZItvpia2J4hsi7xNm/uKOhHwG +B1FViDF8FKOEihyMsZVAHIBj54RjuQ+WLbuQCjajX4PrK2La6lhWMn4cyvFWXCYB +A28Vrz/jXgXCXEct4+b2gZApOJ2H8qAyJv8JtFOptbB5mUZz3u3PW8/bTwG901/L +P9rWLq4AXT+UyPwBNs/lG4XXGc5uBfQjHkvamNKQP3usZuxAygdOEz6vJh9i0nyX +2b/+F/GLi8ZZwllapmp8c3WdsJkycBJ22VLS/LFNNvkz4sbT1dw5w1A7XJhiVDDZ +Dt9HMqK5qb4GAbaWwS+HPC63vrP6Ltw4QiAhC5x3bRujJ9CscRTVHXxMNw9b1TkQ +8AGgEFZKtbhirmv2/MQv+T57LQgnFPWNJWwv3YjJOIzDLEOeOxHMFV3Po5R5B2eP +qhLqmwYS6tQ/ih5BnlbZPBrArdVvsVCWLjQRy9qgetBlh+c65cL4HUAe/BxpXQSK +OoNpTQYMpSXlERwqm2/LN8rJl3XFlGtSH2xHucX8V3eN1bPURegkfplgPI+HDZDp +LAhXzHSQgW+cvcEL9Jafm5e5kRqDei4VSJteBfo+X/eTp0WnGJOYv0uJqwUJheNe +IQ== -----END CERTIFICATE-----]], key = [[-----BEGIN RSA PRIVATE KEY----- MIIJKAIBAAKCAgEAw3dyGURoHS9+EWNTIgBNiLZyTW2Rj0A/VxN9g/yNXAwgF4yP From 0872ea659ee3c58de2b6dae7c6eb387e46639f20 Mon Sep 17 00:00:00 2001 From: Colin Hutchinson Date: Mon, 27 Apr 2020 13:55:23 -0700 Subject: [PATCH 21/31] chore(deps) bump OpenSSL to 1.1.1g (#5810) --- .requirements | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.requirements b/.requirements index 9f7d1f3fdf7e..ff217a9eecd5 100644 --- a/.requirements +++ b/.requirements @@ -4,7 +4,7 @@ KONG_LICENSE="ASL 2.0" RESTY_VERSION=1.15.8.3 RESTY_LUAROCKS_VERSION=3.3.1 -RESTY_OPENSSL_VERSION=1.1.1f +RESTY_OPENSSL_VERSION=1.1.1g RESTY_PCRE_VERSION=8.44 LIBYAML_VERSION=0.2.2 KONG_GO_PLUGINSERVER_VERSION=v0.3.0 From 2dff49c2d9e0b430057ec833947a9d30c9d5f7c7 Mon Sep 17 00:00:00 2001 From: Datong Sun Date: Wed, 29 Apr 2020 00:22:35 +0800 Subject: [PATCH 22/31] fix(conf_loader) do not apply additional processing when loading from * fix(conf_loader) do not apply additional processing when loading from `.kong_env` file Doing so could cause failures such as ones described in #5761, where a value of `"!abCDefGHijKL4\#1MN2OP3"` was first transformed to `"!abCDefGHijKL4#1MN2OP3"` and written to `.kong_env`. If we apply transformation to `"!abCDefGHijKL4#1MN2OP3"` again when loading from `.kong_env`, then it becomes `"!abCDefGHijKL4` which results in different value used by the CLI and worker. fixes #5761 --- kong/conf_loader.lua | 14 ++++++++------ spec/01-unit/03-conf_loader_spec.lua | 12 ++++++++++++ 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/kong/conf_loader.lua b/kong/conf_loader.lua index 12a3300936fe..3a058b4c9c52 100644 --- a/kong/conf_loader.lua +++ b/kong/conf_loader.lua @@ -434,7 +434,7 @@ local _nop_tostring_mt = { -- Validate properties (type/enum/custom) and infer their type. -- @param[type=table] conf The configuration table to treat. 
-local function check_and_infer(conf) +local function check_and_infer(conf, opts) local errors = {} for k, value in pairs(conf) do @@ -442,10 +442,12 @@ local function check_and_infer(conf) local typ = v_schema.typ if type(value) == "string" then - -- remove trailing comment, if any - -- and remove escape chars from octothorpes - value = string.gsub(value, "[^\\]#.-$", "") - value = string.gsub(value, "\\#", "#") + if not opts.from_kong_env then + -- remove trailing comment, if any + -- and remove escape chars from octothorpes + value = string.gsub(value, "[^\\]#.-$", "") + value = string.gsub(value, "\\#", "#") + end value = pl_stringx.strip(value) end @@ -1108,7 +1110,7 @@ local function load(path, custom_conf, opts) user_conf) -- validation - local ok, err, errors = check_and_infer(conf) + local ok, err, errors = check_and_infer(conf, opts) if not opts.starting then log.enable() diff --git a/spec/01-unit/03-conf_loader_spec.lua b/spec/01-unit/03-conf_loader_spec.lua index 7ebdfdcf9cd2..8a5857ecaf8a 100644 --- a/spec/01-unit/03-conf_loader_spec.lua +++ b/spec/01-unit/03-conf_loader_spec.lua @@ -87,6 +87,18 @@ describe("Configuration loader", function() assert.True(conf.loaded_plugins["foo"]) assert.True(conf.loaded_plugins["bar"]) end) + it("apply # transformations when loading from config file directly", function() + local conf = assert(conf_loader(nil, { + pg_password = "!abCDefGHijKL4\\#1MN2OP3", + })) + assert.same("!abCDefGHijKL4#1MN2OP3", conf.pg_password) + end) + it("no longer applies # transformations when loading from .kong_env (issue #5761)", function() + local conf = assert(conf_loader(nil, { + pg_password = "!abCDefGHijKL4\\#1MN2OP3", + }, { from_kong_env = true, })) + assert.same("!abCDefGHijKL4\\#1MN2OP3", conf.pg_password) + end) it("loads custom plugins surrounded by spaces", function() local conf = assert(conf_loader(nil, { plugins = " hello-world , another-one " From 907a7957c86fafa55c71f58cfeb5030a1e6a4d73 Mon Sep 17 00:00:00 2001 From: Vinicius Mignot Date: Wed, 29 Apr 2020 13:57:32 -0300 Subject: [PATCH 23/31] fix(pdk) stop request processing on body encoding error (#5829) --- kong/pdk/response.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kong/pdk/response.lua b/kong/pdk/response.lua index 8d1717a4452d..bc14d606ba50 100644 --- a/kong/pdk/response.lua +++ b/kong/pdk/response.lua @@ -556,7 +556,7 @@ local function new(self, major_version) local err json, err = cjson.encode(body) if err then - return nil, err + error(fmt("body encoding failed while flushing response: %s", err), 2) end end end From 49700d52be78c7f7e880f05cb19b13916131bbd6 Mon Sep 17 00:00:00 2001 From: Javier Date: Wed, 29 Apr 2020 13:11:32 -0500 Subject: [PATCH 24/31] fix(balancer) don't cache an empty upstream name dict (#5831) --- kong/runloop/balancer.lua | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kong/runloop/balancer.lua b/kong/runloop/balancer.lua index 7d5a9798c2a3..3e76b25e42da 100644 --- a/kong/runloop/balancer.lua +++ b/kong/runloop/balancer.lua @@ -546,6 +546,8 @@ local get_all_upstreams do local function load_upstreams_dict_into_memory() local upstreams_dict = {} + local found = nil + -- build a dictionary, indexed by the upstream name for up, err in singletons.db.upstreams:each() do if err then @@ -554,8 +556,10 @@ do end upstreams_dict[up.name] = up.id + found = true end - return upstreams_dict + + return found and upstreams_dict end _load_upstreams_dict_into_memory = load_upstreams_dict_into_memory From 
c38e421d58d86cb39bb62bf7b3e02a6b08ff0b1f Mon Sep 17 00:00:00 2001 From: Colin Hutchinson Date: Wed, 29 Apr 2020 17:11:39 -0400 Subject: [PATCH 25/31] chore(dependency) bump the kong-build-tools dependency (#5830) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index fbaf5378e72a..0c9dec29baa3 100644 --- a/Makefile +++ b/Makefile @@ -28,7 +28,7 @@ RESTY_VERSION ?= `grep RESTY_VERSION $(KONG_SOURCE_LOCATION)/.requirements | awk RESTY_LUAROCKS_VERSION ?= `grep RESTY_LUAROCKS_VERSION $(KONG_SOURCE_LOCATION)/.requirements | awk -F"=" '{print $$2}'` RESTY_OPENSSL_VERSION ?= `grep RESTY_OPENSSL_VERSION $(KONG_SOURCE_LOCATION)/.requirements | awk -F"=" '{print $$2}'` RESTY_PCRE_VERSION ?= `grep RESTY_PCRE_VERSION $(KONG_SOURCE_LOCATION)/.requirements | awk -F"=" '{print $$2}'` -KONG_BUILD_TOOLS ?= '4.3.1' +KONG_BUILD_TOOLS ?= '4.3.2' KONG_VERSION ?= `cat $(KONG_SOURCE_LOCATION)/kong-*.rockspec | grep tag | awk '{print $$3}' | sed 's/"//g'` OPENRESTY_PATCHES_BRANCH ?= master KONG_NGINX_MODULE_BRANCH ?= master From 39e7e44e58f4b31ba7e5633501734ed028f3ac65 Mon Sep 17 00:00:00 2001 From: Thibault Charbonnier Date: Wed, 29 Apr 2020 16:14:56 -0700 Subject: [PATCH 26/31] fix(init) properly check for core cache init errors --- kong/init.lua | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kong/init.lua b/kong/init.lua index 6d0c732e8b19..5a582d8dba07 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -502,7 +502,7 @@ function Kong.init_worker() kong.cache = cache local core_cache, err = kong_global.init_core_cache(kong.configuration, cluster_events, worker_events) - if not cache then + if not core_cache then stash_init_worker_error("failed to instantiate 'kong.core_cache' module: " .. err) return From 5aa079e536f6298dcc70e8cd54af60698cb59a3d Mon Sep 17 00:00:00 2001 From: Thijs Schreijer Date: Thu, 30 Apr 2020 12:13:40 +0200 Subject: [PATCH 27/31] docs(admin-api) add examples using arrays --- autodoc/data/admin-api.lua | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/autodoc/data/admin-api.lua b/autodoc/data/admin-api.lua index 5905223e3c49..28098631c9f5 100644 --- a/autodoc/data/admin-api.lua +++ b/autodoc/data/admin-api.lua @@ -78,6 +78,32 @@ return { config.limit=10&config.period=seconds ``` + When specifying arrays send the values in order, or use square brackets (numbering + inside the brackets is optional but if provided it must be 1-indexed, and + consecutive). 
An example route added to a service named `test-service`: + + ``` + curl -i -X POST http://localhost:8001/services/test-service/routes \ + -d "name=test-route" \ + -d "paths[1]=/path/one" \ + -d "paths[2]=/path/two" + ``` + + The following two are identical to the one above, but less explicit: + ``` + curl -i -X POST http://localhost:8001/services/test-service/routes \ + -d "name=test-route" \ + -d "paths[]=/path/one" \ + -d "paths[]=/path/two" + + curl -i -X POST http://localhost:8001/services/test-service/routes \ + -d "name=test-route" \ + -d "paths=/path/one" \ + -d "paths=/path/two" + ``` + + + - **application/json** Handy for complex bodies (ex: complex plugin configuration), in that case simply send @@ -91,6 +117,14 @@ return { } } ``` + + An example adding a route to a service named `test-service`: + + ``` + curl -i -X POST http://localhost:8001/services/test-service/routes \ + -H "Content-Type: application/json" \ + -d '{"name": "test-route", "paths": [ "/path/one", "/path/two" ]}' + ``` ]] }, }, From 6c1456ef6e07388c3779ddb700e9898582eb3173 Mon Sep 17 00:00:00 2001 From: Thijs Schreijer Date: Thu, 30 Apr 2020 12:52:11 +0200 Subject: [PATCH 28/31] docs(admin-api) sync with docs-site --- autodoc/data/admin-api.lua | 50 ++++++++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/autodoc/data/admin-api.lua b/autodoc/data/admin-api.lua index 28098631c9f5..428851cbd2bc 100644 --- a/autodoc/data/admin-api.lua +++ b/autodoc/data/admin-api.lua @@ -66,7 +66,29 @@ return { }, { title = [[Supported Content Types]], text = [[ - The Admin API accepts 2 content types on every endpoint: + The Admin API accepts 3 content types on every endpoint: + + - **application/json** + + Handy for complex bodies (ex: complex plugin configuration), in that case simply send + a JSON representation of the data you want to send. Example: + + ```json + { + "config": { + "limit": 10, + "period": "seconds" + } + } + ``` + + An example adding a route to a service named `test-service`: + + ``` + curl -i -X POST http://localhost:8001/services/test-service/routes \ + -H "Content-Type: application/json" \ + -d '{"name": "test-route", "paths": [ "/path/one", "/path/two" ]}' + ``` - **application/x-www-form-urlencoded** @@ -103,27 +125,25 @@ return { ``` + - **multipart/form-data** - - **application/json** + Similar to url-encoded, this content type uses dotted keys to reference nested + objects. Here is an example of sending a Lua file to the pre-function Kong plugin: - Handy for complex bodies (ex: complex plugin configuration), in that case simply send - a JSON representation of the data you want to send. Example: - - ```json - { - "config": { - "limit": 10, - "period": "seconds" - } - } + ``` + curl -i -X POST http://localhost:8001/services/plugin-testing/plugins \ + -F "name=pre-function" \ + -F "config.access=@custom-auth.lua" ``` - An example adding a route to a service named `test-service`: + When specifying arrays for this content-type the array indices must be specified. 
+ An example route added to a service named `test-service`: ``` curl -i -X POST http://localhost:8001/services/test-service/routes \ - -H "Content-Type: application/json" \ - -d '{"name": "test-route", "paths": [ "/path/one", "/path/two" ]}' + -F "name=test-route" \ + -F "paths[1]=/path/one" \ + -F "paths[2]=/path/two" ``` ]] }, From 29a731a7d7f442e104720a3ef6fbdc8ca44c9b13 Mon Sep 17 00:00:00 2001 From: Datong Sun Date: Tue, 28 Apr 2020 08:39:19 -0700 Subject: [PATCH 29/31] fix(declarative) fix race condition when reloading config by making sure the correct config page is purged during load The issue is only present when Kong is running with multiple workers. Use #5789 as an example, the following events happens: 1. A new declarative config is uploaded via the Admin API 2. Kong is on cache page 2, `kong.core_cache:purge()` is called without the shadow flag, causes the page `1` to be purged immediately 3. New config is loaded into page 1 4. Kong switches to use page 1, then page 2 is purged immediately. The switch to use page 1 is not immediate and some workers may keep using the now empty page 2 for a short period of time. This may cause page 2 to be poisoned with an incorrect negative match. Which for #5789, is the `sni:example.com` for looking up certificate matches. This will cause `sni:example.com` to forever return `false` when next time switch to page 2 happens. This commit fixes the semantics of atomic page switching. With this commit the issue described inside #5789 no longer happens, and that the race window that Kong presents the default certificate between page flips because page 1 is sometimes purged prematurely while in use no longer happens. fixes #5789 --- kong/db/declarative/init.lua | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/kong/db/declarative/init.lua b/kong/db/declarative/init.lua index bd5f44b613ee..208b8a31f758 100644 --- a/kong/db/declarative/init.lua +++ b/kong/db/declarative/init.lua @@ -363,8 +363,8 @@ function declarative.load_into_cache(entities, hash, shadow_page) -- but filtered for a given tag local tags_by_name = {} - kong.core_cache:purge() - kong.cache:purge() + kong.core_cache:purge(SHADOW) + kong.cache:purge(SHADOW) for entity_name, items in pairs(entities) do local dao = kong.db[entity_name] @@ -563,11 +563,8 @@ function declarative.load_into_cache_with_events(entities, hash) if ok ~= "done" then return nil, "failed to flip declarative config cache pages: " .. (err or ok) end - end - - kong.core_cache:purge(SHADOW) - if not ok then + else return nil, err end From 40dc14690240f62137cc360579940e926be9cd66 Mon Sep 17 00:00:00 2001 From: Datong Sun Date: Wed, 29 Apr 2020 11:20:40 -0700 Subject: [PATCH 30/31] fix(declarative) purge DB-less cache when reloading Inside #5769 we did not correctly fix the issue outlined inside #5705. To properly address it we need to make sure the first page of the DB cache is empty before loading the declarative config into it. 
fix #5705 --- kong/init.lua | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/kong/init.lua b/kong/init.lua index 5a582d8dba07..46ec67dc677b 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -174,6 +174,19 @@ do ngx.shared.kong:flush_all() ngx.shared.kong:flush_expired(0) + local db_cache = { + "kong_core_db_cache", + "kong_db_cache", + -- no need to purge the second page for DB-less mode, as when reload + -- happens Kong always uses the first page afterwards + } + for _, shm in ipairs(db_cache) do + ngx.shared[shm]:flush_all() + ngx.shared[shm]:flush_expired(0) + ngx.shared[shm .. "_miss"]:flush_all() + ngx.shared[shm .. "_miss"]:flush_expired(0) + end + for _, key in ipairs(preserve_keys) do ngx.shared.kong:set(key, preserved[key]) end From 6e005c1992c40270db788a6bcdc277df34ceca41 Mon Sep 17 00:00:00 2001 From: Datong Sun Date: Tue, 5 May 2020 00:58:43 +0800 Subject: [PATCH 31/31] fix(declarative) use correct cache page when worker respawns (#5850) --- kong/cache.lua | 16 +++- kong/db/declarative/init.lua | 5 ++ .../11-dbless/01-respawn_spec.lua | 87 +++++++++++++++++++ spec/helpers.lua | 16 ++++ 4 files changed, 122 insertions(+), 2 deletions(-) create mode 100644 spec/02-integration/11-dbless/01-respawn_spec.lua diff --git a/kong/cache.lua b/kong/cache.lua index 041a1ef47c7e..24c9e4098d33 100644 --- a/kong/cache.lua +++ b/kong/cache.lua @@ -198,12 +198,18 @@ function _M.new(opts) end end + local curr_mlcache = 1 + + if opts.cache_pages == 2 then + curr_mlcache = ngx.shared.kong:get("kong:cache:" .. opts.shm_name .. ":curr_mlcache") or 1 + end + local self = { cluster_events = opts.cluster_events, - mlcache = mlcaches[1], + mlcache = mlcaches[curr_mlcache], mlcaches = mlcaches, shm_names = shm_names, - curr_mlcache = 1, + curr_mlcache = curr_mlcache, } local ok, err = self.cluster_events:subscribe("invalidations", function(key) @@ -221,6 +227,12 @@ function _M.new(opts) end +function _M:save_curr_page() + return ngx.shared.kong:set( + "kong:cache:" .. self.shm_names[1] .. ":curr_mlcache", self.curr_mlcache) +end + + function _M:get(key, opts, cb, ...) if type(key) ~= "string" then error("key must be a string", 2) diff --git a/kong/db/declarative/init.lua b/kong/db/declarative/init.lua index 208b8a31f758..048986d60e6e 100644 --- a/kong/db/declarative/init.lua +++ b/kong/db/declarative/init.lua @@ -568,6 +568,11 @@ function declarative.load_into_cache_with_events(entities, hash) return nil, err end + ok, err = kong.core_cache:save_curr_page() + if not ok then + return nil, "failed to persist cache page number inside shdict: " .. 
err + end + kong.core_cache:invalidate("router:version") ok, err = kong.worker_events.post("balancer", "upstreams", { diff --git a/spec/02-integration/11-dbless/01-respawn_spec.lua b/spec/02-integration/11-dbless/01-respawn_spec.lua new file mode 100644 index 000000000000..fae73ae75a69 --- /dev/null +++ b/spec/02-integration/11-dbless/01-respawn_spec.lua @@ -0,0 +1,87 @@ +local helpers = require "spec.helpers" + +describe("worker respawn", function() + local admin_client, proxy_client + + lazy_setup(function() + assert(helpers.start_kong({ + database = "off", + })) + end) + + lazy_teardown(function() + helpers.stop_kong(nil, true) + end) + + before_each(function() + admin_client = assert(helpers.admin_client()) + proxy_client = assert(helpers.proxy_client()) + end) + + after_each(function() + if admin_client then + admin_client:close() + end + + if proxy_client then + proxy_client:close() + end + end) + + it("lands on the correct cache page #5799", function() + local res = assert(admin_client:send { + method = "POST", + path = "/config", + body = { + config = [[ + _format_version: "1.1" + services: + - name: my-service + url: https://example.com + plugins: + - name: key-auth + routes: + - name: my-route + paths: + - / + + consumers: + - username: my-user + keyauth_credentials: + - key: my-key + ]], + }, + headers = { + ["Content-Type"] = "application/json" + } + }) + + assert.response(res).has.status(201) + + local res = assert(proxy_client:get("/")) + assert.res_status(401, res) + + res = assert(proxy_client:get("/", { + headers = { + apikey = "my-key" + } + })) + assert.res_status(200, res) + + -- kill all the workers forcing all of them to respawn + helpers.signal_workers(nil, "-TERM") + + proxy_client:close() + proxy_client = assert(helpers.proxy_client()) + + res = assert(proxy_client:get("/")) + assert.res_status(401, res) + + res = assert(proxy_client:get("/", { + headers = { + apikey = "my-key" + } + })) + assert.res_status(200, res) + end) +end) diff --git a/spec/helpers.lua b/spec/helpers.lua index 6fe5052ad838..af87e0db737f 100644 --- a/spec/helpers.lua +++ b/spec/helpers.lua @@ -2606,4 +2606,20 @@ end return kill.kill(pid_path, signal) end, + -- send signal to all Nginx workers, not including the master + signal_workers = function(prefix, signal, pid_path) + if not pid_path then + local running_conf = get_running_conf(prefix) + if not running_conf then + error("no config file found at prefix: " .. prefix) + end + + pid_path = running_conf.nginx_pid + end + + local cmd = string.format("pkill %s -P `cat %s`", signal, pid_path) + local _, code = pl_utils.execute(cmd) + + return code + end, }
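
The last three commits above all revolve around the same two-page DB-less cache: the page that is not being served is purged and repopulated with the new declarative config, workers flip to it, and (in the final commit) the active page index is persisted in the `kong` shared dict so that respawned workers land on the correct page. The standalone Lua sketch below is only an illustration of that flow under simplified assumptions: plain Lua tables stand in for shared dicts and mlcache pages, and every name in it is a hypothetical stand-in rather than Kong's actual API.

```lua
-- Illustrative sketch only: models the two-page cache flip and the
-- persisted "current page" index. Plain Lua, no ngx/OpenResty needed.

-- fake "shared dict" that survives simulated worker respawns
local shared = {}

local Cache = {}
Cache.__index = Cache

function Cache.new(name)
  -- start on whichever page was last persisted, defaulting to page 1;
  -- this mirrors reading the page index back when a worker respawns
  local curr = shared[name .. ":curr_page"] or 1
  return setmetatable({
    name = name,
    pages = { {}, {} },
    curr_page = curr,
  }, Cache)
end

function Cache:shadow_page()
  return self.curr_page == 1 and 2 or 1
end

function Cache:purge_shadow()
  -- only the page that is NOT being served is cleared,
  -- so in-flight requests keep a consistent view
  self.pages[self:shadow_page()] = {}
end

function Cache:load_into_shadow(entities)
  local page = self.pages[self:shadow_page()]
  for key, value in pairs(entities) do
    page[key] = value
  end
end

function Cache:flip()
  self.curr_page = self:shadow_page()
  -- persist the active page index so a respawned worker starts on it
  shared[self.name .. ":curr_page"] = self.curr_page
end

function Cache:get(key)
  return self.pages[self.curr_page][key]
end

-- usage: load a new "declarative config", flip, then simulate a respawn
local cache = Cache.new("db_cache")
cache:purge_shadow()
cache:load_into_shadow({ ["services:my-service"] = { url = "https://example.com" } })
cache:flip()
assert(cache:get("services:my-service"))

local respawned = Cache.new("db_cache") -- new worker reads the persisted index
assert(respawned.curr_page == cache.curr_page)
```

The real implementation additionally coordinates the flip across workers and keeps the cached data in shared memory so it outlives a worker respawn; the sketch deliberately omits both concerns and only shows why persisting the page index matters.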