diff --git a/.requirements b/.requirements
index 1dea177db94a..4717be2006ba 100644
--- a/.requirements
+++ b/.requirements
@@ -4,7 +4,7 @@ KONG_LICENSE="ASL 2.0"
 RESTY_VERSION=1.15.8.3
 RESTY_LUAROCKS_VERSION=3.3.1
-RESTY_OPENSSL_VERSION=1.1.1f
+RESTY_OPENSSL_VERSION=1.1.1g
 RESTY_PCRE_VERSION=8.44
 LIBYAML_VERSION=0.2.3
 KONG_GO_PLUGINSERVER_VERSION=v0.3.0
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6098fa2b4cc7..522bc9ace26a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,7 @@
 # Table of Contents
 
+- [2.0.4](#204)
 - [2.0.3](#203)
 - [2.0.2](#202)
 - [2.0.1](#201)
@@ -42,6 +43,38 @@
 - [0.9.9 and prior](#099---20170202)
 
+## [2.0.4]
+
+> Released 2020/04/22
+
+### Fixes
+
+##### Core
+
+  - Disable JIT mlcache:get_bulk() on ARM64
+    [#5797](https://github.com/Kong/kong/pull/5797)
+  - Don't increment log counters on unexpected errors
+    [#5783](https://github.com/Kong/kong/pull/5783)
+  - Invalidate target history at cleanup so balancers stay synced
+    [#5775](https://github.com/Kong/kong/pull/5775)
+  - Set a log prefix with the upstream name
+    [#5773](https://github.com/Kong/kong/pull/5773)
+  - Fix memory leaks when loading a declarative config that fails
+    schema validation
+    [#5766](https://github.com/Kong/kong/pull/5766)
+  - Fix some balancer and cluster_events issues
+    [#5804](https://github.com/Kong/kong/pull/5804)
+
+##### Configuration
+
+  - Send declarative config updates to the stream subsystem via a Unix
+    domain socket
+    [#5797](https://github.com/Kong/kong/pull/5797)
+  - When using declarative configuration, the cache is now purged on reload,
+    clearing any references to removed entries
+    [#5769](https://github.com/Kong/kong/pull/5769)
+
+
+[Back to TOC](#table-of-contents)
+
+
 ## [2.0.3]
 
 > Released 2020/04/06
 
@@ -54,40 +87,40 @@ breaking changes.
 
 ##### Core
 
- - Setting the target weight to 0 does not automatically remove the upstream.
-   [#5710](https://github.com/Kong/kong/pull/5710).
- - The plugins iterator is now always fully built, even if the initialization
-   of any of them fails.
-   [#5692](https://github.com/Kong/kong/pull/5692).
- - Fixed the load of `dns_not_found_ttl` and `dns_error_ttl` configuration
-   options.
-   [#5684](https://github.com/Kong/kong/pull/5684).
- - Consumers and tags are properly warmed-up from the plugins' perspective,
-   i.e. they are loaded to the cache space that plugins access.
-   [#5669](https://github.com/Kong/kong/pull/5669).
- - Customized error messages don't affect subsequent default error responses
-   now.
-   [#5673](https://github.com/Kong/kong/pull/5673).
+  - Setting the target weight to 0 does not automatically remove the upstream.
+    [#5710](https://github.com/Kong/kong/pull/5710).
+  - The plugins iterator is now always fully built, even if the initialization
+    of any of them fails.
+    [#5692](https://github.com/Kong/kong/pull/5692).
+  - Fixed the load of `dns_not_found_ttl` and `dns_error_ttl` configuration
+    options.
+    [#5684](https://github.com/Kong/kong/pull/5684).
+  - Consumers and tags are properly warmed-up from the plugins' perspective,
+    i.e. they are loaded to the cache space that plugins access.
+    [#5669](https://github.com/Kong/kong/pull/5669).
+  - Customized error messages don't affect subsequent default error responses
+    now.
+    [#5673](https://github.com/Kong/kong/pull/5673).
 
 ##### CLI
 
- - Fixed the `lua_package_path` option precedence over `LUA_PATH` environment
-   variable.
-   [#5729](https://github.com/Kong/kong/pull/5729).
- - Support to Nginx binary upgrade by correctly handling the `USR2` signal.
-   [#5657](https://github.com/Kong/kong/pull/5657).
+  - Fixed the `lua_package_path` option precedence over `LUA_PATH` environment
+    variable.
+    [#5729](https://github.com/Kong/kong/pull/5729).
+  - Support to Nginx binary upgrade by correctly handling the `USR2` signal.
+    [#5657](https://github.com/Kong/kong/pull/5657).
 
 ##### Configuration
 
- - Fixed the logrotate configuration file with the right line terminators.
-   [#243](https://github.com/Kong/kong-build-tools/pull/243).
-   Thanks, [WALL-E](https://github.com/WALL-E)
+  - Fixed the logrotate configuration file with the right line terminators.
+    [#243](https://github.com/Kong/kong-build-tools/pull/243).
+    Thanks, [WALL-E](https://github.com/WALL-E)
 
 ##### Admin API
 
- - Fixed the `sni is duplicated` error when sending multiple `SNIs` as body
-   arguments and an `SNI` on URL that matched one from the body.
-   [#5660](https://github.com/Kong/kong/pull/5660).
+  - Fixed the `sni is duplicated` error when sending multiple `SNIs` as body
+    arguments and an `SNI` on URL that matched one from the body.
+    [#5660](https://github.com/Kong/kong/pull/5660).
 
 
 [Back to TOC](#table-of-contents)
 
@@ -4747,6 +4780,7 @@ First version running with Cassandra.
 
 [Back to TOC](#table-of-contents)
 
+[2.0.4]: https://github.com/Kong/kong/compare/2.0.3...2.0.4
 [2.0.3]: https://github.com/Kong/kong/compare/2.0.2...2.0.3
 [2.0.2]: https://github.com/Kong/kong/compare/2.0.1...2.0.2
 [2.0.1]: https://github.com/Kong/kong/compare/2.0.0...2.0.1
diff --git a/Jenkinsfile b/Jenkinsfile
index 231b1f1d14dd..c1f5d56f732a 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -5,7 +5,6 @@ pipeline {
     }
     options {
         retry(1)
-        parallelsAlwaysFailFast()
         timeout(time: 2, unit: 'HOURS')
     }
     environment {
diff --git a/autodoc/data/admin-api.lua b/autodoc/data/admin-api.lua
index 86b64c335761..25769c64efc4 100644
--- a/autodoc/data/admin-api.lua
+++ b/autodoc/data/admin-api.lua
@@ -66,17 +66,7 @@ return {
       }, {
         title = [[Supported Content Types]],
         text = [[
-          The Admin API accepts 2 content types on every endpoint:
-
-          - **application/x-www-form-urlencoded**
-
-          Simple enough for basic request bodies, you will probably use it most of the time.
-          Note that when sending nested values, Kong expects nested objects to be referenced
-          with dotted keys. Example:
-
-          ```
-          config.limit=10&config.period=seconds
-          ```
+          The Admin API accepts 3 content types on every endpoint:
 
           - **application/json**
 
@@ -91,6 +81,70 @@ return {
               }
           }
           ```
+
+          An example adding a route to a service named `test-service`:
+
+          ```
+          curl -i -X POST http://localhost:8001/services/test-service/routes \
+               -H "Content-Type: application/json" \
+               -d '{"name": "test-route", "paths": [ "/path/one", "/path/two" ]}'
+          ```
+
+          - **application/x-www-form-urlencoded**
+
+          Simple enough for basic request bodies, you will probably use it most of the time.
+          Note that when sending nested values, Kong expects nested objects to be referenced
+          with dotted keys. Example:
+
+          ```
+          config.limit=10&config.period=seconds
+          ```
+
+          When specifying arrays, send the values in order, or use square brackets (numbering
+          inside the brackets is optional, but if provided it must be 1-indexed and
+          consecutive).
+          An example route added to a service named `test-service`:
+
+          ```
+          curl -i -X POST http://localhost:8001/services/test-service/routes \
+               -d "name=test-route" \
+               -d "paths[1]=/path/one" \
+               -d "paths[2]=/path/two"
+          ```
+
+          The following two are identical to the one above, but less explicit:
+
+          ```
+          curl -i -X POST http://localhost:8001/services/test-service/routes \
+               -d "name=test-route" \
+               -d "paths[]=/path/one" \
+               -d "paths[]=/path/two"
+
+          curl -i -X POST http://localhost:8001/services/test-service/routes \
+               -d "name=test-route" \
+               -d "paths=/path/one" \
+               -d "paths=/path/two"
+          ```
+
+
+          - **multipart/form-data**
+
+          Similar to url-encoded, this content type uses dotted keys to reference nested
+          objects. Here is an example of sending a Lua file to the pre-function Kong plugin:
+
+          ```
+          curl -i -X POST http://localhost:8001/services/plugin-testing/plugins \
+               -F "name=pre-function" \
+               -F "config.access=@custom-auth.lua"
+          ```
+
+          When specifying arrays for this content type, the array indices must be specified.
+          An example route added to a service named `test-service`:
+
+          ```
+          curl -i -X POST http://localhost:8001/services/test-service/routes \
+               -F "name=test-route" \
+               -F "paths[1]=/path/one" \
+               -F "paths[2]=/path/two"
+          ```
         ]]
       },
     },
diff --git a/kong-2.0.3-0.rockspec b/kong-2.0.4-0.rockspec
similarity index 99%
rename from kong-2.0.3-0.rockspec
rename to kong-2.0.4-0.rockspec
index 64b23f83b6c2..ca3ca445c8a2 100644
--- a/kong-2.0.3-0.rockspec
+++ b/kong-2.0.4-0.rockspec
@@ -1,9 +1,9 @@
 package = "kong"
-version = "2.0.3-0"
+version = "2.0.4-0"
 supported_platforms = {"linux", "macosx"}
 source = {
   url = "git://github.com/Kong/kong",
-  tag = "2.0.3"
+  tag = "2.0.4"
 }
 description = {
   summary = "Kong is a scalable and customizable API Management Layer built on top of Nginx.",
diff --git a/kong/cache.lua b/kong/cache.lua
index e669d6c1ecdc..24c9e4098d33 100644
--- a/kong/cache.lua
+++ b/kong/cache.lua
@@ -131,10 +131,6 @@ function _M.new(opts)
     error("opts.worker_events is required", 2)
   end
 
-  if opts.propagation_delay and type(opts.propagation_delay) ~= "number" then
-    error("opts.propagation_delay must be a number", 2)
-  end
-
   if opts.ttl and type(opts.ttl) ~= "number" then
     error("opts.ttl must be a number", 2)
   end
@@ -202,13 +198,18 @@ function _M.new(opts)
     end
   end
 
+  local curr_mlcache = 1
+
+  if opts.cache_pages == 2 then
+    curr_mlcache = ngx.shared.kong:get("kong:cache:" .. opts.shm_name .. ":curr_mlcache") or 1
+  end
+
   local self = {
-    propagation_delay = max(opts.propagation_delay or 0, 0),
     cluster_events = opts.cluster_events,
-    mlcache = mlcaches[1],
+    mlcache = mlcaches[curr_mlcache],
     mlcaches = mlcaches,
     shm_names = shm_names,
-    curr_mlcache = 1,
+    curr_mlcache = curr_mlcache,
   }
 
   local ok, err = self.cluster_events:subscribe("invalidations", function(key)
@@ -226,6 +227,12 @@ function _M.new(opts)
 end
 
 
+function _M:save_curr_page()
+  return ngx.shared.kong:set(
+    "kong:cache:" .. self.shm_names[1] .. ":curr_mlcache", self.curr_mlcache)
+end
+
+
 function _M:get(key, opts, cb, ...)
if type(key) ~= "string" then error("key must be a string", 2) @@ -313,15 +320,9 @@ function _M:invalidate(key) self:invalidate_local(key) - local delay - if self.propagation_delay > 0 then - delay = self.propagation_delay - end - - log(DEBUG, "broadcasting (cluster) invalidation for key: '", key, "' ", - "with delay: '", delay or "none", "'") + log(DEBUG, "broadcasting (cluster) invalidation for key: '", key, "'") - local ok, err = self.cluster_events:broadcast("invalidations", key, delay) + local ok, err = self.cluster_events:broadcast("invalidations", key) if not ok then log(ERR, "failed to broadcast cached entity invalidation: ", err) end diff --git a/kong/cluster_events/init.lua b/kong/cluster_events/init.lua index 42bb76921178..950261601072 100644 --- a/kong/cluster_events/init.lua +++ b/kong/cluster_events/init.lua @@ -10,6 +10,7 @@ local insert = table.insert local ngx_log = ngx.log local ngx_now = ngx.now local timer_at = ngx.timer.at +local ngx_update_time = ngx.update_time local knode = (kong and kong.node) and kong.node or require "kong.pdk.node".new() @@ -69,6 +70,10 @@ function _M.new(opts) return error("opts.poll_offset must be a number") end + if opts.poll_delay and type(opts.poll_delay) ~= "number" then + return error("opts.poll_delay must be a number") + end + if not opts.db then return error("opts.db is required") end @@ -78,6 +83,7 @@ function _M.new(opts) local strategy local poll_interval = max(opts.poll_interval or 5, 0) local poll_offset = max(opts.poll_offset or 0, 0) + local poll_delay = max(opts.poll_delay or 0, 0) do local db_strategy @@ -109,6 +115,8 @@ function _M.new(opts) strategy = strategy, poll_interval = poll_interval, poll_offset = poll_offset, + poll_delay = poll_delay, + event_ttl_shm = poll_interval * 2 + poll_offset, node_id = nil, polling = false, channels = {}, @@ -152,6 +160,9 @@ function _M:broadcast(channel, data, delay) if delay and type(delay) ~= "number" then return nil, "delay must be a number" + + elseif self.poll_delay > 0 then + delay = self.poll_delay end -- insert event row @@ -220,12 +231,11 @@ local function process_event(self, row, local_start_time) end log(DEBUG, "new event (channel: '", row.channel, "') data: '", row.data, - "' nbf: '", row.nbf or "none", "'") - - local exptime = self.poll_interval + self.poll_offset + "' nbf: '", row.nbf or "none", "' shm exptime: ", + self.event_ttl_shm) -- mark as ran before running in case of long-running callbacks - local ok, err = self.events_shm:set(row.id, true, exptime) + local ok, err = self.events_shm:set(row.id, true, self.event_ttl_shm) if not ok then return nil, "failed to mark event as ran: " .. 
err end @@ -236,24 +246,27 @@ local function process_event(self, row, local_start_time) end for j = 1, #cbs do - if not row.nbf then - -- unique callback run without delay - local ok, err = pcall(cbs[j], row.data) - if not ok and not ngx_debug then - log(ERR, "callback threw an error: ", err) - end + local delay - else - -- unique callback run after some delay + if row.nbf and row.now then + ngx_update_time() local now = row.now + max(ngx_now() - local_start_time, 0) - local delay = max(row.nbf - now, 0) + delay = max(row.nbf - now, 0) + end + if delay and delay > 0 then log(DEBUG, "delaying nbf event by ", delay, "s") local ok, err = timer_at(delay, nbf_cb_handler, cbs[j], row.data) if not ok then log(ERR, "failed to schedule nbf event timer: ", err) end + + else + local ok, err = pcall(cbs[j], row.data) + if not ok and not ngx_debug then + log(ERR, "callback threw an error: ", err) + end end end @@ -269,15 +282,21 @@ local function poll(self) return nil, "failed to retrieve 'at' in shm: " .. err end - if not min_at then - return nil, "no 'at' in shm" - end - - -- apply grace period - - min_at = min_at - self.poll_offset - 0.001 + if min_at then + -- apply grace period + min_at = min_at - self.poll_offset - 0.001 + log(DEBUG, "polling events from: ", min_at) - log(DEBUG, "polling events from: ", min_at) + else + -- 'at' was evicted from 'kong' shm - safest is to resume fetching events + -- that may still be in the shm to ensure that we do not replay them + -- This is far from normal behavior, since the 'at' value should never + -- be evicted from the 'kong' shm (which should be frozen and never subject + -- to eviction, unless misused). + local now = self.strategy:server_time() or ngx_now() + min_at = now - self.event_ttl_shm + log(CRIT, "no 'at' in shm, polling events from: ", min_at) + end for rows, err, page in self.strategy:select_interval(self.channels, min_at) do if err then @@ -293,6 +312,7 @@ local function poll(self) end end + ngx_update_time() local local_start_time = ngx_now() for i = 1, count do local ok, err = process_event(self, rows[i], local_start_time) diff --git a/kong/conf_loader.lua b/kong/conf_loader.lua index aae8cd2bd473..93c06d9bebbe 100644 --- a/kong/conf_loader.lua +++ b/kong/conf_loader.lua @@ -464,6 +464,7 @@ local CONF_INFERENCES = { cluster_mtls = { enum = { "shared", "pki" } }, cluster_ca_cert = { typ = "string" }, cluster_server_name = { typ = "string" }, + kic = { typ = "boolean" }, } @@ -501,7 +502,7 @@ local _nop_tostring_mt = { -- Validate properties (type/enum/custom) and infer their type. -- @param[type=table] conf The configuration table to treat. 
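+-- @param[type=table] opts Extra loader options; when `opts.from_kong_env` is
+-- set, the octothorpe (#) comment-stripping pass is skipped, so values read
+-- back from the .kong_env file are not transformed a second time (issue #5761).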
-local function check_and_infer(conf) +local function check_and_infer(conf, opts) local errors = {} for k, value in pairs(conf) do @@ -509,10 +510,12 @@ local function check_and_infer(conf) local typ = v_schema.typ if type(value) == "string" then - -- remove trailing comment, if any - -- and remove escape chars from octothorpes - value = string.gsub(value, "[^\\]#.-$", "") - value = string.gsub(value, "\\#", "#") + if not opts.from_kong_env then + -- remove trailing comment, if any + -- and remove escape chars from octothorpes + value = string.gsub(value, "[^\\]#.-$", "") + value = string.gsub(value, "\\#", "#") + end value = pl_stringx.strip(value) end @@ -1246,7 +1249,7 @@ local function load(path, custom_conf, opts) user_conf) -- validation - local ok, err, errors = check_and_infer(conf) + local ok, err, errors = check_and_infer(conf, opts) if not opts.starting then log.enable() diff --git a/kong/db/dao/targets.lua b/kong/db/dao/targets.lua index ec95d7de1029..f6cb66f272dc 100644 --- a/kong/db/dao/targets.lua +++ b/kong/db/dao/targets.lua @@ -65,7 +65,10 @@ local function clean_history(self, upstream_pk) ngx.log(ngx.NOTICE, "[Target DAO] Starting cleanup of target table for upstream ", tostring(upstream_pk.id)) local cnt = 0 - for _, entry in ipairs(delete) do + -- reverse again; so deleting oldest entries first + for i = #delete, 1, -1 do + local entry = delete[i] + -- notice super - this is real delete (not creating a new entity with weight = 0) self.super.delete(self, { id = entry.id }) -- ignoring errors here, deleted by id, so should not matter diff --git a/kong/db/declarative/init.lua b/kong/db/declarative/init.lua index c80ea9de2077..048986d60e6e 100644 --- a/kong/db/declarative/init.lua +++ b/kong/db/declarative/init.lua @@ -10,7 +10,10 @@ local deepcopy = tablex.deepcopy local null = ngx.null local SHADOW = true local md5 = ngx.md5 +local ngx_socket_tcp = ngx.socket.tcp local REMOVE_FIRST_LINE_PATTERN = "^[^\n]+\n(.+)$" +local PREFIX = ngx.config.prefix() +local SUBSYS = ngx.config.subsystem local declarative = {} @@ -360,6 +363,9 @@ function declarative.load_into_cache(entities, hash, shadow_page) -- but filtered for a given tag local tags_by_name = {} + kong.core_cache:purge(SHADOW) + kong.cache:purge(SHADOW) + for entity_name, items in pairs(entities) do local dao = kong.db[entity_name] local schema = dao.schema @@ -518,6 +524,30 @@ function declarative.load_into_cache_with_events(entities, hash) return nil, err end + if SUBSYS == "http" and #kong.configuration.stream_listeners > 0 and + ngx.get_phase() ~= "init_worker" + then + -- update stream if necessary + -- TODO: remove this once shdict can be shared between subsystems + + local sock = ngx_socket_tcp() + ok, err = sock:connect("unix:" .. PREFIX .. "/stream_config.sock") + if not ok then + return nil, err + end + + local json = cjson.encode({ entities, hash, }) + local bytes + bytes, err = sock:send(json) + sock:close() + + if not bytes then + return nil, err + end + + assert(bytes == #json, "incomplete config sent to the stream subsystem") + end + ok, err = kong.worker_events.post("balancer", "upstreams", { operation = "delete_all", entity = { id = "all", name = "all" } @@ -533,12 +563,14 @@ function declarative.load_into_cache_with_events(entities, hash) if ok ~= "done" then return nil, "failed to flip declarative config cache pages: " .. 
(err or ok) end - end - kong.core_cache:purge(SHADOW) + else + return nil, err + end + ok, err = kong.core_cache:save_curr_page() if not ok then - return nil, err + return nil, "failed to persist cache page number inside shdict: " .. err end kong.core_cache:invalidate("router:version") diff --git a/kong/global.lua b/kong/global.lua index 51f9e45470be..e318e34b0612 100644 --- a/kong/global.lua +++ b/kong/global.lua @@ -156,9 +156,10 @@ do function _GLOBAL.init_cluster_events(kong_config, db) return kong_cluster_events.new({ - db = db, - poll_interval = kong_config.db_update_frequency, - poll_offset = kong_config.db_update_propagation, + db = db, + poll_interval = kong_config.db_update_frequency, + poll_offset = kong_config.db_update_propagation, + poll_delay = kong_config.db_update_propagation, }) end @@ -177,7 +178,6 @@ do shm_name = "kong_db_cache", cluster_events = cluster_events, worker_events = worker_events, - propagation_delay = kong_config.db_update_propagation, ttl = db_cache_ttl, neg_ttl = db_cache_neg_ttl or db_cache_ttl, resurrect_ttl = kong_config.resurrect_ttl, @@ -204,7 +204,6 @@ do shm_name = "kong_core_db_cache", cluster_events = cluster_events, worker_events = worker_events, - propagation_delay = kong_config.db_update_propagation, ttl = db_cache_ttl, neg_ttl = db_cache_neg_ttl or db_cache_ttl, resurrect_ttl = kong_config.resurrect_ttl, diff --git a/kong/init.lua b/kong/init.lua index d2b3e10995e6..46ec67dc677b 100644 --- a/kong/init.lua +++ b/kong/init.lua @@ -174,6 +174,19 @@ do ngx.shared.kong:flush_all() ngx.shared.kong:flush_expired(0) + local db_cache = { + "kong_core_db_cache", + "kong_db_cache", + -- no need to purge the second page for DB-less mode, as when reload + -- happens Kong always uses the first page afterwards + } + for _, shm in ipairs(db_cache) do + ngx.shared[shm]:flush_all() + ngx.shared[shm]:flush_expired(0) + ngx.shared[shm .. "_miss"]:flush_all() + ngx.shared[shm .. "_miss"]:flush_expired(0) + end + for _, key in ipairs(preserve_keys) do ngx.shared.kong:set(key, preserved[key]) end @@ -419,7 +432,9 @@ function Kong.init() certificate.init() end - clustering.init(config) + if subsystem == "http" then + clustering.init(config) + end -- Load plugins as late as possible so that everything is set up assert(db.plugins:load_plugin_schemas(config.loaded_plugins)) @@ -500,7 +515,7 @@ function Kong.init_worker() kong.cache = cache local core_cache, err = kong_global.init_core_cache(kong.configuration, cluster_events, worker_events) - if not cache then + if not core_cache then stash_init_worker_error("failed to instantiate 'kong.core_cache' module: " .. 
err) return @@ -550,7 +565,9 @@ function Kong.init_worker() go.manage_pluginserver() end - clustering.init_worker(kong.configuration) + if subsystem == "http" then + clustering.init_worker(kong.configuration) + end end @@ -1155,4 +1172,45 @@ function Kong.serve_cluster_listener(options) end +do + local declarative = require("kong.db.declarative") + local cjson = require("cjson.safe") + + function Kong.stream_config_listener() + local sock, err = ngx.req.socket() + if not sock then + kong.log.crit("unable to obtain request socket: ", err) + return + end + + local data, err = sock:receive("*a") + if not data then + ngx_log(ngx_CRIT, "unable to receive new config: ", err) + return + end + + local parsed + parsed, err = cjson.decode(data) + if not parsed then + kong.log.err("unable to parse received declarative config: ", err) + return + end + + local ok, err = concurrency.with_worker_mutex({ name = "dbless-worker" }, function() + return declarative.load_into_cache_with_events(parsed[1], parsed[2]) + end) + + if not ok then + if err == "no memory" then + kong.log.err("not enough cache space for declarative config, " .. + "consider raising the \"mem_cache_size\" Kong config") + + else + kong.log.err("failed loading declarative config into cache: ", err) + end + end + end +end + + return Kong diff --git a/kong/meta.lua b/kong/meta.lua index 26014ff1be99..cfc88b8e869a 100644 --- a/kong/meta.lua +++ b/kong/meta.lua @@ -1,7 +1,7 @@ local version = setmetatable({ major = 2, minor = 0, - patch = 3, + patch = 4, -- suffix = "" }, { __tostring = function(t) diff --git a/kong/pdk/response.lua b/kong/pdk/response.lua index 63b92f750c22..4fc005abf368 100644 --- a/kong/pdk/response.lua +++ b/kong/pdk/response.lua @@ -607,7 +607,7 @@ local function new(self, major_version) local err json, err = cjson.encode(body) if err then - return nil, err + error(fmt("body encoding failed while flushing response: %s", err), 2) end end end diff --git a/kong/reports.lua b/kong/reports.lua index 00223bb90c5b..522b601e2b52 100644 --- a/kong/reports.lua +++ b/kong/reports.lua @@ -205,7 +205,7 @@ end -- returns a string indicating the "kind" of the current request/stream: -- "http", "https", "h2c", "h2", "grpc", "grpcs", "ws", "wss", "tcp", "tls" -- or nil + error message if the suffix could not be determined -local function get_current_suffix() +local function get_current_suffix(ctx) if subsystem == "stream" then if var.ssl_protocol then return "tls" @@ -215,8 +215,8 @@ local function get_current_suffix() end local scheme = var.scheme + local proxy_mode = var.kong_proxy_mode if scheme == "http" or scheme == "https" then - local proxy_mode = var.kong_proxy_mode if proxy_mode == "http" then local http_upgrade = var.http_upgrade if http_upgrade and lower(http_upgrade) == "websocket" then @@ -235,7 +235,7 @@ local function get_current_suffix() return "h2" end - return scheme + return scheme -- http/https end if proxy_mode == "grpc" then @@ -249,7 +249,12 @@ local function get_current_suffix() end end - return nil, "unknown request scheme: " .. 
tostring(scheme) + if ctx.KONG_UNEXPECTED then + return nil + end + + log(WARN, "could not determine log suffix (scheme=", tostring(scheme), + ", proxy_mode=", tostring(proxy_mode), ")") end @@ -336,6 +341,7 @@ local function configure_ping(kong_conf) add_immutable_value("database", kong_conf.database) add_immutable_value("role", kong_conf.role) + add_immutable_value("kic", kong_conf.kic) add_immutable_value("_admin", #kong_conf.admin_listeners > 0 and 1 or 0) add_immutable_value("_proxy", #kong_conf.proxy_listeners > 0 and 1 or 0) add_immutable_value("_stream", #kong_conf.stream_listeners > 0 and 1 or 0) @@ -404,17 +410,15 @@ return { local count_key = subsystem == "stream" and STREAM_COUNT_KEY or REQUEST_COUNT_KEY - incr_counter(count_key) - local suffix, err = get_current_suffix() + + if ctx.ran_go_plugin then + incr_counter(GO_PLUGINS_REQUEST_COUNT_KEY) + end + + local suffix = get_current_suffix(ctx) if suffix then incr_counter(count_key .. ":" .. suffix) - - if ctx.ran_go_plugin then - incr_counter(GO_PLUGINS_REQUEST_COUNT_KEY) - end - else - log(WARN, err) end end, diff --git a/kong/runloop/balancer.lua b/kong/runloop/balancer.lua index 2eebd65b5e2a..3ba0d9a70aca 100644 --- a/kong/runloop/balancer.lua +++ b/kong/runloop/balancer.lua @@ -524,31 +524,38 @@ local function check_target_history(upstream, balancer) local old_size = #old_history local new_size = #new_history - -- compare balancer history with db-loaded history - local last_equal_index = 0 -- last index where history is the same - for i, entry in ipairs(old_history) do - local new_entry = new_history[i] - if new_entry and - new_entry.name == entry.name and - new_entry.port == entry.port and - new_entry.weight == entry.weight - then - last_equal_index = i - else - break + if new_size >= old_size then + -- compare balancer history with db-loaded history + local last_equal_index = 0 -- last index where history is the same + for i, entry in ipairs(old_history) do + local new_entry = new_history[i] + if new_entry and + new_entry.name == entry.name and + new_entry.port == entry.port and + new_entry.weight == entry.weight + then + last_equal_index = i + else + break + end end - end - if last_equal_index == new_size and new_size == old_size then - -- No history update is necessary in the balancer object. - return true - elseif last_equal_index == old_size then - -- history is the same, so we only need to add new entries - apply_history(balancer, new_history, last_equal_index + 1) - return true + if last_equal_index == old_size then + -- The history from which our balancer was build is still identical + if new_size == old_size then + -- No new targets, so no update is necessary in the balancer object + return true + end + + -- new_size > old_size in this case + -- history is the same, but we now have additional entries, apply them + apply_history(balancer, new_history, last_equal_index + 1) + return true + end end - -- history not the same. + -- History not the same. Either a history-cleanup happened, or due to + -- eventual-consistency a target showed up "in the past". -- TODO: ideally we would undo the last ones until we're equal again -- and can replay changes, but not supported by ring-balancer yet. 
-- for now; create a new balancer from scratch @@ -563,8 +570,10 @@ local function check_target_history(upstream, balancer) return true end + local function load_upstreams_dict_into_memory() local upstreams_dict = {} + -- build a dictionary, indexed by the upstream name for up, err in singletons.db.upstreams:each() do if err then @@ -574,17 +583,21 @@ local function load_upstreams_dict_into_memory() upstreams_dict[up.name] = up.id end + return upstreams_dict end _load_upstreams_dict_into_memory = load_upstreams_dict_into_memory + +local opts = { neg_ttl = 10 } + + ------------------------------------------------------------------------------ -- Implements a simple dictionary with all upstream-ids indexed -- by their name. -- @return The upstreams dictionary (a map with upstream names as string keys -- and upstream entity tables as values), or nil+error local function get_all_upstreams() - local opts = { neg_ttl = 10 } if kong.configuration.worker_consistency == "eventual" then return singletons.core_cache:get("balancer:upstreams", opts, noop) end diff --git a/kong/runloop/certificate.lua b/kong/runloop/certificate.lua index 137e0517f0bb..1d91636bdf84 100644 --- a/kong/runloop/certificate.lua +++ b/kong/runloop/certificate.lua @@ -3,6 +3,11 @@ local ngx_ssl = require "ngx.ssl" local pl_utils = require "pl.utils" local mlcache = require "resty.mlcache" +if jit.arch == 'arm64' then + jit.off(mlcache.get_bulk) -- "temporary" workaround for issue #5748 on ARM +end + + local ngx_log = ngx.log local ERR = ngx.ERR diff --git a/kong/templates/kong_defaults.lua b/kong/templates/kong_defaults.lua index d9dc72cbc386..c611176eab23 100644 --- a/kong/templates/kong_defaults.lua +++ b/kong/templates/kong_defaults.lua @@ -151,4 +151,5 @@ lua_package_path = ./?.lua;./?/init.lua; lua_package_cpath = NONE role = traditional +kic = off ]] diff --git a/kong/templates/nginx_kong_stream.lua b/kong/templates/nginx_kong_stream.lua index eaea0e9dff09..2a7a87cfc8e2 100644 --- a/kong/templates/nginx_kong_stream.lua +++ b/kong/templates/nginx_kong_stream.lua @@ -119,5 +119,17 @@ server { Kong.log() } } + +> if database == "off" then +server { + listen unix:${{PREFIX}}/stream_config.sock; + + error_log ${{ADMIN_ERROR_LOG}} ${{LOG_LEVEL}}; + + content_by_lua_block { + Kong.stream_config_listener() + } +} +> end -- database == "off" > end -- #stream_listeners > 0 ]] diff --git a/spec/01-unit/03-conf_loader_spec.lua b/spec/01-unit/03-conf_loader_spec.lua index f6ca62077332..f9c801d5e8f2 100644 --- a/spec/01-unit/03-conf_loader_spec.lua +++ b/spec/01-unit/03-conf_loader_spec.lua @@ -87,6 +87,18 @@ describe("Configuration loader", function() assert.True(conf.loaded_plugins["foo"]) assert.True(conf.loaded_plugins["bar"]) end) + it("apply # transformations when loading from config file directly", function() + local conf = assert(conf_loader(nil, { + pg_password = "!abCDefGHijKL4\\#1MN2OP3", + })) + assert.same("!abCDefGHijKL4#1MN2OP3", conf.pg_password) + end) + it("no longer applies # transformations when loading from .kong_env (issue #5761)", function() + local conf = assert(conf_loader(nil, { + pg_password = "!abCDefGHijKL4\\#1MN2OP3", + }, { from_kong_env = true, })) + assert.same("!abCDefGHijKL4\\#1MN2OP3", conf.pg_password) + end) it("loads custom plugins surrounded by spaces", function() local conf = assert(conf_loader(nil, { plugins = " hello-world , another-one " diff --git a/spec/01-unit/11-reports_spec.lua b/spec/01-unit/11-reports_spec.lua index ee59d0212729..96eab5fd7169 100644 --- 
a/spec/01-unit/11-reports_spec.lua +++ b/spec/01-unit/11-reports_spec.lua @@ -170,6 +170,32 @@ describe("reports", function() end) end) + describe("sends 'kic'", function() + it("default (off)", function() + local conf = assert(conf_loader(nil)) + reports.configure_ping(conf) + + local thread = helpers.tcp_server(8189) + reports.send_ping("127.0.0.1", 8189) + + local _, res = assert(thread:join()) + assert._matches("kic=false", res, nil, true) + end) + + it("enabled", function() + local conf = assert(conf_loader(nil, { + kic = "on", + })) + reports.configure_ping(conf) + + local thread = helpers.tcp_server(8189) + reports.send_ping("127.0.0.1", 8189) + + local _, res = assert(thread:join()) + assert.matches("kic=true", res, nil, true) + end) + end) + describe("sends '_admin' for 'admin_listen'", function() it("off", function() local conf = assert(conf_loader(nil, { diff --git a/spec/02-integration/02-cmd/03-reload_spec.lua b/spec/02-integration/02-cmd/03-reload_spec.lua index 4eebbca84b9e..6d2769956b4e 100644 --- a/spec/02-integration/02-cmd/03-reload_spec.lua +++ b/spec/02-integration/02-cmd/03-reload_spec.lua @@ -2,9 +2,6 @@ local helpers = require "spec.helpers" local cjson = require "cjson" -for _, strategy in helpers.each_strategy() do - - local function get_kong_workers() local workers helpers.wait_until(function() @@ -40,7 +37,7 @@ local function assert_wait_call(fn, ...) end -local function wait_until_no_common_workers(workers, expected_total) +local function wait_until_no_common_workers(workers, expected_total, strategy) if strategy == "cassandra" then ngx.sleep(0.5) end @@ -71,16 +68,18 @@ local function wait_until_no_common_workers(workers, expected_total) end -local function kong_reload(...) +local function kong_reload(strategy, ...) local workers = get_kong_workers() local ok, err = helpers.kong_exec(...) if ok then - wait_until_no_common_workers(workers) + wait_until_no_common_workers(workers, nil, strategy) end return ok, err end +for _, strategy in helpers.each_strategy() do + describe("kong reload #" .. strategy, function() lazy_setup(function() helpers.get_db_utils(nil, {}) -- runs migrations @@ -99,7 +98,7 @@ describe("kong reload #" .. strategy, function() local nginx_pid = assert_wait_call(helpers.file.read, helpers.test_conf.nginx_pid) -- kong_exec uses test conf too, so same prefix - assert(kong_reload("reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) local nginx_pid_after = assert_wait_call(helpers.file.read, helpers.test_conf.nginx_pid) @@ -182,7 +181,7 @@ describe("kong reload #" .. strategy, function() local prng_seeds_1 = json.prng_seeds client:close() - assert(kong_reload("reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) client = helpers.admin_client() local res = assert(client:get("/")) @@ -215,7 +214,7 @@ describe("kong reload #" .. strategy, function() local node_id_1 = json.node_id client:close() - assert(kong_reload("reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) client = helpers.admin_client() local res = assert(client:get("/")) @@ -291,7 +290,7 @@ describe("kong reload #" .. strategy, function() - example.test ]], yaml_file) - assert(kong_reload("reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. 
helpers.test_conf.prefix)) helpers.wait_until(function() pok, admin_client = pcall(helpers.admin_client) @@ -361,7 +360,7 @@ describe("kong reload #" .. strategy, function() admin_client = assert(helpers.admin_client()) - assert(kong_reload("reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) admin_client = assert(helpers.admin_client()) local res = assert(admin_client:send { @@ -452,7 +451,7 @@ describe("kong reload #" .. strategy, function() weight: 100 ]], yaml_file) - assert(kong_reload("reload --prefix " .. helpers.test_conf.prefix)) + assert(kong_reload(strategy, "reload --prefix " .. helpers.test_conf.prefix)) helpers.wait_until(function() pok, admin_client = pcall(helpers.admin_client) @@ -492,3 +491,157 @@ describe("kong reload #" .. strategy, function() end) end + + +describe("key-auth plugin invalidation on dbless reload", function() + it("(regression - issue 5705)", function() + local admin_client + local proxy_client + local yaml_file = helpers.make_yaml_file([[ + _format_version: "1.1" + services: + - name: my-service + url: https://example.com + plugins: + - name: key-auth + routes: + - name: my-route + paths: + - / + consumers: + - username: my-user + keyauth_credentials: + - key: my-key + ]]) + + finally(function() + os.remove(yaml_file) + helpers.stop_kong(helpers.test_conf.prefix, true) + if admin_client then + admin_client:close() + end + if proxy_client then + proxy_client:close() + end + end) + + assert(helpers.start_kong({ + database = "off", + declarative_config = yaml_file, + nginx_worker_processes = 1, + nginx_conf = "spec/fixtures/custom_nginx.template", + })) + + proxy_client = helpers.proxy_client() + local res = assert(proxy_client:send { + method = "GET", + path = "/", + headers = { + ["apikey"] = "my-key" + } + }) + assert.res_status(200, res) + + res = assert(proxy_client:send { + method = "GET", + path = "/", + headers = { + ["apikey"] = "my-new-key" + } + }) + assert.res_status(401, res) + + proxy_client:close() + + admin_client = assert(helpers.admin_client()) + local res = assert(admin_client:send { + method = "GET", + path = "/key-auths", + }) + assert.res_status(200, res) + + local body = assert.res_status(200, res) + local json = cjson.decode(body) + assert.same(1, #json.data) + assert.same("my-key", json.data[1].key) + admin_client:close() + + helpers.make_yaml_file([[ + _format_version: "1.1" + services: + - name: my-service + url: https://example.com + plugins: + - name: key-auth + routes: + - name: my-route + paths: + - / + consumers: + - username: my-user + keyauth_credentials: + - key: my-new-key + ]], yaml_file) + assert(kong_reload("off", "reload --prefix " .. 
helpers.test_conf.prefix)) + + + local res + + helpers.wait_until(function() + admin_client = assert(helpers.admin_client()) + + res = assert(admin_client:send { + method = "GET", + path = "/key-auths", + }) + assert.res_status(200, res) + local body = assert.res_status(200, res) + local json = cjson.decode(body) + admin_client:close() + assert.same(1, #json.data) + return "my-new-key" == json.data[1].key + end, 5) + + helpers.wait_until(function() + proxy_client = helpers.proxy_client() + res = assert(proxy_client:send { + method = "GET", + path = "/", + headers = { + ["apikey"] = "my-key" + } + }) + proxy_client:close() + return res.status == 401 + end, 5) + + helpers.wait_until(function() + proxy_client = helpers.proxy_client() + res = assert(proxy_client:send { + method = "GET", + path = "/", + headers = { + ["apikey"] = "my-new-key" + } + }) + local body = res:read_body() + proxy_client:close() + return body ~= [[{"message":"Invalid authentication credentials"}]] + end, 5) + + admin_client = assert(helpers.admin_client()) + local res = assert(admin_client:send { + method = "GET", + path = "/key-auths", + }) + assert.res_status(200, res) + + local body = assert.res_status(200, res) + local json = cjson.decode(body) + assert.same(1, #json.data) + assert.same("my-new-key", json.data[1].key) + admin_client:close() + + end) +end) + diff --git a/spec/02-integration/04-admin_api/15-off_spec.lua b/spec/02-integration/04-admin_api/15-off_spec.lua index 0fd0e9d71642..fa8c681d4aa0 100644 --- a/spec/02-integration/04-admin_api/15-off_spec.lua +++ b/spec/02-integration/04-admin_api/15-off_spec.lua @@ -25,6 +25,8 @@ describe("Admin API #off", function() assert(helpers.start_kong({ database = "off", mem_cache_size = "10m", + stream_listen = "127.0.0.1:9011", + nginx_conf = "spec/fixtures/custom_nginx.template", })) end) @@ -206,6 +208,7 @@ describe("Admin API #off", function() assert.response(res).has.status(201) end) + it("accepts configuration as a JSON string", function() local res = assert(client:send { method = "POST", @@ -551,6 +554,41 @@ describe("Admin API #off", function() assert.response(res).has.status(201) end) + + it("updates stream subsystem config", function() + local res = assert(client:send { + method = "POST", + path = "/config", + body = { + config = [[ + _format_version: "1.1" + services: + - connect_timeout: 60000 + host: 127.0.0.1 + name: mock + port: 15557 + protocol: tcp + routes: + - name: mock_route + protocols: + - tcp + destinations: + - port: 9011 + ]], + }, + headers = { + ["Content-Type"] = "application/json" + } + }) + + assert.response(res).has.status(201) + + local sock = ngx.socket.tcp() + assert(sock:connect("127.0.0.1", 9011)) + assert(sock:send("hi\n")) + assert.equals(sock:receive(), "hi") + sock:close() + end) end) describe("/upstreams", function() diff --git a/spec/02-integration/05-proxy/01-proxy_spec.lua b/spec/02-integration/05-proxy/01-proxy_spec.lua index 92c55349d189..594cb4778b18 100644 --- a/spec/02-integration/05-proxy/01-proxy_spec.lua +++ b/spec/02-integration/05-proxy/01-proxy_spec.lua @@ -15,9 +15,9 @@ local function get_listeners(filename) local file = assert(utils.readfile(filename)) local result = {} for block in file:gmatch("[%\n%s]+server%s+(%b{})") do - local server = {} local server_name = block:match("[%\n%s]server_name%s(.-);") server_name = server_name and stringx.strip(server_name) or "stream" + local server = result[server_name] or {} result[server_name] = server for listen in block:gmatch("[%\n%s]listen%s(.-);") do listen = 
stringx.strip(listen) @@ -98,13 +98,28 @@ describe("#stream proxy interface listeners", function() stream_listen = "127.0.0.1:9011, 127.0.0.1:9012", })) - assert.equals(1, count_server_blocks(helpers.test_conf.nginx_kong_stream_conf)) - assert.same({ - ["127.0.0.1:9011"] = 1, - ["127.0.0.1:9012"] = 2, - [1] = "127.0.0.1:9011", - [2] = "127.0.0.1:9012", - }, get_listeners(helpers.test_conf.nginx_kong_stream_conf).stream) + if helpers.test_conf.database == "off" then + local stream_config_sock_path = "unix:" .. helpers.test_conf.prefix .. "/stream_config.sock" + + assert.equals(2, count_server_blocks(helpers.test_conf.nginx_kong_stream_conf)) + assert.same({ + ["127.0.0.1:9011"] = 1, + ["127.0.0.1:9012"] = 2, + [stream_config_sock_path] = 3, + [1] = "127.0.0.1:9011", + [2] = "127.0.0.1:9012", + [3] = stream_config_sock_path, + }, get_listeners(helpers.test_conf.nginx_kong_stream_conf).stream) + + else + assert.equals(1, count_server_blocks(helpers.test_conf.nginx_kong_stream_conf)) + assert.same({ + ["127.0.0.1:9011"] = 1, + ["127.0.0.1:9012"] = 2, + [1] = "127.0.0.1:9011", + [2] = "127.0.0.1:9012", + }, get_listeners(helpers.test_conf.nginx_kong_stream_conf).stream) + end for i = 9011, 9012 do local sock = ngx.socket.tcp() diff --git a/spec/02-integration/05-proxy/10-balancer/01-healthchecks_spec.lua b/spec/02-integration/05-proxy/10-balancer/01-healthchecks_spec.lua index ec4d9660d1fa..8d66b84f38cc 100644 --- a/spec/02-integration/05-proxy/10-balancer/01-healthchecks_spec.lua +++ b/spec/02-integration/05-proxy/10-balancer/01-healthchecks_spec.lua @@ -7,6 +7,9 @@ local utils = require "kong.tools.utils" for _, strategy in helpers.each_strategy() do local bp + local DB_UPDATE_PROPAGATION = strategy == "cassandra" and 0.1 or 0 + local DB_UPDATE_FREQUENCY = strategy == "cassandra" and 0.1 or 0.1 + describe("Healthcheck #" .. strategy, function() lazy_setup(function() bp = bu.get_db_utils_for_dc_and_admin_api(strategy, { @@ -61,8 +64,10 @@ for _, strategy in helpers.each_strategy() do assert(helpers.start_kong({ database = strategy, + dns_resolver = "127.0.0.1", nginx_conf = "spec/fixtures/custom_nginx.template", - db_update_frequency = 0.1, + db_update_frequency = DB_UPDATE_FREQUENCY, + db_update_propagation = DB_UPDATE_PROPAGATION, }, nil, nil, fixtures)) end) @@ -342,10 +347,12 @@ for _, strategy in helpers.each_strategy() do assert(helpers.start_kong({ database = strategy, + dns_resolver = "127.0.0.1", nginx_conf = "spec/fixtures/custom_nginx.template", lua_ssl_trusted_certificate = "spec/fixtures/kong_spec.crt", stream_listen = "off", - db_update_frequency = 0.1, + db_update_frequency = DB_UPDATE_FREQUENCY, + db_update_propagation = DB_UPDATE_PROPAGATION, plugins = "bundled,fail-once-auth", })) end) @@ -366,12 +373,14 @@ for _, strategy in helpers.each_strategy() do -- start a second Kong instance helpers.start_kong({ database = strategy, + dns_resolver = "127.0.0.1", admin_listen = "127.0.0.1:" .. admin_port_2, proxy_listen = "127.0.0.1:" .. 
proxy_port_2, stream_listen = "off", prefix = "servroot2", log_level = "debug", - db_update_frequency = 0.1, + db_update_frequency = DB_UPDATE_FREQUENCY, + db_update_propagation = DB_UPDATE_PROPAGATION, }) end) @@ -390,12 +399,18 @@ for _, strategy in helpers.each_strategy() do local upstream_name, upstream_id = bu.add_upstream(bp) local port = bu.add_target(bp, upstream_id, localhost) local api_host = bu.add_api(bp, upstream_name) + bu.wait_for_router_update(bp, old_rv, localhost, proxy_port_1, admin_port_1) + old_rv = bu.get_router_version(admin_port_1) bu.wait_for_router_update(bp, old_rv, localhost, proxy_port_2, admin_port_2) bu.end_testcase_setup(strategy, bp) - -- server responds, then fails, then responds again - local server = bu.http_server(localhost, port, { 20, 20, 20 }) + local server + helpers.wait_until(function() + server = assert(bu.http_server(localhost, port, { 20, 20, 20 })) + return true + end, 10) + -- server responds, then fails, then responds again local seq = { { port = proxy_port_2, oks = 10, fails = 0, last_status = 200 }, { port = proxy_port_1, oks = 10, fails = 0, last_status = 200 }, diff --git a/spec/02-integration/05-proxy/10-balancer/04-round-robin_spec.lua b/spec/02-integration/05-proxy/10-balancer/04-round-robin_spec.lua index aee466b9d61d..bd6da42e307a 100644 --- a/spec/02-integration/05-proxy/10-balancer/04-round-robin_spec.lua +++ b/spec/02-integration/05-proxy/10-balancer/04-round-robin_spec.lua @@ -297,7 +297,6 @@ for _, consistency in ipairs(bu.consistencies) do local _, _, status = bu.client_requests(1, api_host) assert.same(503, status) end) - end) describe("Balancing with no targets #" .. consistency, function() @@ -335,6 +334,51 @@ for _, consistency in ipairs(bu.consistencies) do local _, _, status = bu.client_requests(1, api_host) assert.same(503, status) end) + + for mode, localhost in pairs(bu.localhosts) do + it("removing and adding the same target #" .. 
mode, function() + + bu.begin_testcase_setup(strategy, bp) + local upstream_name, upstream_id = bu.add_upstream(bp) + local port = bu.add_target(bp, upstream_id, localhost, nil, { weight = 100 }) + local api_host = bu.add_api(bp, upstream_name) + bu.end_testcase_setup(strategy, bp, consistency) + + local requests = 20 + + local server = bu.http_server(localhost, port, { requests }) + local oks = bu.client_requests(requests, api_host) + local _, count = server:done() + assert.equal(requests, oks) + assert.equal(requests, count) + + -- remove target + bu.begin_testcase_setup_update(strategy, bp) + bu.add_target(bp, upstream_id, localhost, port, { + weight = 0, + }) + bu.end_testcase_setup(strategy, bp, consistency) + + server = bu.http_server(localhost, port, { requests }) + oks = bu.client_requests(requests, api_host) + _, count = server:done() + assert.equal(0, oks) + assert.equal(0, count) + + -- add the target back with same weight as initial weight + bu.begin_testcase_setup_update(strategy, bp) + bu.add_target(bp, upstream_id, localhost, port, { + weight = 100, + }) + bu.end_testcase_setup(strategy, bp, consistency) + + server = bu.http_server(localhost, port, { requests }) + oks = bu.client_requests(requests, api_host) + _, count = server:done() + assert.equal(requests, oks) + assert.equal(requests, count) + end) + end end) end diff --git a/spec/02-integration/05-proxy/22-reports_spec.lua b/spec/02-integration/05-proxy/22-reports_spec.lua index 06f4377cf112..23046646d3e0 100644 --- a/spec/02-integration/05-proxy/22-reports_spec.lua +++ b/spec/02-integration/05-proxy/22-reports_spec.lua @@ -422,5 +422,22 @@ for _, strategy in helpers.each_strategy() do assert.match("tcp_streams=1", reports_data[1]) -- it counts the stream request for the ping assert.match("tls_streams=1", reports_data[1]) end) + + it("does not log NGINX-produced errors", function() + local proxy_client = assert(helpers.proxy_client()) + local res = assert(proxy_client:send { + method = "GET", + path = "/", + headers = { + ["X-Large"] = string.rep("a", 2^10 * 10), -- default large_client_header_buffers is 8k + } + }) + assert.res_status(494, res) + proxy_client:close() + + assert.errlog() + .not_has + .line([[could not determine log suffix]], true) + end) end) end diff --git a/spec/02-integration/06-invalidations/01-cluster_events_spec.lua b/spec/02-integration/06-invalidations/01-cluster_events_spec.lua index a2f9482e0553..b2831e7ebfbb 100644 --- a/spec/02-integration/06-invalidations/01-cluster_events_spec.lua +++ b/spec/02-integration/06-invalidations/01-cluster_events_spec.lua @@ -314,6 +314,39 @@ for _, strategy in helpers.each_strategy() do assert(cluster_events_1:poll()) assert.spy(spy_func).was_called(1) -- called end) + + it("broadcasts an event with a polling delay for subscribers", function() + local delay = 1 + + local cluster_events_1 = assert(kong_cluster_events.new { + db = db, + node_id = uuid_1, + poll_delay = delay, + }) + + local cluster_events_2 = assert(kong_cluster_events.new { + db = db, + node_id = uuid_2, + poll_delay = delay, + }) + + assert(cluster_events_1:subscribe("nbf_channel", cb, false)) -- false to not start auto polling + + assert(cluster_events_2:broadcast("nbf_channel", "hello world")) + + assert(cluster_events_1:poll()) + assert.spy(spy_func).was_not_called() -- not called yet + + ngx.sleep(0.001) -- still yield in case our timer is set to 0 + + assert(cluster_events_1:poll()) + assert.spy(spy_func).was_not_called() -- still not called + + ngx.sleep(delay) -- go past our desired 
`nbf` delay + + assert(cluster_events_1:poll()) + assert.spy(spy_func).was_called(1) -- called + end) end) end) end diff --git a/spec/02-integration/11-dbless/01-respawn_spec.lua b/spec/02-integration/11-dbless/01-respawn_spec.lua new file mode 100644 index 000000000000..fae73ae75a69 --- /dev/null +++ b/spec/02-integration/11-dbless/01-respawn_spec.lua @@ -0,0 +1,87 @@ +local helpers = require "spec.helpers" + +describe("worker respawn", function() + local admin_client, proxy_client + + lazy_setup(function() + assert(helpers.start_kong({ + database = "off", + })) + end) + + lazy_teardown(function() + helpers.stop_kong(nil, true) + end) + + before_each(function() + admin_client = assert(helpers.admin_client()) + proxy_client = assert(helpers.proxy_client()) + end) + + after_each(function() + if admin_client then + admin_client:close() + end + + if proxy_client then + proxy_client:close() + end + end) + + it("lands on the correct cache page #5799", function() + local res = assert(admin_client:send { + method = "POST", + path = "/config", + body = { + config = [[ + _format_version: "1.1" + services: + - name: my-service + url: https://example.com + plugins: + - name: key-auth + routes: + - name: my-route + paths: + - / + + consumers: + - username: my-user + keyauth_credentials: + - key: my-key + ]], + }, + headers = { + ["Content-Type"] = "application/json" + } + }) + + assert.response(res).has.status(201) + + local res = assert(proxy_client:get("/")) + assert.res_status(401, res) + + res = assert(proxy_client:get("/", { + headers = { + apikey = "my-key" + } + })) + assert.res_status(200, res) + + -- kill all the workers forcing all of them to respawn + helpers.signal_workers(nil, "-TERM") + + proxy_client:close() + proxy_client = assert(helpers.proxy_client()) + + res = assert(proxy_client:get("/")) + assert.res_status(401, res) + + res = assert(proxy_client:get("/", { + headers = { + apikey = "my-key" + } + })) + assert.res_status(200, res) + end) +end) diff --git a/spec/fixtures/custom_nginx.template b/spec/fixtures/custom_nginx.template index 45592917ef50..2021324ba04c 100644 --- a/spec/fixtures/custom_nginx.template +++ b/spec/fixtures/custom_nginx.template @@ -738,6 +738,18 @@ stream { Kong.log() } } + +> if database == "off" then + server { + listen unix:${{PREFIX}}/stream_config.sock; + + error_log ${{PROXY_ERROR_LOG}} ${{LOG_LEVEL}}; + + content_by_lua_block { + Kong.stream_config_listener() + } + } +> end -- database == "off" > end -- #stream_listeners > 0 server { diff --git a/spec/helpers.lua b/spec/helpers.lua index 0016a590e7ea..174fb99b1d32 100644 --- a/spec/helpers.lua +++ b/spec/helpers.lua @@ -1778,6 +1778,104 @@ luassert:register("assertion", "cn", assert_cn, "assertion.cn.positive") +do + --- Generic modifier "errlog" + -- Will set an "errlog_path" value in the assertion state. + -- @name errlog + -- @param path A path to the errlog file (defaults to the test prefix's + -- errlog). + local function modifier_errlog(state, args) + local errlog_path = args[1] or conf.nginx_err_logs + + assert(type(errlog_path) == "string", "errlog modifier expects nil, or " .. + "a string as argument, got: " .. + type(errlog_path)) + + rawset(state, "errlog_path", errlog_path) + + return state + end + + luassert:register("modifier", "errlog", modifier_errlog) + + + --- Assertion checking is any line from a file matches the given regex or + -- substring. + -- @name line + -- @param regex The regex to evaluate against each line. 
+ -- @param plain If true, the regex argument will be considered as a plain + -- string. + -- @param timeout An optional timeout after which the assertion will fail if + -- reached. + -- @param fpath An optional path to the file (defaults to the errlog + -- modifier) + -- @see errlog + -- @usage + -- assert.not_line("[error]", true) + -- assert.errlog().not_has.line("[error]", true) + local function match_line(state, args) + local regex = args[1] + local plain = args[2] + local timeout = args[3] or 2 + local fpath = args[4] or rawget(state, "errlog_path") + + assert(type(regex) == "string", + "Expected the regex argument to be a string") + assert(type(fpath) == "string", + "Expected the file path argument to be a string") + assert(type(timeout) == "number" and timeout > 0, + "Expected the timeout argument to be a positive number") + + local pok = pcall(wait_until, function() + local logs = pl_file.read(fpath) + local from, _, err + + for line in logs:gmatch("[^\r\n]+") do + if plain then + from = string.find(line, regex, nil, true) + + else + from, _, err = ngx.re.find(line, regex) + if err then + error(err) + end + end + + if from then + table.insert(args, 1, line) + table.insert(args, 1, regex) + args.n = 2 + return true + end + end + end, timeout) + + table.insert(args, 1, fpath) + args.n = args.n + 1 + + return pok + end + + say:set("assertion.match_line.negative", unindent [[ + Expected file at: + %s + To match: + %s + ]]) + say:set("assertion.match_line.positive", unindent [[ + Expected file at: + %s + To not match: + %s + But matched line: + %s + ]]) + luassert:register("assertion", "line", match_line, + "assertion.match_line.negative", + "assertion.match_line.positive") +end + + ---------------- -- DNS-record mocking. -- These function allow to create mock dns records that the test Kong instance @@ -2514,4 +2612,20 @@ end return kill.kill(pid_path, signal) end, + -- send signal to all Nginx workers, not including the master + signal_workers = function(prefix, signal, pid_path) + if not pid_path then + local running_conf = get_running_conf(prefix) + if not running_conf then + error("no config file found at prefix: " .. prefix) + end + + pid_path = running_conf.nginx_pid + end + + local cmd = string.format("pkill %s -P `cat %s`", signal, pid_path) + local _, code = pl_utils.execute(cmd) + + return code + end, }
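
Taken together, the new `signal_workers` helper and the `errlog`/`line` assertions above make worker-respawn regressions straightforward to cover. A minimal Busted sketch (a hypothetical spec, modeled on the integration specs in this patch and assuming the usual `spec.helpers` setup):

```lua
local helpers = require "spec.helpers"


describe("worker respawn keeps the error log clean", function()
  lazy_setup(function()
    assert(helpers.start_kong({ database = "off" }))
  end)

  lazy_teardown(function()
    helpers.stop_kong(nil, true)
  end)

  it("respawns workers without logging errors", function()
    -- SIGTERM every worker; the Nginx master process respawns them
    helpers.signal_workers(nil, "-TERM")

    -- errlog() targets the test prefix's error.log by default; line() then
    -- polls the file (2s default timeout), here doing a plain-substring
    -- match instead of an ngx.re regex
    assert.errlog().not_has.line("[error]", true)
  end)
end)
```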