diff --git a/.ci/run_tests.sh b/.ci/run_tests.sh index d3c8147de96..b4d0857418b 100755 --- a/.ci/run_tests.sh +++ b/.ci/run_tests.sh @@ -2,7 +2,7 @@ set -e -CMD="busted -v -o gtest --exclude-tags=ci" +CMD="busted -v -o gtest --exclude-tags=ci --repeat=3" if [ "$TEST_SUITE" == "unit" ]; then CMD="$CMD --coverage spec/unit && luacov-coveralls -i kong" diff --git a/.ci/setup_dnsmasq.sh b/.ci/setup_dnsmasq.sh new file mode 100644 index 00000000000..0eddf6b93de --- /dev/null +++ b/.ci/setup_dnsmasq.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +if [ "$TEST_SUITE" == "unit" ]; then + echo "Exiting, no integration tests" + exit +fi + +mkdir -p $DNSMASQ_DIR + +if [ ! "$(ls -A $DNSMASQ_DIR)" ]; then + pushd $DNSMASQ_DIR + wget http://www.thekelleys.org.uk/dnsmasq/dnsmasq-${DNSMASQ_VERSION}.tar.gz + tar xzf dnsmasq-${DNSMASQ_VERSION}.tar.gz + + pushd dnsmasq-${DNSMASQ_VERSION} + make install DESTDIR=$DNSMASQ_DIR + popd + + popd +fi diff --git a/.ci/setup_lua.sh b/.ci/setup_lua.sh index 0f455c44805..2a2b2b2255d 100644 --- a/.ci/setup_lua.sh +++ b/.ci/setup_lua.sh @@ -16,12 +16,12 @@ source .ci/platform.sh # Lua/LuaJIT ############ -if [ "$LUA" == "luajit" ]; then +if [ "$LUA_VERSION" == "luajit" ]; then LUAJIT="yes" - LUA="luajit-2.0" -elif [ "$LUA" == "luajit-2.0" ]; then + LUA_VERSION="luajit-2.0" +elif [ "$LUA_VERSION" == "luajit-2.0" ]; then LUAJIT="yes" -elif [ "$LUA" == "luajit-2.1" ]; then +elif [ "$LUA_VERSION" == "luajit-2.1" ]; then LUAJIT="yes" fi @@ -33,9 +33,9 @@ if [ "$LUAJIT" == "yes" ]; then git clone https://github.com/luajit/luajit $LUAJIT_DIR pushd $LUAJIT_DIR - if [ "$LUA" == "luajit-2.0" ]; then + if [ "$LUA_VERSION" == "luajit-2.0" ]; then git checkout v2.0.4 - elif [ "$LUA" == "luajit-2.1" ]; then + elif [ "$LUA_VERSION" == "luajit-2.1" ]; then git checkout v2.1 fi @@ -43,22 +43,22 @@ if [ "$LUAJIT" == "yes" ]; then make install PREFIX=$LUAJIT_DIR popd - if [ "$LUA" == "luajit-2.1" ]; then + if [ "$LUA_VERSION" == "luajit-2.1" ]; then ln -sf 
$LUAJIT_DIR/bin/luajit-2.1.0-beta1 $LUAJIT_DIR/bin/luajit fi ln -sf $LUAJIT_DIR/bin/luajit $LUAJIT_DIR/bin/lua fi - LUA_INCLUDE="$LUAJIT_DIR/include/$LUA" + LUA_INCLUDE="$LUAJIT_DIR/include/$LUA_VERSION" else - if [ "$LUA" == "lua5.1" ]; then + if [ "$LUA_VERSION" == "lua5.1" ]; then curl http://www.lua.org/ftp/lua-5.1.5.tar.gz | tar xz pushd lua-5.1.5 - elif [ "$LUA" == "lua5.2" ]; then + elif [ "$LUA_VERSION" == "lua5.2" ]; then curl http://www.lua.org/ftp/lua-5.2.3.tar.gz | tar xz pushd lua-5.2.3 - elif [ "$LUA" == "lua5.3" ]; then + elif [ "$LUA_VERSION" == "lua5.3" ]; then curl http://www.lua.org/ftp/lua-5.3.0.tar.gz | tar xz pushd lua-5.3.0 fi @@ -84,11 +84,11 @@ git checkout v$LUAROCKS_VERSION if [ "$LUAJIT" == "yes" ]; then LUA_DIR=$LUAJIT_DIR -elif [ "$LUA" == "lua5.1" ]; then +elif [ "$LUA_VERSION" == "lua5.1" ]; then CONFIGURE_FLAGS=$CONFIGURE_FLAGS" --lua-version=5.1" -elif [ "$LUA" == "lua5.2" ]; then +elif [ "$LUA_VERSION" == "lua5.2" ]; then CONFIGURE_FLAGS=$CONFIGURE_FLAGS" --lua-version=5.2" -elif [ "$LUA" == "lua5.3" ]; then +elif [ "$LUA_VERSION" == "lua5.3" ]; then CONFIGURE_FLAGS=$CONFIGURE_FLAGS" --lua-version=5.3" fi diff --git a/.ci/setup_serf.sh b/.ci/setup_serf.sh new file mode 100755 index 00000000000..7d0032dbd99 --- /dev/null +++ b/.ci/setup_serf.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +set -e + +if [ "$TEST_SUITE" == "unit" ]; then + echo "Exiting, no integration tests" + exit +fi + +mkdir -p $SERF_DIR + +if [ ! 
"$(ls -A $SERF_DIR)" ]; then + pushd $SERF_DIR + wget https://releases.hashicorp.com/serf/${SERF_VERSION}/serf_${SERF_VERSION}_linux_amd64.zip + unzip serf_${SERF_VERSION}_linux_amd64.zip + popd +fi diff --git a/.luacheckrc b/.luacheckrc index 1eb3eaf1a7c..9381fe66113 100644 --- a/.luacheckrc +++ b/.luacheckrc @@ -1,6 +1,6 @@ redefined = false unused_args = false -globals = {"ngx", "dao", "app", "configuration", "process_id"} +globals = {"ngx", "dao", "app", "configuration", "events"} files["kong/"] = { std = "luajit" @@ -20,5 +20,5 @@ files["kong/vendor/resty_http.lua"] = { } files["spec/"] = { - globals = {"describe", "it", "before_each", "setup", "after_each", "teardown", "stub", "mock", "spy", "finally", "pending"} + globals = {"describe", "it", "before_each", "setup", "after_each", "teardown", "stub", "mock", "spy", "finally", "pending", "build"} } diff --git a/.travis.yml b/.travis.yml index 72238ffecc1..5dc4b3fee71 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,14 +11,18 @@ addons: - build-essential env: global: - - LUA=luajit-2.1 - - CASSANDRA_VERSION=2.1.9 + - LUA_VERSION=luajit-2.1 + - CASSANDRA_VERSION=2.2.4 - LUAROCKS_VERSION=2.2.2 - OPENSSL_VERSION=1.0.2e - OPENRESTY_VERSION=1.9.3.1 + - SERF_VERSION=0.7.0 + - DNSMASQ_VERSION=2.75 - LUAJIT_DIR=$HOME/luajit - LUAROCKS_DIR=$HOME/luarocks - OPENRESTY_DIR=$HOME/openresty + - SERF_DIR=$HOME/serf + - DNSMASQ_DIR=$HOME/dnsmasq - CASSANDRA_HOSTS=127.0.0.1 matrix: - TEST_SUITE=unit @@ -28,7 +32,9 @@ before_install: - bash .ci/setup_lua.sh - bash .ci/setup_openresty.sh - bash .ci/setup_cassandra.sh - - export PATH="$LUAJIT_DIR/bin:$LUAROCKS_DIR/bin:$OPENRESTY_DIR/nginx/sbin:$PATH" + - bash .ci/setup_serf.sh + - bash .ci/setup_dnsmasq.sh + - export PATH="$LUAJIT_DIR/bin:$LUAROCKS_DIR/bin:$OPENRESTY_DIR/nginx/sbin:$SERF_DIR:$DNSMASQ_DIR/usr/local/sbin:$PATH" - export LUA_PATH="./?.lua;$LUAROCKS_DIR/share/lua/5.1/?.lua;$LUAROCKS_DIR/share/lua/5.1/?/init.lua;$LUAROCKS_DIR/lib/lua/5.1/?.lua;$LUA_PATH" - export 
LUA_CPATH="./?.so;$LUAROCKS_DIR/lib/lua/5.1/?.so;$LUA_CPATH" install: @@ -38,7 +44,6 @@ install: - luarocks install luacheck - luarocks make kong-*.rockspec - "kong config -c kong.yml -e TEST" - - 'sed -i "2 i\dns_resolver: server" kong_TEST.yml' script: - make lint - .ci/run_tests.sh @@ -49,4 +54,6 @@ cache: - $LUAJIT_DIR - $LUAROCKS_DIR - $OPENRESTY_DIR - - $HOME/.ccm/repository + - $SERF_DIR + - $DNSMASQ_DIR + - $HOME/.ccm/repository \ No newline at end of file diff --git a/Makefile b/Makefile index f556b7acf73..7d2b090c7d1 100644 --- a/Makefile +++ b/Makefile @@ -54,4 +54,4 @@ coverage: @rm -f luacov.* @busted --coverage spec/ @luacov -c spec/.luacov - @tail -n 1 luacov.report.out | awk '{ print $$3 }' + @tail -n 1 luacov.report.out | awk '{ print $$3 }' \ No newline at end of file diff --git a/README.md b/README.md index c69c48961a4..b2034ff2f78 100644 --- a/README.md +++ b/README.md @@ -252,7 +252,7 @@ Support, Demo, Training, API Certifications and Consulting available at https:// ## License ``` -Copyright 2015 Mashape, Inc +Copyright 2016 Mashape, Inc Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/bin/kong b/bin/kong index 589b197fbc8..59de54d7b6d 100755 --- a/bin/kong +++ b/bin/kong @@ -10,26 +10,28 @@ -- This script is not parsed by lapp due to limitations of the said framework as it -- is currently implemented. 
-local cutils = require "kong.cli.utils" -local infos = cutils.get_kong_infos() +local luarocks = require "kong.cli.utils.luarocks" +local infos = luarocks.get_kong_infos() local commands = { - db = "kong.cli.db", - stop = "kong.cli.stop", - quit = "kong.cli.quit", - start = "kong.cli.start", - reload = "kong.cli.reload", - config = "kong.cli.config", - restart = "kong.cli.restart", - version = "kong.cli.version", - ["--version"] = "kong.cli.version", - migrations = "kong.cli.migrations" + db = "kong.cli.cmds.db", + stop = "kong.cli.cmds.stop", + quit = "kong.cli.cmds.quit", + start = "kong.cli.cmds.start", + reload = "kong.cli.cmds.reload", + config = "kong.cli.cmds.config", + restart = "kong.cli.cmds.restart", + version = "kong.cli.cmds.version", + status = "kong.cli.cmds.status", + migrations = "kong.cli.cmds.migrations", + cluster = "kong.cli.cmds.cluster", + ["--version"] = "kong.cli.cmds.version" } local help_message = string.format([[ Usage: kong where is one of: - start, restart, reload, stop, quit, version + start, restart, reload, stop, quit, cluster, status, migrations, version kong --help print this message kong --help print the help message of a command diff --git a/kong-0.5.4-1.rockspec b/kong-0.6.0rc3-1.rockspec similarity index 80% rename from kong-0.5.4-1.rockspec rename to kong-0.6.0rc3-1.rockspec index c6527a5b01c..1d46a54e83d 100644 --- a/kong-0.5.4-1.rockspec +++ b/kong-0.6.0rc3-1.rockspec @@ -1,9 +1,9 @@ package = "kong" -version = "0.5.4-1" +version = "0.6.0rc3-1" supported_platforms = {"linux", "macosx"} source = { url = "git://github.com/Mashape/kong", - tag = "0.5.4" + tag = "0.6.0rc3" } description = { summary = "Kong is a scalable and customizable API Management Layer built on top of Nginx.", @@ -26,6 +26,7 @@ dependencies = { "ansicolors ~> 1.0.2-3", "lbase64 ~> 20120820-1", "lua-resty-iputils ~> 0.2.0-1", + "mediator_lua ~> 1.1.2-0", "luasocket ~> 2.0.2-6", "lrexlib-pcre ~> 2.7.2-1", @@ -45,20 +46,25 @@ build = { ["kong.constants"] = 
"kong/constants.lua", - ["kong.cli.utils"] = "kong/cli/utils.lua", - ["kong.cli.utils.dnsmasq"] = "kong/cli/utils/dnsmasq.lua", + ["kong.cli.utils.logger"] = "kong/cli/utils/logger.lua", + ["kong.cli.utils.luarocks"] = "kong/cli/utils/luarocks.lua", ["kong.cli.utils.ssl"] = "kong/cli/utils/ssl.lua", - ["kong.cli.utils.signal"] = "kong/cli/utils/signal.lua", ["kong.cli.utils.input"] = "kong/cli/utils/input.lua", - ["kong.cli.db"] = "kong/cli/db.lua", - ["kong.cli.config"] = "kong/cli/config.lua", - ["kong.cli.quit"] = "kong/cli/quit.lua", - ["kong.cli.stop"] = "kong/cli/stop.lua", - ["kong.cli.start"] = "kong/cli/start.lua", - ["kong.cli.reload"] = "kong/cli/reload.lua", - ["kong.cli.restart"] = "kong/cli/restart.lua", - ["kong.cli.version"] = "kong/cli/version.lua", - ["kong.cli.migrations"] = "kong/cli/migrations.lua", + ["kong.cli.utils.services"] = "kong/cli/utils/services.lua", + ["kong.cli.cmds.config"] = "kong/cli/cmds/config.lua", + ["kong.cli.cmds.quit"] = "kong/cli/cmds/quit.lua", + ["kong.cli.cmds.stop"] = "kong/cli/cmds/stop.lua", + ["kong.cli.cmds.start"] = "kong/cli/cmds/start.lua", + ["kong.cli.cmds.reload"] = "kong/cli/cmds/reload.lua", + ["kong.cli.cmds.restart"] = "kong/cli/cmds/restart.lua", + ["kong.cli.cmds.version"] = "kong/cli/cmds/version.lua", + ["kong.cli.cmds.status"] = "kong/cli/cmds/status.lua", + ["kong.cli.cmds.migrations"] = "kong/cli/cmds/migrations.lua", + ["kong.cli.cmds.cluster"] = "kong/cli/cmds/cluster.lua", + ["kong.cli.services.base_service"] = "kong/cli/services/base_service.lua", + ["kong.cli.services.dnsmasq"] = "kong/cli/services/dnsmasq.lua", + ["kong.cli.services.serf"] = "kong/cli/services/serf.lua", + ["kong.cli.services.nginx"] = "kong/cli/services/nginx.lua", ["kong.tools.io"] = "kong/tools/io.lua", ["kong.tools.utils"] = "kong/tools/utils.lua", @@ -66,6 +72,7 @@ build = { ["kong.tools.syslog"] = "kong/tools/syslog.lua", ["kong.tools.ngx_stub"] = "kong/tools/ngx_stub.lua", ["kong.tools.printable"] = 
"kong/tools/printable.lua", + ["kong.tools.cluster"] = "kong/tools/cluster.lua", ["kong.tools.responses"] = "kong/tools/responses.lua", ["kong.tools.timestamp"] = "kong/tools/timestamp.lua", ["kong.tools.migrations"] = "kong/tools/migrations.lua", @@ -79,12 +86,16 @@ build = { ["kong.core.certificate"] = "kong/core/certificate.lua", ["kong.core.resolver"] = "kong/core/resolver.lua", ["kong.core.plugins_iterator"] = "kong/core/plugins_iterator.lua", + ["kong.core.hooks"] = "kong/core/hooks.lua", ["kong.core.reports"] = "kong/core/reports.lua", + ["kong.core.cluster"] = "kong/core/cluster.lua", + ["kong.core.events"] = "kong/core/events.lua", ["kong.dao.cassandra.schema.migrations"] = "kong/dao/cassandra/schema/migrations.lua", ["kong.dao.error"] = "kong/dao/error.lua", ["kong.dao.schemas_validation"] = "kong/dao/schemas_validation.lua", ["kong.dao.schemas.apis"] = "kong/dao/schemas/apis.lua", + ["kong.dao.schemas.nodes"] = "kong/dao/schemas/nodes.lua", ["kong.dao.schemas.consumers"] = "kong/dao/schemas/consumers.lua", ["kong.dao.schemas.plugins"] = "kong/dao/schemas/plugins.lua", ["kong.dao.cassandra.factory"] = "kong/dao/cassandra/factory.lua", @@ -92,6 +103,7 @@ build = { ["kong.dao.cassandra.base_dao"] = "kong/dao/cassandra/base_dao.lua", ["kong.dao.cassandra.migrations"] = "kong/dao/cassandra/migrations.lua", ["kong.dao.cassandra.apis"] = "kong/dao/cassandra/apis.lua", + ["kong.dao.cassandra.nodes"] = "kong/dao/cassandra/nodes.lua", ["kong.dao.cassandra.consumers"] = "kong/dao/cassandra/consumers.lua", ["kong.dao.cassandra.plugins"] = "kong/dao/cassandra/plugins.lua", @@ -111,11 +123,13 @@ build = { ["kong.plugins.basic-auth.handler"] = "kong/plugins/basic-auth/handler.lua", ["kong.plugins.basic-auth.access"] = "kong/plugins/basic-auth/access.lua", ["kong.plugins.basic-auth.schema"] = "kong/plugins/basic-auth/schema.lua", + ["kong.plugins.basic-auth.hooks"] = "kong/plugins/basic-auth/hooks.lua", ["kong.plugins.basic-auth.api"] = 
"kong/plugins/basic-auth/api.lua", ["kong.plugins.basic-auth.daos"] = "kong/plugins/basic-auth/daos.lua", ["kong.plugins.key-auth.schema.migrations"] = "kong/plugins/key-auth/schema/migrations.lua", ["kong.plugins.key-auth.handler"] = "kong/plugins/key-auth/handler.lua", + ["kong.plugins.key-auth.hooks"] = "kong/plugins/key-auth/hooks.lua", ["kong.plugins.key-auth.schema"] = "kong/plugins/key-auth/schema.lua", ["kong.plugins.key-auth.api"] = "kong/plugins/key-auth/api.lua", ["kong.plugins.key-auth.daos"] = "kong/plugins/key-auth/daos.lua", @@ -123,6 +137,7 @@ build = { ["kong.plugins.oauth2.schema.migrations"] = "kong/plugins/oauth2/schema/migrations.lua", ["kong.plugins.oauth2.handler"] = "kong/plugins/oauth2/handler.lua", ["kong.plugins.oauth2.access"] = "kong/plugins/oauth2/access.lua", + ["kong.plugins.oauth2.hooks"] = "kong/plugins/oauth2/hooks.lua", ["kong.plugins.oauth2.schema"] = "kong/plugins/oauth2/schema.lua", ["kong.plugins.oauth2.daos"] = "kong/plugins/oauth2/daos.lua", ["kong.plugins.oauth2.api"] = "kong/plugins/oauth2/api.lua", @@ -142,6 +157,7 @@ build = { ["kong.plugins.file-log.handler"] = "kong/plugins/file-log/handler.lua", ["kong.plugins.file-log.schema"] = "kong/plugins/file-log/schema.lua", + ["kong.plugins.mashape-analytics.schema.migrations"] = "kong/plugins/mashape-analytics/schema/migrations.lua", ["kong.plugins.mashape-analytics.handler"] = "kong/plugins/mashape-analytics/handler.lua", ["kong.plugins.mashape-analytics.schema"] = "kong/plugins/mashape-analytics/schema.lua", ["kong.plugins.mashape-analytics.buffer"] = "kong/plugins/mashape-analytics/buffer.lua", @@ -175,6 +191,7 @@ build = { ["kong.plugins.cors.schema"] = "kong/plugins/cors/schema.lua", ["kong.plugins.ssl.handler"] = "kong/plugins/ssl/handler.lua", + ["kong.plugins.ssl.hooks"] = "kong/plugins/ssl/hooks.lua", ["kong.plugins.ssl.ssl_util"] = "kong/plugins/ssl/ssl_util.lua", ["kong.plugins.ssl.schema"] = "kong/plugins/ssl/schema.lua", @@ -184,12 +201,24 @@ build = { 
["kong.plugins.acl.schema.migrations"] = "kong/plugins/acl/schema/migrations.lua", ["kong.plugins.acl.handler"] = "kong/plugins/acl/handler.lua", ["kong.plugins.acl.schema"] = "kong/plugins/acl/schema.lua", + ["kong.plugins.acl.hooks"] = "kong/plugins/acl/hooks.lua", ["kong.plugins.acl.api"] = "kong/plugins/acl/api.lua", ["kong.plugins.acl.daos"] = "kong/plugins/acl/daos.lua", + ["kong.api.app"] = "kong/api/app.lua", + ["kong.api.crud_helpers"] = "kong/api/crud_helpers.lua", + ["kong.api.route_helpers"] = "kong/api/route_helpers.lua", + ["kong.api.routes.kong"] = "kong/api/routes/kong.lua", + ["kong.api.routes.apis"] = "kong/api/routes/apis.lua", + ["kong.api.routes.consumers"] = "kong/api/routes/consumers.lua", + ["kong.api.routes.plugins"] = "kong/api/routes/plugins.lua", + ["kong.api.routes.cache"] = "kong/api/routes/cache.lua", + ["kong.api.routes.cluster"] = "kong/api/routes/cluster.lua", + ["kong.plugins.jwt.schema.migrations"] = "kong/plugins/jwt/schema/migrations.lua", ["kong.plugins.jwt.handler"] = "kong/plugins/jwt/handler.lua", ["kong.plugins.jwt.schema"] = "kong/plugins/jwt/schema.lua", + ["kong.plugins.jwt.hooks"] = "kong/plugins/jwt/hooks.lua", ["kong.plugins.jwt.api"] = "kong/plugins/jwt/api.lua", ["kong.plugins.jwt.daos"] = "kong/plugins/jwt/daos.lua", ["kong.plugins.jwt.jwt_parser"] = "kong/plugins/jwt/jwt_parser.lua", @@ -198,6 +227,7 @@ build = { ["kong.plugins.hmac-auth.handler"] = "kong/plugins/hmac-auth/handler.lua", ["kong.plugins.hmac-auth.access"] = "kong/plugins/hmac-auth/access.lua", ["kong.plugins.hmac-auth.schema"] = "kong/plugins/hmac-auth/schema.lua", + ["kong.plugins.hmac-auth.hooks"] = "kong/plugins/hmac-auth/hooks.lua", ["kong.plugins.hmac-auth.api"] = "kong/plugins/hmac-auth/api.lua", ["kong.plugins.hmac-auth.daos"] = "kong/plugins/hmac-auth/daos.lua", @@ -216,4 +246,4 @@ build = { conf = { "kong.yml" }, bin = { "bin/kong" } } -} +} \ No newline at end of file diff --git a/kong.yml b/kong.yml index b95d901fbe2..bb450199e52 100644 
--- a/kong.yml +++ b/kong.yml @@ -12,9 +12,9 @@ ## this property with the names of the plugins to load. ## Plugins will be loaded from the kong.plugins.{name}.* namespace. # custom_plugins: -# - hello_world -# - custom_plugin2 -# - ... + # - hello_world + # - custom_plugin2 + # - ... ###### ## The Kong working directory. Equivalent to nginx's prefix path. @@ -23,13 +23,33 @@ # nginx_working_dir: /usr/local/kong/ ###### -## Port which Kong proxies HTTP requests through, consumers will make requests against this port -## so make sure it is publicly available. -# proxy_port: 8000 +## Address and port on which the server will accept HTTP requests, consumers will make requests on this port. +# proxy_listen: "0.0.0.0:8000" ###### -## Same as proxy_port, but for HTTPS requests. -# proxy_ssl_port: 8443 +## Same as proxy_listen, but for HTTPS requests. +# proxy_listen_ssl: "0.0.0.0:8443" + +###### +## Address and port on which the admin API will listen to. The admin API is a private API which lets you +## manage your Kong infrastructure. It needs to be secured appropriately. +# admin_api_listen: "0.0.0.0:8001" + +###### +## Address and port used by the node to communicate with other Kong nodes in the cluster with both UDP and +## TCP messages. All the nodes in the cluster must be able to communicate with this node on this address. +## Only IPv4 addresses are allowed (no hostnames). +# cluster_listen: "0.0.0.0:7946" + +###### +## Address and port used by the node to communicate with the local clustering agent (TCP only, and local only). +## Used internally by this Kong node. Only IPv4 addresses are allowed (no hostnames). +# cluster_listen_rpc: "127.0.0.1:7373" + +###### +## The path to the SSL certificate and key that Kong will use when listening on the `https` port. +# ssl_cert_path: /path/to/certificate.pem +# ssl_key_path: /path/to/certificate.key ###### ## Specify how Kong performs DNS resolution (in the `dns_resolvers_available` property) you want to use. 
@@ -37,8 +57,10 @@ # dns_resolver: dnsmasq ###### -## DNS resolvers configuration. Specify a DNS server or the port on which you want -## dnsmasq to run. +## A dictionary of DNS resolvers Kong can use, and their respective properties. +## Currently `dnsmasq` (default, http://www.thekelleys.org.uk/dnsmasq/doc.html) and `server` are supported. +## By choosing `dnsmasq`, Kong will resolve hostnames using the local `/etc/hosts` file and `resolv.conf` +## configuration. By choosing `server`, you can specify a custom DNS server. # dns_resolvers_available: # server: # address: "8.8.8.8:53" @@ -46,9 +68,23 @@ # port: 8053 ###### -## Port on which the admin API will listen to. The admin API is a private API which lets you -## manage your Kong infrastructure. It needs to be secured appropriatly. -# admin_api_port: 8001 +## Cluster settings between Kong nodes. +## For more information take a look at the Clustering Reference: https://getkong.org/docs/latest/clustering/ +# cluster: + + ###### + ## Address and port used by the node to communicate with other Kong nodes in the cluster with both UDP and + ## TCP messages. All the nodes in the cluster must be able to communicate with this node on this address. + ## Only IPv4 addresses are allowed (no hostnames). + ## The advertise flag is used to change the address that we advertise to other nodes in the + ## cluster. By default, the cluster_listen address is advertised. However, in some cases + ## (specifically NAT traversal), there may be a routable address that cannot be bound to. + ## This flag enables gossiping a different address to support this. + # advertise: "" + + ###### + ## Key for encrypting network traffic within Kong. Must be a base64-encoded 16-byte key. + # encrypt: "foo" ###### ## Specify which database to use. Only "cassandra" is currently available. @@ -104,31 +140,26 @@ # password: cassandra ###### -## Time (in seconds) for which entities from the database (APIs, plugins configurations...) 
-## are cached by Kong. Increase this value if you want to lower the number of requests made -## to your database. -# database_cache_expiration: 5 - -###### -## SSL certificates to use. -# ssl_cert_path: /path/to/certificate.pem -# ssl_key_path: /path/to/certificate.key - -###### -## Sending anonymous error reports helps Kong developers to understand how it performs. +## Kong will send anonymous reports to Mashape. This helps Mashape fixing bugs/errors and improving Kong. +## By default is `true`. # send_anonymous_reports: true ###### -## Size (in MB) of the Lua cache. This value may not be smaller than 32MB. +## A value specifying (in MB) the size of the internal preallocated in-memory cache. Kong uses an in-memory +## cache to store database entities in order to optimize access to the underlying datastore. The cache size +## needs to be as big as the size of the entities being used by Kong at any given time. The default value +## is `128`, and the potential maximum value is the total size of the datastore. +## This value may not be smaller than 32MB. # memory_cache_size: 128 ###### -## The nginx configuration file which allows Kong to run. +## The NGINX configuration (or `nginx.conf`) that will be used for this instance. ## The placeholders will be computed and this property will be written as a file ## by Kong at `/nginx.conf` during startup. ## This file can tweaked to some extent, but many directives are necessary for Kong to work. 
## /!\ BE CAREFUL nginx: | + user {{user}}; worker_processes auto; error_log logs/error.log error; daemon on; @@ -144,7 +175,7 @@ nginx: | } http { - resolver {{dns_resolver}}; + resolver {{dns_resolver}} ipv6=off; charset UTF-8; access_log logs/access.log; @@ -183,7 +214,9 @@ nginx: | lua_code_cache on; lua_max_running_timers 4096; lua_max_pending_timers 16384; - lua_shared_dict locks 100k; + lua_shared_dict reports_locks 100k; + lua_shared_dict cluster_locks 100k; + lua_shared_dict cluster_autojoin_locks 100k; lua_shared_dict cache {{memory_cache_size}}m; lua_shared_dict cassandra 1m; lua_shared_dict cassandra_prepared 5m; @@ -199,8 +232,8 @@ nginx: | server { server_name _; - listen {{proxy_port}}; - listen {{proxy_ssl_port}} ssl; + listen {{proxy_listen}}; + listen {{proxy_listen_ssl}} ssl; ssl_certificate_by_lua 'kong.ssl_certificate()'; @@ -252,7 +285,7 @@ nginx: | } server { - listen {{admin_api_port}}; + listen {{admin_api_listen}}; client_max_body_size 10m; client_body_buffer_size 10m; @@ -280,8 +313,5 @@ nginx: | location /robots.txt { return 200 'User-agent: *\nDisallow: /'; } - - # Do not remove, additional configuration placeholder for some plugins - # {{additional_configuration}} } - } + } \ No newline at end of file diff --git a/kong/api/app.lua b/kong/api/app.lua index 7edce77e879..ce5bb82256c 100644 --- a/kong/api/app.lua +++ b/kong/api/app.lua @@ -140,7 +140,7 @@ local function attach_routes(routes) end -- Load core routes -for _, v in ipairs({"kong", "apis", "consumers", "plugins"}) do +for _, v in ipairs({"kong", "apis", "consumers", "plugins", "cache", "cluster" }) do local routes = require("kong.api.routes."..v) attach_routes(routes) end diff --git a/kong/api/crud_helpers.lua b/kong/api/crud_helpers.lua index f594b8b8d97..0f05e030cc6 100644 --- a/kong/api/crud_helpers.lua +++ b/kong/api/crud_helpers.lua @@ -156,4 +156,4 @@ function _M.delete(primary_key_t, dao_collection, where_t) end end -return _M +return _M \ No newline at end of file diff 
--git a/kong/api/route_helpers.lua b/kong/api/route_helpers.lua index 0971b5a68e3..420e17651bd 100644 --- a/kong/api/route_helpers.lua +++ b/kong/api/route_helpers.lua @@ -2,14 +2,6 @@ local stringy = require "stringy" local _M = {} -function _M.get_hostname() - local f = io.popen ("/bin/hostname") - local hostname = f:read("*a") or "" - f:close() - hostname = string.gsub(hostname, "\n$", "") - return hostname -end - function _M.parse_status(value) local result = {} local parts = stringy.split(value, "\n") diff --git a/kong/api/routes/cache.lua b/kong/api/routes/cache.lua new file mode 100644 index 00000000000..b91c3dadcef --- /dev/null +++ b/kong/api/routes/cache.lua @@ -0,0 +1,33 @@ +local responses = require "kong.tools.responses" +local cache = require "kong.tools.database_cache" + +return { + ["/cache/"] = { + DELETE = function(self, dao_factory) + cache.delete_all() + return responses.send_HTTP_NO_CONTENT() + end + }, + + ["/cache/:key"] = { + GET = function(self, dao_factory) + if self.params.key then + local cached_item = cache.get(self.params.key) + if cached_item then + return responses.send_HTTP_OK(cached_item) + end + end + + return responses.send_HTTP_NOT_FOUND() + end, + + DELETE = function(self, dao_factory) + if self.params.key then + cache.delete(self.params.key) + return responses.send_HTTP_NO_CONTENT() + end + + return responses.send_HTTP_NOT_FOUND() + end + } +} diff --git a/kong/api/routes/cluster.lua b/kong/api/routes/cluster.lua new file mode 100644 index 00000000000..498171b66b5 --- /dev/null +++ b/kong/api/routes/cluster.lua @@ -0,0 +1,87 @@ +local responses = require "kong.tools.responses" +local cjson = require "cjson" +local Serf = require "kong.cli.services.serf" + +local pairs = pairs +local table_insert = table.insert +local string_upper = string.upper + +return { + ["/cluster/"] = { + GET = function(self, dao_factory, helpers) + local res, err = Serf(configuration):invoke_signal("members", {["-format"] = "json"}) + if err then + 
return responses.send_HTTP_INTERNAL_SERVER_ERROR(err) + end + + local members = cjson.decode(res).members + local result = {data = {}} + for _, v in pairs(members) do + if not self.params.status or (self.params.status and v.status == self.params.status) then + table_insert(result.data, { + name = v.name, + address = v.addr, + status = v.status + }) + end + end + + result.total = #result.data + return responses.send_HTTP_OK(result) + end, + + DELETE = function(self, dao_factory) + if not self.params.name then + return responses.send_HTTP_BAD_REQUEST("Missing node \"name\"") + end + + local _, err = Serf(configuration):invoke_signal("force-leave", {self.params.name}) + if err then + return responses.send_HTTP_BAD_REQUEST(err) + end + + return responses.send_HTTP_OK() + end, + + POST = function(self, dao_factory) + if not self.params.address then + return responses.send_HTTP_BAD_REQUEST("Missing node \"address\"") + end + + local _, err = Serf(configuration):invoke_signal("join", {self.params.address}) + if err then + return responses.send_HTTP_BAD_REQUEST(err) + end + + return responses.send_HTTP_OK() + end + }, + + ["/cluster/events/"] = { + POST = function(self, dao_factory) + local message_t = self.params + + -- The type is always upper case + if message_t.type then + message_t.type = string_upper(message_t.type) + end + + -- If it's an update, load the new entity too so it's available in the hooks + if message_t.type == events.TYPES.ENTITY_UPDATED then + message_t.old_entity = message_t.entity + message_t.entity = dao[message_t.collection]:find_by_primary_key({id = message_t.old_entity.id}) + if not message_t.entity then + -- This means that the entity has been deleted immediately after an update in the meanwhile that + -- the system was still processing the update. 
A delete invalidation will come immediately after + -- so we can ignore this event + return responses.send_HTTP_OK() + end + end + + -- Trigger event in the node + events:publish(message_t.type, message_t) + + return responses.send_HTTP_OK() + end + } +} diff --git a/kong/api/routes/kong.lua b/kong/api/routes/kong.lua index 7b663575ff6..a5660c5f89e 100644 --- a/kong/api/routes/kong.lua +++ b/kong/api/routes/kong.lua @@ -1,5 +1,6 @@ local constants = require "kong.constants" local route_helpers = require "kong.api.route_helpers" +local utils = require "kong.tools.utils" return { ["/"] = { @@ -12,12 +13,13 @@ return { return helpers.responses.send_HTTP_OK({ tagline = "Welcome to Kong", version = constants.VERSION, - hostname = route_helpers.get_hostname(), + hostname = utils.get_hostname(), plugins = { available_on_server = configuration.plugins, enabled_in_cluster = db_plugins }, - lua_version = jit and jit.version or _VERSION + lua_version = jit and jit.version or _VERSION, + configuration = configuration }) end }, diff --git a/kong/cli/cmds/cluster.lua b/kong/cli/cmds/cluster.lua new file mode 100644 index 00000000000..a3b2961feda --- /dev/null +++ b/kong/cli/cmds/cluster.lua @@ -0,0 +1,52 @@ +#!/usr/bin/env luajit + +local constants = require "kong.constants" +local logger = require "kong.cli.utils.logger" +local utils = require "kong.tools.utils" +local config_loader = require "kong.tools.config_loader" +local Serf = require "kong.cli.services.serf" +local lapp = require("lapp") +local args = lapp(string.format([[ +Kong cluster operations. 
+ +Usage: kong cluster [options] + +Commands: + (string) where is one of: + members, force-leave, reachability, keygen + +Options: + -c,--config (default %s) path to configuration file + +]], constants.CLI.GLOBAL_KONG_CONF)) + +local KEYGEN = "keygen" +local FORCE_LEAVE = "force-leave" +local SUPPORTED_COMMANDS = {"members", KEYGEN, "reachability", FORCE_LEAVE} + +if not utils.table_contains(SUPPORTED_COMMANDS, args.command) then + lapp.quit("Invalid cluster command. Supported commands are: "..table.concat(SUPPORTED_COMMANDS, ", ")) +end + +local configuration = config_loader.load_default(args.config) + +local signal = args.command +args.command = nil +args.config = nil + +local skip_running_check + +if signal == FORCE_LEAVE and utils.table_size(args) ~= 1 then + logger:error("You must specify a node name") + os.exit(1) +elseif signal == KEYGEN then + skip_running_check = true +end + +local res, err = Serf(configuration):invoke_signal(signal, args, false, skip_running_check) +if err then + logger:error(err) + os.exit(1) +end + +logger:print(res) diff --git a/kong/cli/config.lua b/kong/cli/cmds/config.lua similarity index 75% rename from kong/cli/config.lua rename to kong/cli/cmds/config.lua index b96a3717401..d0aee90831b 100644 --- a/kong/cli/config.lua +++ b/kong/cli/cmds/config.lua @@ -1,9 +1,10 @@ #!/usr/bin/env luajit local constants = require "kong.constants" -local cutils = require "kong.cli.utils" +local logger = require "kong.cli.utils.logger" local IO = require "kong.tools.io" local yaml = require "yaml" +local config_loader = require "kong.tools.config_loader" local args = require("lapp")(string.format([[ For development purposes only. 
@@ -19,9 +20,7 @@ Options: local CONFIG_FILENAME = string.format("kong%s.yml", args.env ~= "" and "_"..args.env or "") -local config_path = cutils.get_kong_config_path(args.config) -local config_content = IO.read_file(config_path) -local default_config = yaml.load(config_content) +local configuration = config_loader.load_default(args.config) local env = args.env:upper() local DEFAULT_ENV_VALUES = { @@ -29,10 +28,11 @@ local DEFAULT_ENV_VALUES = { yaml = { ["nginx_working_dir"] = "nginx_tmp", ["send_anonymous_reports"] = false, - ["proxy_port"] = 8100, - ["proxy_ssl_port"] = 8543, - ["admin_api_port"] = 8101, - ["dnsmasq_port"] = 8153, + ["proxy_listen"] = "0.0.0.0:8100", + ["proxy_listen_ssl"] = "0.0.0.0:8543", + ["admin_api_listen"] = "0.0.0.0:8101", + ["cluster_listen"] = "0.0.0.0:9100", + ["cluster_listen_rpc"] = "0.0.0.0:9101", ["cassandra"] = { ["keyspace"] = "kong_tests" } @@ -61,7 +61,8 @@ local DEFAULT_ENV_VALUES = { } if not DEFAULT_ENV_VALUES[args.env:upper()] then - cutils.error_exit(string.format("Unregistered environment '%s'", args.env:upper())) + logger:error(string.format("Unregistered environment '%s'", args.env:upper())) + os.exit(1) end -- Create the new configuration as a new blank object @@ -75,8 +76,12 @@ end -- Dump into a string local new_config_content = yaml.dump(new_config) +-- Workaround for https://github.com/lubyk/yaml/issues/2 +-- This workaround is in two places. 
To remove it "Find and replace" in the code +new_config_content = string.gsub(new_config_content, "(%w+:%s*)([%w%.]+:%d+)", "%1\"%2\"") + -- Replace nginx directives -local nginx_config = default_config.nginx +local nginx_config = configuration.nginx for k, v in pairs(DEFAULT_ENV_VALUES[env].nginx) do nginx_config = nginx_config:gsub(k, v) end @@ -93,5 +98,6 @@ nginx: | local ok, err = IO.write_to_file(IO.path:join(args.output, CONFIG_FILENAME), new_config_content) if not ok then - cutils.error_exit(err) + logger:error(err) + os.exit(1) end diff --git a/kong/cli/migrations.lua b/kong/cli/cmds/migrations.lua similarity index 64% rename from kong/cli/migrations.lua rename to kong/cli/cmds/migrations.lua index 84ee56dbdeb..38993478721 100644 --- a/kong/cli/migrations.lua +++ b/kong/cli/cmds/migrations.lua @@ -2,10 +2,10 @@ local Migrations = require "kong.tools.migrations" local constants = require "kong.constants" -local cutils = require "kong.cli.utils" +local logger = require "kong.cli.utils.logger" local utils = require "kong.tools.utils" local input = require "kong.cli.utils.input" -local config = require "kong.tools.config_loader" +local config_loader = require "kong.tools.config_loader" local dao = require "kong.tools.dao_loader" local lapp = require "lapp" local args = lapp(string.format([[ @@ -28,8 +28,7 @@ if args.command == "migrations" then lapp.quit("Missing required .") end -local config_path = cutils.get_kong_config_path(args.config) -local configuration = config.load(config_path) +local configuration = config_loader.load_default(args.config) local dao_factory = dao.load(configuration) local migrations = Migrations(dao_factory, configuration) @@ -37,7 +36,8 @@ local kind = args.type if kind ~= "all" and kind ~= "core" then -- Assuming we are trying to run migrations for a plugin if not utils.table_contains(configuration.plugins, kind) then - cutils.logger:error_exit("No \""..kind.."\" plugin enabled in the configuration.") + logger:error("No 
\""..kind.."\" plugin enabled in the configuration.") + os.exit(1) end end @@ -45,107 +45,113 @@ if args.command == "list" then local migrations, err = dao_factory.migrations:get_migrations() if err then - cutils.logger:error_exit(err) + logger:error(err) + os.exit(1) elseif migrations then - cutils.logger:info(string.format( + logger:info(string.format( "Executed migrations for keyspace %s (%s):", - cutils.colors.yellow(dao_factory.properties.keyspace), + logger.colors.yellow(dao_factory.properties.keyspace), dao_factory.type )) for _, row in ipairs(migrations) do - cutils.logger:info(string.format("%s: %s", - cutils.colors.yellow(row.id), + logger:info(string.format("%s: %s", + logger.colors.yellow(row.id), table.concat(row.migrations, ", ") )) end else - cutils.logger:info(string.format( + logger:info(string.format( "No migrations have been run yet for %s on keyspace: %s", - cutils.colors.yellow(dao_factory.type), - cutils.colors.yellow(dao_factory.properties.keyspace) + logger.colors.yellow(dao_factory.type), + logger.colors.yellow(dao_factory.properties.keyspace) )) end elseif args.command == "up" then local function before(identifier) - cutils.logger:info(string.format( + logger:info(string.format( "Migrating %s on keyspace \"%s\" (%s)", - cutils.colors.yellow(identifier), - cutils.colors.yellow(dao_factory.properties.keyspace), + logger.colors.yellow(identifier), + logger.colors.yellow(dao_factory.properties.keyspace), dao_factory.type )) end local function on_each_success(identifier, migration) - cutils.logger:info(string.format( + logger:info(string.format( "%s migrated up to: %s", identifier, - cutils.colors.yellow(migration.name) + logger.colors.yellow(migration.name) )) end if kind == "all" then local err = migrations:run_all_migrations(before, on_each_success) if err then - cutils.logger:error_exit(err) + logger:error(err) + os.exit(1) end else local err = migrations:run_migrations(kind, before, on_each_success) if err then - 
cutils.logger:error_exit(err) + logger:error(err) + os.exit(1) end end - cutils.logger:success("Schema up to date") + logger:success("Schema up to date") elseif args.command == "down" then if kind == "all" then - cutils.logger:error_exit("You must specify 'core' or a plugin name for this command.") + logger:error("You must specify 'core' or a plugin name for this command.") + os.exit(1) end local function before(identifier) - cutils.logger:info(string.format( + logger:info(string.format( "Rollbacking %s in keyspace \"%s\" (%s)", - cutils.colors.yellow(identifier), - cutils.colors.yellow(dao_factory.properties.keyspace), + logger.colors.yellow(identifier), + logger.colors.yellow(dao_factory.properties.keyspace), dao_factory.type )) end local function on_success(identifier, migration) if migration then - cutils.logger:success("\""..identifier.."\" rollbacked: "..cutils.colors.yellow(migration.name)) + logger:success("\""..identifier.."\" rollbacked: "..logger.colors.yellow(migration.name)) else - cutils.logger:success("No migration to rollback") + logger:success("No migration to rollback") end end local err = migrations:run_rollback(kind, before, on_success) if err then - cutils.logger:error_exit(err) + logger:error(err) + os.exit(1) end elseif args.command == "reset" then local keyspace = dao_factory.properties.keyspace - cutils.logger:info(string.format( + logger:info(string.format( "Resetting \"%s\" keyspace (%s)", - cutils.colors.yellow(keyspace), + logger.colors.yellow(keyspace), dao_factory.type )) if input.confirm("Are you sure? 
You will lose all of your data, this operation is irreversible.") then local _, err = dao_factory.migrations:drop_keyspace(keyspace) if err then - cutils.logger:error_exit(err) + logger:error(err) + os.exit(1) else - cutils.logger:success("Keyspace successfully reset") + logger:success("Keyspace successfully reset") end end else lapp.quit("Invalid command: "..args.command) -end +end \ No newline at end of file diff --git a/kong/cli/cmds/quit.lua b/kong/cli/cmds/quit.lua new file mode 100644 index 00000000000..adcd59b7b57 --- /dev/null +++ b/kong/cli/cmds/quit.lua @@ -0,0 +1,32 @@ +#!/usr/bin/env luajit + +local constants = require "kong.constants" +local logger = require "kong.cli.utils.logger" +local config_loader = require "kong.tools.config_loader" +local Nginx = require "kong.cli.services.nginx" +local services = require "kong.cli.utils.services" +local args = require("lapp")(string.format([[ +Graceful shutdown. Stop the Kong instance running in the configured 'nginx_working_dir' directory. 
+ +Usage: kong stop [options] + +Options: + -c,--config (default %s) path to configuration file +]], constants.CLI.GLOBAL_KONG_CONF)) + +local configuration, configuration_path = config_loader.load_default(args.config) + +local nginx = Nginx(configuration, configuration_path) + +if not nginx:is_running() then + logger:error("Kong is not running") + os.exit(1) +end + +nginx:quit() +while(nginx:is_running()) do + -- Wait until it quits +end + +services.stop_all(configuration, configuration_path) +logger:success("Stopped") \ No newline at end of file diff --git a/kong/cli/cmds/reload.lua b/kong/cli/cmds/reload.lua new file mode 100644 index 00000000000..014d2dcd80b --- /dev/null +++ b/kong/cli/cmds/reload.lua @@ -0,0 +1,33 @@ +#!/usr/bin/env luajit + +local constants = require "kong.constants" +local logger = require "kong.cli.utils.logger" +local config_loader = require "kong.tools.config_loader" +local Nginx = require "kong.cli.services.nginx" +local args = require("lapp")(string.format([[ +Gracefully reload the Kong instance running in the configured 'nginx_working_dir'. + +Any configuration change will be applied. 
+ +Usage: kong reload [options] + +Options: + -c,--config (default %s) path to configuration file +]], constants.CLI.GLOBAL_KONG_CONF)) + +local configuration, configuration_path = config_loader.load_default(args.config) + +local nginx = Nginx(configuration, configuration_path) + +if not nginx:is_running() then + logger:error("Kong is not running") + os.exit(1) +end + +local _, err = nginx:reload() +if err then + logger:error(err) + os.exit(1) +end + +logger:success("Reloaded") diff --git a/kong/cli/restart.lua b/kong/cli/cmds/restart.lua similarity index 51% rename from kong/cli/restart.lua rename to kong/cli/cmds/restart.lua index cb316dff0cd..f1eca27bf42 100644 --- a/kong/cli/restart.lua +++ b/kong/cli/cmds/restart.lua @@ -1,8 +1,9 @@ #!/usr/bin/env luajit local constants = require "kong.constants" -local cutils = require "kong.cli.utils" -local signal = require "kong.cli.utils.signal" +local config_loader = require "kong.tools.config_loader" +local services = require "kong.cli.utils.services" + local args = require("lapp")(string.format([[ Restart the Kong instance running in the configured 'nginx_working_dir'. 
@@ -15,16 +16,8 @@ Options: -c,--config (default %s) path to configuration file ]], constants.CLI.GLOBAL_KONG_CONF)) -if signal.is_running(args.config) then - if not signal.send_signal(args.config, signal.STOP) then - cutils.logger:error_exit("Could not stop Kong") - end -end - -signal.prepare_kong(args.config) +local configuration, configuration_path = config_loader.load_default(args.config) -if not signal.send_signal(args.config) then - cutils.logger:error_exit("Could not restart Kong") -end +services.stop_all(configuration, configuration_path) -cutils.logger:success("Restarted") +require("kong.cli.cmds.start") \ No newline at end of file diff --git a/kong/cli/cmds/start.lua b/kong/cli/cmds/start.lua new file mode 100755 index 00000000000..f20efb7ce34 --- /dev/null +++ b/kong/cli/cmds/start.lua @@ -0,0 +1,38 @@ +#!/usr/bin/env luajit + +local constants = require "kong.constants" +local config_loader = require "kong.tools.config_loader" +local logger = require "kong.cli.utils.logger" +local services = require "kong.cli.utils.services" + +local args = require("lapp")(string.format([[ +Start Kong with given configuration. Kong will run in the configured 'nginx_working_dir' directory. + +Usage: kong start [options] + +Options: + -c,--config (default %s) path to configuration file +]], constants.CLI.GLOBAL_KONG_CONF)) + +logger:info("Kong "..constants.VERSION) + +local configuration, configuration_path = config_loader.load_default(args.config) + +local status = services.check_status(configuration, configuration_path) +if status == services.STATUSES.SOME_RUNNING then + logger:error("Some services required by Kong are already running. 
Please execute \"kong restart\"!") + os.exit(1) +elseif status == services.STATUSES.ALL_RUNNING then + logger:error("Kong is currently running") + os.exit(1) +end + +local ok, err = services.start_all(configuration, configuration_path) +if not ok then + services.stop_all(configuration, configuration_path) + logger:error(err) + logger:error("Could not start Kong") + os.exit(1) +end + +logger:success("Started") \ No newline at end of file diff --git a/kong/cli/cmds/status.lua b/kong/cli/cmds/status.lua new file mode 100644 index 00000000000..b398cead3c7 --- /dev/null +++ b/kong/cli/cmds/status.lua @@ -0,0 +1,28 @@ +#!/usr/bin/env luajit + +local constants = require "kong.constants" +local logger = require "kong.cli.utils.logger" +local services = require "kong.cli.utils.services" +local config_loader = require "kong.tools.config_loader" +local args = require("lapp")(string.format([[ +Checks the status of Kong and its services. Returns an error if the services are not properly running. + +Usage: kong status [options] + +Options: + -c,--config (default %s) path to configuration file +]], constants.CLI.GLOBAL_KONG_CONF)) + +local configuration, configuration_path = config_loader.load_default(args.config) + +local status = services.check_status(configuration, configuration_path) +if status == services.STATUSES.ALL_RUNNING then + logger:info("Kong is running") + os.exit(0) +elseif status == services.STATUSES.SOME_RUNNING then + logger:error("Some services required by Kong are not running. 
Please execute \"kong restart\"!") + os.exit(1) +else + logger:error("Kong is not running") + os.exit(1) +end \ No newline at end of file diff --git a/kong/cli/cmds/stop.lua b/kong/cli/cmds/stop.lua new file mode 100755 index 00000000000..05af35ca4d6 --- /dev/null +++ b/kong/cli/cmds/stop.lua @@ -0,0 +1,26 @@ +#!/usr/bin/env luajit + +local constants = require "kong.constants" +local logger = require "kong.cli.utils.logger" +local services = require "kong.cli.utils.services" +local config_loader = require "kong.tools.config_loader" +local args = require("lapp")(string.format([[ +Fast shutdown. Stop the Kong instance running in the configured 'nginx_working_dir' directory. + +Usage: kong stop [options] + +Options: + -c,--config (default %s) path to configuration file +]], constants.CLI.GLOBAL_KONG_CONF)) + +local configuration, configuration_path = config_loader.load_default(args.config) + +local status = services.check_status(configuration, configuration_path) +if status == services.STATUSES.NOT_RUNNING then + logger:error("Kong is not running") + os.exit(1) +end + +services.stop_all(configuration, configuration_path) + +logger:success("Stopped") \ No newline at end of file diff --git a/kong/cli/cmds/version.lua b/kong/cli/cmds/version.lua new file mode 100644 index 00000000000..312fd46810f --- /dev/null +++ b/kong/cli/cmds/version.lua @@ -0,0 +1,6 @@ +#!/usr/bin/env luajit + +local logger = require "kong.cli.utils.logger" +local constants = require "kong.constants" + +logger:print(string.format("Kong version: %s", constants.VERSION)) diff --git a/kong/cli/db.lua b/kong/cli/db.lua deleted file mode 100644 index 8862d4baf62..00000000000 --- a/kong/cli/db.lua +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env luajit - -local Faker = require "kong.tools.faker" -local constants = require "kong.constants" -local cutils = require "kong.cli.utils" -local config = require "kong.tools.config_loader" -local dao = require "kong.tools.dao_loader" -local lapp = require("lapp") - 
-local args = lapp(string.format([[ -For development purposes only. - -Seed the database with random data or drop it. - -Usage: kong db [options] - -Commands: - (string) where is one of: - seed, drop - -Options: - -c,--config (default %s) path to configuration file - -r,--random flag to also insert random entities - -n,--number (default 1000) number of random entities to insert if --random -]], constants.CLI.GLOBAL_KONG_CONF)) - --- $ kong db -if args.command == "db" then - lapp.quit("Missing required .") -end - -local config_path = cutils.get_kong_config_path(args.config) -local config = config.load(config_path) -local dao_factory = dao.load(config) - -if args.command == "seed" then - - -- Drop if exists - local err = dao_factory:drop() - if err then - cutils.logger:error_exit(err) - end - - local faker = Faker(dao_factory) - faker:seed(args.random and args.number or nil) - cutils.logger:success("Populated") - -elseif args.command == "drop" then - - local err = dao_factory:drop() - if err then - cutils.logger:error_exit(err) - end - - cutils.logger:success("Dropped") - -else - lapp.quit("Invalid command: "..args.command) -end diff --git a/kong/cli/quit.lua b/kong/cli/quit.lua deleted file mode 100644 index 957487d5c5f..00000000000 --- a/kong/cli/quit.lua +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env luajit - -local constants = require "kong.constants" -local cutils = require "kong.cli.utils" -local signal = require "kong.cli.utils.signal" -local args = require("lapp")(string.format([[ -Graceful shutdown. Stop the Kong instance running in the configured 'nginx_working_dir' directory. 
- -Usage: kong stop [options] - -Options: - -c,--config (default %s) path to configuration file -]], constants.CLI.GLOBAL_KONG_CONF)) - --- Check if running, will exit if not -local running, err = signal.is_running(args.config) -if not running then - cutils.logger:error_exit(err) -end - --- Send 'quit' signal (graceful shutdown) -if signal.send_signal(args.config, signal.QUIT) then - cutils.logger:success("Stopped") -else - cutils.logger:error_exit("Could not gracefully stop Kong") -end diff --git a/kong/cli/reload.lua b/kong/cli/reload.lua deleted file mode 100644 index 100133dee11..00000000000 --- a/kong/cli/reload.lua +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env luajit - -local constants = require "kong.constants" -local cutils = require "kong.cli.utils" -local signal = require "kong.cli.utils.signal" -local args = require("lapp")(string.format([[ -Gracefully reload the Kong instance running in the configured 'nginx_working_dir'. - -Any configuration change will be applied. - -Usage: kong reload [options] - -Options: - -c,--config (default %s) path to configuration file -]], constants.CLI.GLOBAL_KONG_CONF)) - -if not signal.is_running(args.config) then - cutils.logger:error_exit("Could not reload: Kong is not running.") -end - -signal.prepare_kong(args.config, signal.RELOAD) - -if signal.send_signal(args.config, signal.RELOAD) then - cutils.logger:success("Reloaded") -else - cutils.logger:error_exit("Could not reload Kong") -end diff --git a/kong/cli/services/base_service.lua b/kong/cli/services/base_service.lua new file mode 100644 index 00000000000..171a68666fb --- /dev/null +++ b/kong/cli/services/base_service.lua @@ -0,0 +1,105 @@ +local Object = require "classic" +local IO = require "kong.tools.io" +local stringy = require "stringy" +local utils = require "kong.tools.utils" + +local BaseService = Object:extend() + +function BaseService.find_cmd(app_name, additional_paths, check_path_func) + local found_file_paths = {} + + if IO.cmd_exists(app_name) then + 
if not check_path_func then + return app_name + else + table.insert(found_file_paths, app_name) + end + end + + -- These are some default locations we are always looking into + local search_dirs = utils.table_merge({ + "/usr/local/sbin", + "/usr/local/bin", + "/usr/sbin", + "/usr/bin", + "/bin" + }, additional_paths and additional_paths or {}) + + for _, search_dir in ipairs(search_dirs) do + local file_path = search_dir..(stringy.endswith(search_dir, "/") and "" or "/")..app_name + if IO.file_exists(file_path) then + table.insert(found_file_paths, file_path) + end + end + + if check_path_func then + for _, found_file_path in ipairs(found_file_paths) do + if check_path_func(found_file_path) then + return found_file_path + end + end + elseif #found_file_paths > 0 then + -- Just return the first path + return table.remove(found_file_paths, 1) + end + + return nil +end + +function BaseService:new(name, nginx_working_dir) + self._name = name + self._pid_file_path = nginx_working_dir + ..(stringy.endswith(nginx_working_dir, "/") and "" or "/") + ..name..".pid" +end + +function BaseService:is_running() + local result = false + + local pid = IO.read_file(self._pid_file_path) + if pid then + local _, code = IO.os_execute("kill -0 "..stringy.strip(pid)) + if code and code == 0 then + result = pid + end + end + + return result +end + +function BaseService:_get_cmd(additional_paths, check_path_func) + local cmd = BaseService.find_cmd(self._name, additional_paths, check_path_func) + if not cmd then + return nil, "Can't find "..self._name + end + return cmd +end + +function BaseService:start() + -- Returns an error if not implemented + error("Not implemented") +end + +function BaseService:prepare(working_dir) + -- Create nginx folder if needed + local _, err = IO.path:mkdir(working_dir) + if err then + return false, err + end + return true +end + +function BaseService:stop(force) + local pid = self:is_running() + if pid then + IO.os_execute("kill "..(force and "-9 " or 
"")..pid) + while self:is_running() do + -- Wait + end + if force then + os.remove(self._pid_file_path) -- Because forcing the kill doesn't kill the PID file + end + end +end + +return BaseService \ No newline at end of file diff --git a/kong/cli/services/dnsmasq.lua b/kong/cli/services/dnsmasq.lua new file mode 100644 index 00000000000..0067ae37d5a --- /dev/null +++ b/kong/cli/services/dnsmasq.lua @@ -0,0 +1,52 @@ +local BaseService = require "kong.cli.services.base_service" +local logger = require "kong.cli.utils.logger" +local IO = require "kong.tools.io" + +local Dnsmasq = BaseService:extend() + +local SERVICE_NAME = "dnsmasq" + +function Dnsmasq:new(configuration) + self._configuration = configuration + Dnsmasq.super.new(self, SERVICE_NAME, self._configuration.nginx_working_dir) +end + +function Dnsmasq:prepare() + return Dnsmasq.super.prepare(self, self._configuration.nginx_working_dir) +end + +function Dnsmasq:start() + if self._configuration.dns_resolver.dnsmasq then + if self:is_running() then + return nil, SERVICE_NAME.." is already running" + end + + local cmd, err = Dnsmasq.super._get_cmd(self) + if err then + return nil, err + end + + -- dnsmasq always listens on the local 127.0.0.1 address + local res, code = IO.os_execute(cmd.." -p "..self._configuration.dns_resolver.port.." --pid-file="..self._pid_file_path.." 
-N -o --listen-address=127.0.0.1") + if code == 0 then + while not self:is_running() do + -- Wait for PID file to be created + end + + setmetatable(self._configuration.dns_resolver, require "kong.tools.printable") + logger:info(string.format([[dnsmasq............%s]], tostring(self._configuration.dns_resolver))) + return true + else + return nil, res + end + end + return true +end + +function Dnsmasq:stop() + if self._configuration.dns_resolver.dnsmasq then + Dnsmasq.super.stop(self, true) -- Killing dnsmasq just with "kill PID" apparently doesn't terminate it + end +end + +return Dnsmasq \ No newline at end of file diff --git a/kong/cli/services/nginx.lua b/kong/cli/services/nginx.lua new file mode 100644 index 00000000000..f8e2e01f7bc --- /dev/null +++ b/kong/cli/services/nginx.lua @@ -0,0 +1,244 @@ +local BaseService = require "kong.cli.services.base_service" +local IO = require "kong.tools.io" +local logger = require "kong.cli.utils.logger" +local ssl = require "kong.cli.utils.ssl" +local constants = require "kong.constants" +local syslog = require "kong.tools.syslog" +local socket = require "socket" + +local Nginx = BaseService:extend() + +local SERVICE_NAME = "nginx" +local START = "start" +local RELOAD = "reload" +local STOP = "stop" +local QUIT = "quit" + +local function prepare_folders(configuration) + -- Create nginx folder if needed + local _, err = IO.path:mkdir(IO.path:join(configuration.nginx_working_dir, "logs")) + if err then + return false, err + end + + -- Create logs files + os.execute("touch "..IO.path:join(configuration.nginx_working_dir, "logs", "error.log")) + os.execute("touch "..IO.path:join(configuration.nginx_working_dir, "logs", "access.log")) + + -- Create SSL folder if needed + local _, err = IO.path:mkdir(IO.path:join(configuration.nginx_working_dir, "ssl")) + if err then + return false, err + end + + return true +end + +local function prepare_ssl_certificates(configuration) + local _, err = ssl.prepare_ssl(configuration) + if err 
then + return false, err + end + + local res, err = ssl.get_ssl_cert_and_key(configuration) + if err then + return false, err + end + + local trusted_ssl_cert_path = configuration.dao_config.ssl_certificate -- DAO ssl cert + + return { ssl_cert_path = res.ssl_cert_path, + ssl_key_path = res.ssl_key_path, + trusted_ssl_cert_path = trusted_ssl_cert_path } +end + +local function get_current_user() + return IO.os_execute("whoami") +end + +local function prepare_nginx_configuration(configuration, ssl_config) + -- Extract nginx config from kong config, replace any needed value + local nginx_config = configuration.nginx + local nginx_inject = { + user = get_current_user(), + proxy_listen = configuration.proxy_listen, + proxy_listen_ssl = configuration.proxy_listen_ssl, + admin_api_listen = configuration.admin_api_listen, + dns_resolver = configuration.dns_resolver.address, + memory_cache_size = configuration.memory_cache_size, + ssl_cert = ssl_config.ssl_cert_path, + ssl_key = ssl_config.ssl_key_path, + lua_ssl_trusted_certificate = ssl_config.trusted_ssl_cert_path ~= nil and "lua_ssl_trusted_certificate \""..ssl_config.trusted_ssl_cert_path.."\";" or "" + } + + -- Auto-tune + local res, code = IO.os_execute("ulimit -n") + if code == 0 then + nginx_inject.auto_worker_rlimit_nofile = res + nginx_inject.auto_worker_connections = tonumber(res) > 16384 and 16384 or res + else + return false, "Can't determine ulimit" + end + + -- Inject properties + for k, v in pairs(nginx_inject) do + nginx_config = nginx_config:gsub("{{"..k.."}}", v) + end + + -- Inject anonymous reports + if configuration.send_anonymous_reports then + -- If there is no internet connection, disable this feature + if socket.dns.toip(constants.SYSLOG.ADDRESS) then + nginx_config = "error_log syslog:server="..constants.SYSLOG.ADDRESS..":"..tostring(constants.SYSLOG.PORT).." 
error;\n"..nginx_config + else + logger:warn("The internet connection might not be available, cannot resolve "..constants.SYSLOG.ADDRESS) + end + end + + -- Write nginx config + local ok, err = IO.write_to_file(IO.path:join(configuration.nginx_working_dir, constants.CLI.NGINX_CONFIG), nginx_config) + if not ok then + return false, err + end +end + +function Nginx:new(configuration, configuration_path) + self._configuration = configuration + self._configuration_path = configuration_path + + Nginx.super.new(self, SERVICE_NAME, self._configuration.nginx_working_dir) +end + +function Nginx:prepare() + -- Create working directory if missing + local ok, err = Nginx.super.prepare(self, self._configuration.nginx_working_dir) + if not ok then + return nil, err + end + + -- Preparing nginx folders + local _, err = prepare_folders(self._configuration) + if err then + return false, err + end + + -- Preparing SSL certificates + local res, err = prepare_ssl_certificates(self._configuration) + if err then + return false, err + end + + -- Preparing the Nginx configuration file + local _, err = prepare_nginx_configuration(self._configuration, res) + if err then + return false, err + end + + return true +end + +function Nginx:_invoke_signal(cmd, signal) + local full_nginx_cmd = string.format("KONG_CONF=%s %s -p %s -c %s -g 'pid %s;' %s", + self._configuration_path, + cmd, + self._configuration.nginx_working_dir, + constants.CLI.NGINX_CONFIG, + self._pid_file_path, + signal == START and "" or "-s "..signal) + + -- Check ulimit value + if signal == START or signal == RELOAD then + local res, code = IO.os_execute("ulimit -n") + if code == 0 and tonumber(res) < 4096 then + logger:warn("ulimit is currently set to \""..res.."\". 
For better performance set it to at least \"4096\" using \"ulimit -n\"") + end + end + + -- Report signal action + if self._configuration.send_anonymous_reports then + syslog.log({signal=signal}) + end + + -- Start failure handler + local res, code = IO.os_execute(full_nginx_cmd) + if code == 0 then + return true + else + return false, res + end +end + +function Nginx:_get_cmd() + local cmd, err = Nginx.super._get_cmd(self, { + "/usr/local/openresty/nginx/sbin/", + "/usr/local/opt/openresty/bin/", + "/usr/local/bin/", + "/usr/sbin/" + }, function(path) + local res, code = IO.os_execute(path.." -v") + if code == 0 then + return res:match("^nginx version: ngx_openresty/") or + res:match("^nginx version: openresty/") + end + + return false + end) + + return cmd, err +end + +function Nginx:start() + if self:is_running() then + return nil, SERVICE_NAME.." is already running" + end + + local cmd, err = self:_get_cmd() + if err then + return nil, err + end + + local ok, err = self:_invoke_signal(cmd, START) + if ok then + local listen_addresses = { + proxy_listen = self._configuration.proxy_listen, + proxy_listen_ssl = self._configuration.proxy_listen_ssl, + admin_api_listen = self._configuration.admin_api_listen + } + setmetatable(listen_addresses, require "kong.tools.printable") + logger:info(string.format([[nginx .............%s]], tostring(listen_addresses))) + end + + return ok, err +end + +function Nginx:stop() + local cmd, err = self:_get_cmd() + if err then + return nil, err + end + + local _, err = self:_invoke_signal(cmd, STOP) + if not err then + os.execute("while [ -f "..self._pid_file_path.." 
]; do sleep 0.5; done") + end +end + +function Nginx:reload() + local cmd, err = self:_get_cmd() + if err then + return nil, err + end + + return self:_invoke_signal(cmd, RELOAD) +end + +function Nginx:quit() + local cmd, err = self:_get_cmd() + if err then + return nil, err + end + + return self:_invoke_signal(cmd, QUIT) +end + +return Nginx \ No newline at end of file diff --git a/kong/cli/services/serf.lua b/kong/cli/services/serf.lua new file mode 100644 index 00000000000..1f411bb0cbc --- /dev/null +++ b/kong/cli/services/serf.lua @@ -0,0 +1,235 @@ +local BaseService = require "kong.cli.services.base_service" +local logger = require "kong.cli.utils.logger" +local IO = require "kong.tools.io" +local stringy = require "stringy" +local cjson = require "cjson" +local cluster_utils = require "kong.tools.cluster" +local dao = require "kong.tools.dao_loader" + +local Serf = BaseService:extend() + +local SERVICE_NAME = "serf" +local LOG_FILE = "/tmp/"..SERVICE_NAME..".log" +local START_TIMEOUT = 10 +local EVENT_NAME = "kong" + +function Serf:new(configuration) + local nginx_working_dir = configuration.nginx_working_dir + + self._configuration = configuration + self._script_path = nginx_working_dir + ..(stringy.endswith(nginx_working_dir, "/") and "" or "/") + .."serf_event.sh" + self._dao_factory = dao.load(self._configuration) + Serf.super.new(self, SERVICE_NAME, nginx_working_dir) +end + +function Serf:_get_cmd() + local cmd, err = Serf.super._get_cmd(self, {}, function(path) + local res, code = IO.os_execute(path.." 
version") + if code == 0 then + return res:match("^Serf v0.7.0") + end + + return false + end) + + return cmd, err +end + +function Serf:prepare() + -- Create working directory if missing + local ok, err = Serf.super.prepare(self, self._configuration.nginx_working_dir) + if not ok then + return nil, err + end + + -- Create serf event handler + local luajit_path = BaseService.find_cmd("luajit") + if not luajit_path then + return nil, "Can't find luajit" + end + + local script = [[ +#!/bin/sh +PAYLOAD=`cat` # Read from stdin + +if [ "$SERF_EVENT" != "user" ]; then + PAYLOAD="{\"type\":\"${SERF_EVENT}\",\"entity\": \"${PAYLOAD}\"}" +fi + +echo $PAYLOAD > /tmp/payload + +COMMAND='require("kong.tools.http_client").post("http://]]..self._configuration.admin_api_listen..[[/cluster/events/", ]].."[['${PAYLOAD}']]"..[[, {["content-type"] = "application/json"})' + +echo $COMMAND | ]]..luajit_path..[[ +]] + local _, err = IO.write_to_file(self._script_path, script) + if err then + return false, err + end + + -- Adding executable permissions + local res, code = IO.os_execute("chmod +x "..self._script_path) + if code ~= 0 then + return false, res + end + + return true +end + +function Serf:_join_node(address) + local _, err = self:invoke_signal("join", {address}) + if err then + return false + end + return true +end + +function Serf:_autojoin(current_node_name) + if self._configuration.cluster["auto-join"] then + logger:info("Trying to auto-join Kong nodes, please wait..") + + -- Delete current node just in case it was there (due to an inconsistency caused by a crash) + local _, err = self._dao_factory.nodes:delete({ + name = current_node_name + }) + if err then + return false, tostring(err) + end + + local nodes, err = self._dao_factory.nodes:find_all() + if err then + return false, tostring(err) + else + if #nodes == 0 then + logger:warn("Cannot auto-join the cluster because no nodes were found") + else + -- Sort by newest to oldest (although by TTL would be a better sort) + 
table.sort(nodes, function(a, b) + return a.created_at > b.created_at + end) + + local joined + for _, v in ipairs(nodes) do + if self:_join_node(v.cluster_listening_address) then + logger:info("Successfully auto-joined "..v.cluster_listening_address) + joined = true + break + else + logger:warn("Cannot join "..v.cluster_listening_address..". If the node does not exist anymore it will be automatically purged.") + end + end + if not joined then + logger:warn("Could not join the existing cluster") + end + end + end + end + return true +end + +function Serf:start() + if self:is_running() then + return nil, SERVICE_NAME.." is already running" + end + + local cmd, err = self:_get_cmd() + if err then + return nil, err + end + + local node_name = cluster_utils.get_node_name(self._configuration) + + -- Prepare arguments + local cmd_args = { + ["-bind"] = self._configuration.cluster_listen, + ["-rpc-addr"] = self._configuration.cluster_listen_rpc, + ["-advertise"] = self._configuration.cluster.advertise, + ["-encrypt"] = self._configuration.cluster.encrypt, + ["-log-level"] = "err", + ["-profile"] = "wan", + ["-node"] = node_name, + ["-event-handler"] = "member-join,member-leave,member-failed,member-update,member-reap,user:"..EVENT_NAME.."="..self._script_path + } + + setmetatable(cmd_args, require "kong.tools.printable") + local str_cmd_args = tostring(cmd_args) + local res, code = IO.os_execute("nohup "..cmd.." agent "..str_cmd_args.." > "..LOG_FILE.." 2>&1 & echo $! 
> "..self._pid_file_path) + if code == 0 then + + -- Wait for process to start, with a timeout + local start = os.time() + while not (IO.file_exists(LOG_FILE) and string.match(IO.read_file(LOG_FILE), "running") or (os.time() > start + START_TIMEOUT)) do + -- Wait + end + + if self:is_running() then + logger:info(string.format([[serf ..............%s]], str_cmd_args)) + + -- Auto-Join nodes + return self:_autojoin(node_name) + else + -- Get last error message + local parts = stringy.split(IO.read_file(LOG_FILE), "\n") + return nil, "Could not start serf: "..string.gsub(parts[#parts - 1], "==> ", "") + end + else + return nil, res + end +end + +function Serf:invoke_signal(signal, args, no_rpc, skip_running_check) + if not skip_running_check and not self:is_running() then + return nil, SERVICE_NAME.." is not running" + end + + local cmd, err = self:_get_cmd() + if err then + return nil, err + end + + if not args then args = {} end + setmetatable(args, require "kong.tools.printable") + local res, code = IO.os_execute(cmd.." "..signal.." "..(no_rpc and "" or "-rpc-addr="..self._configuration.cluster_listen_rpc).." "..tostring(args), true) + if code == 0 then + return res + else + return false, res + end +end + +function Serf:event(t_payload) + local args = { + ["-coalesce"] = false, + ["-rpc-addr"] = self._configuration.cluster_listen_rpc + } + setmetatable(args, require "kong.tools.printable") + + local encoded_payload = cjson.encode(t_payload) + if string.len(encoded_payload) > 512 then + -- Serf can't send a payload greater than 512 bytes + return false, "Encoded payload is "..string.len(encoded_payload).." and it exceeds the limit of 512 bytes!" + end + + return self:invoke_signal("event "..tostring(args).." kong", {"'"..encoded_payload.."'"}, true) +end + +function Serf:stop() + logger:info("Leaving cluster..") + local _, err = self:invoke_signal("leave") + if err then + return false, err + else + -- Remove the node from the datastore. 
+ -- This is useful when this is the only node running in the cluster. + self._dao_factory.nodes:delete({ + name = cluster_utils.get_node_name(self._configuration) + }) + + -- Finally stop Serf + Serf.super.stop(self, true) + end +end + +return Serf \ No newline at end of file diff --git a/kong/cli/start.lua b/kong/cli/start.lua deleted file mode 100755 index 1f6755d317b..00000000000 --- a/kong/cli/start.lua +++ /dev/null @@ -1,27 +0,0 @@ -#!/usr/bin/env luajit - -local constants = require "kong.constants" -local cutils = require "kong.cli.utils" -local signal = require "kong.cli.utils.signal" -local args = require("lapp")(string.format([[ -Start Kong with given configuration. Kong will run in the configured 'nginx_working_dir' directory. - -Usage: kong start [options] - -Options: - -c,--config (default %s) path to configuration file -]], constants.CLI.GLOBAL_KONG_CONF)) - --- Check if running, will exit if yes -local running = signal.is_running(args.config) -if running then - cutils.logger:error_exit("Could not start Kong because it is already running") -end - -signal.prepare_kong(args.config) - -if signal.send_signal(args.config) then - cutils.logger:success("Started") -else - cutils.logger:error_exit("Could not start Kong") -end diff --git a/kong/cli/stop.lua b/kong/cli/stop.lua deleted file mode 100755 index e7f7183a15b..00000000000 --- a/kong/cli/stop.lua +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env luajit - -local constants = require "kong.constants" -local cutils = require "kong.cli.utils" -local signal = require "kong.cli.utils.signal" -local args = require("lapp")(string.format([[ -Fast shutdown. Stop the Kong instance running in the configured 'nginx_working_dir' directory. 
- -Usage: kong stop [options] - -Options: - -c,--config (default %s) path to configuration file -]], constants.CLI.GLOBAL_KONG_CONF)) - --- Check if running, will exit if not -local running, err = signal.is_running(args.config) -if not running then - cutils.logger:error_exit(err) -end - --- Send 'stop' signal (fast shutdown) -if signal.send_signal(args.config, signal.STOP) then - cutils.logger:success("Stopped") -else - cutils.logger:error_exit("Could not stop Kong") -end diff --git a/kong/cli/utils.lua b/kong/cli/utils.lua deleted file mode 100644 index 88d5c349b94..00000000000 --- a/kong/cli/utils.lua +++ /dev/null @@ -1,144 +0,0 @@ ---[[ -Kong CLI utilities - - Logging - - Luarocks helpers -]] - -local ansicolors = require "ansicolors" -local constants = require "kong.constants" -local Object = require "classic" -local lpath = require "luarocks.path" -local IO = require "kong.tools.io" - --- --- Colors --- -local colors = {} -for _, v in ipairs({"red", "green", "yellow", "blue"}) do - colors[v] = function(str) return ansicolors("%{"..v.."}"..str.."%{reset}") end -end - -local function trim(s) - return (s:gsub("^%s*(.-)%s*$", "%1")) -end - --- --- Logging --- -local Logger = Object:extend() - -function Logger:new(silent) - self.silent = silent -end - -function Logger:print(str) - if not self.silent then - print(trim(str)) - end -end - -function Logger:info(str) - self:print(colors.blue("[INFO] ")..str) -end - -function Logger:success(str) - self:print(colors.green("[OK] ")..str) -end - -function Logger:warn(str) - self:print(colors.yellow("[WARN] ")..str) -end - -function Logger:error(str) - self:print(colors.red("[ERR] ")..str) -end - -function Logger:error_exit(str) - self:error(str) - os.exit(1) -end - -local logger = Logger() - --- --- Luarocks --- -local function get_kong_infos() - return { name = constants.NAME, version = constants.ROCK_VERSION } -end - -local function get_luarocks_dir() - local cfg = require "luarocks.cfg" - local search = require 
"luarocks.search" - local infos = get_kong_infos() - - local tree_map = {} - local results = {} - - for _, tree in ipairs(cfg.rocks_trees) do - local rocks_dir = lpath.rocks_dir(tree) - tree_map[rocks_dir] = tree - search.manifest_search(results, rocks_dir, search.make_query(infos.name:lower(), infos.version)) - end - - local version - for k, _ in pairs(results.kong) do - version = k - end - - return tree_map[results.kong[version][1].repo] -end - -local function get_luarocks_config_dir() - local repo = get_luarocks_dir() - local infos = get_kong_infos() - return lpath.conf_dir(infos.name:lower(), infos.version, repo) -end - -local function get_luarocks_install_dir() - local repo = get_luarocks_dir() - local infos = get_kong_infos() - return lpath.install_dir(infos.name:lower(), infos.version, repo) -end - -local function get_kong_config_path(args_config) - local config_path = args_config - - -- Use the rock's config if no config at default location - if not IO.file_exists(config_path) then - logger:warn("No configuration at: "..config_path.." 
using default config instead.") - config_path = IO.path:join(get_luarocks_config_dir(), "kong.yml") - end - - -- Make sure the configuration file really exists - if not IO.file_exists(config_path) then - logger:warn("No configuration at: "..config_path) - logger:error_exit("Could not find a configuration file.") - end - - return config_path -end - --- Checks if a port is available to bind a server to on localhost --- @param `port` The port to check --- @return `open` Truthy if available, falsy + error otherwise -local function is_port_bindable(port) - local server, success, err - server = require("socket").tcp() - server:setoption('reuseaddr', true) - success, err = server:bind("*", port) - if success then - success, err = server:listen() - end - server:close() - return success, err -end - -return { - colors = colors, - logger = logger, - get_kong_infos = get_kong_infos, - get_kong_config_path = get_kong_config_path, - get_luarocks_install_dir = get_luarocks_install_dir, - is_port_bindable = is_port_bindable -} diff --git a/kong/cli/utils/dnsmasq.lua b/kong/cli/utils/dnsmasq.lua deleted file mode 100644 index eb6ddfdbd87..00000000000 --- a/kong/cli/utils/dnsmasq.lua +++ /dev/null @@ -1,44 +0,0 @@ -local IO = require "kong.tools.io" -local cutils = require "kong.cli.utils" -local constants = require "kong.constants" -local stringy = require "stringy" - -local _M = {} - -function _M.stop(kong_config) - local pid_file = kong_config.nginx_working_dir.."/"..constants.CLI.DNSMASQ_PID - local _, code = IO.kill_process_by_pid_file(pid_file) - if code and code == 0 then - cutils.logger:info("dnsmasq stopped") - end -end - -function _M.start(nginx_working_dir, dnsmasq_port) - local cmd = IO.cmd_exists("dnsmasq") and "dnsmasq" - - if not cmd then -- Load dnsmasq given the PATH settings - local env_path = (os.getenv("PATH")..":" or "").."/usr/local/sbin:/usr/sbin" -- Also check in default paths - local paths = stringy.split(env_path, ":") - for _, path in ipairs(paths) do - if 
IO.file_exists(path..(stringy.endswith(path, "/") and "" or "/").."dnsmasq") then - cmd = path.."/dnsmasq" - break - end - end - end - - if not cmd then - cutils.logger:error_exit("Can't find dnsmasq") - end - - -- Start the dnsmasq daemon - local file_pid = nginx_working_dir..(stringy.endswith(nginx_working_dir, "/") and "" or "/")..constants.CLI.DNSMASQ_PID - local res, code = IO.os_execute(cmd.." -p "..dnsmasq_port.." --pid-file="..file_pid.." -N -o") - if code ~= 0 then - cutils.logger:error_exit(res) - else - cutils.logger:info("dnsmasq started ("..cmd..")") - end -end - -return _M \ No newline at end of file diff --git a/kong/cli/utils/logger.lua b/kong/cli/utils/logger.lua new file mode 100644 index 00000000000..22d0ec5d9fc --- /dev/null +++ b/kong/cli/utils/logger.lua @@ -0,0 +1,50 @@ +--[[ +Kong CLI logging +--]] + +local ansicolors = require "ansicolors" +local Object = require "classic" +local stringy = require "stringy" + +-- +-- Colors +-- +local colors = {} +for _, v in ipairs({"red", "green", "yellow", "blue"}) do + colors[v] = function(str) return ansicolors("%{"..v.."}"..str.."%{reset}") end +end + +-- +-- Logging +-- +local Logger = Object:extend() + +Logger.colors = colors + +function Logger:set_silent(silent) + self._silent = silent +end + +function Logger:print(str) + if not self._silent then + print(stringy.strip(str)) + end +end + +function Logger:info(str) + self:print(colors.blue("[INFO] ")..str) +end + +function Logger:success(str) + self:print(colors.green("[OK] ")..str) +end + +function Logger:warn(str) + self:print(colors.yellow("[WARN] ")..str) +end + +function Logger:error(str) + self:print(colors.red("[ERR] ")..str) +end + +return Logger() diff --git a/kong/cli/utils/luarocks.lua b/kong/cli/utils/luarocks.lua new file mode 100644 index 00000000000..2f27b7849ae --- /dev/null +++ b/kong/cli/utils/luarocks.lua @@ -0,0 +1,47 @@ +local constants = require "kong.constants" +local lpath = require "luarocks.path" + +local Luarocks = {} + +-- 
+
+-- Luarocks
+--
+function Luarocks.get_kong_infos()
+  return { name = constants.NAME, version = constants.ROCK_VERSION }
+end
+
+function Luarocks.get_dir()
+  local cfg = require "luarocks.cfg"
+  local search = require "luarocks.search"
+  local infos = Luarocks.get_kong_infos()
+
+  local tree_map = {}
+  local results = {}
+
+  for _, tree in ipairs(cfg.rocks_trees) do
+    local rocks_dir = lpath.rocks_dir(tree)
+    tree_map[rocks_dir] = tree
+    search.manifest_search(results, rocks_dir, search.make_query(infos.name:lower(), infos.version))
+  end
+
+  local version
+  for k, _ in pairs(results.kong) do
+    version = k
+  end
+
+  return tree_map[results.kong[version][1].repo]
+end
+
+function Luarocks.get_config_dir()
+  local repo = Luarocks.get_dir()
+  local infos = Luarocks.get_kong_infos()
+  return lpath.conf_dir(infos.name:lower(), infos.version, repo)
+end
+
+function Luarocks.get_install_dir()
+  local repo = Luarocks.get_dir()
+  local infos = Luarocks.get_kong_infos()
+  return lpath.install_dir(infos.name:lower(), infos.version, repo)
+end
+
+return Luarocks
\ No newline at end of file
diff --git a/kong/cli/utils/services.lua b/kong/cli/utils/services.lua
new file mode 100644
index 00000000000..243d670ccdd
--- /dev/null
+++ b/kong/cli/utils/services.lua
@@ -0,0 +1,104 @@
+local logger = require "kong.cli.utils.logger"
+local dao = require "kong.tools.dao_loader"
+
+local _M = {}
+
+_M.STATUSES = {
+  ALL_RUNNING = "ALL_RUNNING",
+  SOME_RUNNING = "SOME_RUNNING",
+  NOT_RUNNING = "NOT_RUNNING"
+}
+
+-- Services ordered by priority
+local services = {
+  require "kong.cli.services.dnsmasq",
+  require "kong.cli.services.nginx",
+  require "kong.cli.services.serf"
+}
+
+local function prepare_database(configuration)
+  setmetatable(configuration.dao_config, require "kong.tools.printable")
+  logger:info(string.format([[database...........%s %s]], configuration.database, tostring(configuration.dao_config)))
+
+  local dao_factory = dao.load(configuration)
+  local migrations = 
require("kong.tools.migrations")(dao_factory, configuration) + + local keyspace_exists, err = dao_factory.migrations:keyspace_exists() + if err then + return false, err + elseif not keyspace_exists then + logger:info("Database not initialized. Running migrations...") + end + + local function before(identifier) + logger:info(string.format( + "Migrating %s on keyspace \"%s\" (%s)", + logger.colors.yellow(identifier), + logger.colors.yellow(dao_factory.properties.keyspace), + dao_factory.type + )) + end + + local function on_each_success(identifier, migration) + logger:info(string.format( + "%s migrated up to: %s", + identifier, + logger.colors.yellow(migration.name) + )) + end + + local err = migrations:run_all_migrations(before, on_each_success) + if err then + return false, err + end + + return true +end + +function _M.check_status(configuration, configuration_path) + local running, not_running + + for index, service in ipairs(services) do + if service(configuration, configuration_path):is_running() then + running = true + print("INDEX "..index.." 
IS RUNNING") + else + not_running = true + end + end + + if running and not not_running then + return _M.STATUSES.ALL_RUNNING + elseif not_running and not running then + return _M.STATUSES.NOT_RUNNING + else + return _M.STATUSES.SOME_RUNNING + end +end + +function _M.stop_all(configuration, configuration_path) + for _, service in ipairs(services) do + service(configuration, configuration_path):stop() + end +end + +function _M.start_all(configuration, configuration_path) + -- Prepare database if not initialized yet + local _, err = prepare_database(configuration) + if err then + return false, err + end + + for _, v in ipairs(services) do + local obj = v(configuration, configuration_path) + obj:prepare() + local ok, err = obj:start() + if not ok then + return ok, err + end + end + + return true +end + +return _M \ No newline at end of file diff --git a/kong/cli/utils/signal.lua b/kong/cli/utils/signal.lua deleted file mode 100644 index 56249d8636d..00000000000 --- a/kong/cli/utils/signal.lua +++ /dev/null @@ -1,362 +0,0 @@ --- Send signals to the `nginx` executable --- Run the necessary so the nginx working dir (prefix) and database are correctly prepared --- @see http://nginx.org/en/docs/beginners_guide.html#control - -local IO = require "kong.tools.io" -local cutils = require "kong.cli.utils" -local ssl = require "kong.cli.utils.ssl" -local constants = require "kong.constants" -local syslog = require "kong.tools.syslog" -local socket = require "socket" -local dnsmasq = require "kong.cli.utils.dnsmasq" -local config = require "kong.tools.config_loader" -local dao = require "kong.tools.dao_loader" - --- Cache config path, parsed config and DAO factory -local kong_config_path, kong_config - --- Retrieve the desired Kong config file, parse it and provides a DAO factory --- Will cache them for future retrieval --- @param args_config Path to the desired configuration (usually from the --config CLI argument) --- @return Parsed desired Kong configuration --- @return Path 
to desired Kong config --- @return Instanciated DAO factory -local function get_kong_config(args_config) - -- Get configuration from default or given path - if not kong_config_path then - kong_config_path = cutils.get_kong_config_path(args_config) - cutils.logger:info("Using configuration: "..kong_config_path) - end - if not kong_config then - kong_config = config.load(kong_config_path) - end - return kong_config, kong_config_path -end - --- Check if an executable (typically `nginx`) is a distribution of openresty --- @param path_to_check Path to the binary --- @return true or false -local function is_openresty(path_to_check) - local cmd = path_to_check.." -v" - local out = IO.os_execute(cmd) - return out:match("^nginx version: ngx_openresty/") - or out:match("^nginx version: openresty/") - or out:match("^nginx version: nginx/[%w.%s]+%(nginx%-plus%-extras.+%)") -end - --- Preferred paths where to search for an `nginx` executable in priority to the $PATH -local NGINX_BIN = "nginx" -local NGINX_SEARCH_PATHS = { - "/usr/local/openresty/nginx/sbin/", - "/usr/local/opt/openresty/bin/", - "/usr/local/bin/", - "/usr/sbin/", - "" -- to check the $PATH -} - --- Try to find an `nginx` executable in defined paths, or in $PATH --- @return Path to found executable or nil if none was found -local function find_nginx() - for i = 1, #NGINX_SEARCH_PATHS do - local prefix = NGINX_SEARCH_PATHS[i] - local to_check = prefix..NGINX_BIN - if is_openresty(to_check) then - return to_check - end - end -end - --- Prepare the nginx `--prefix` directory (working directory) --- Extract the nginx config from a Kong config file into an `nginx.conf` file --- @param args_config Path to the desired configuration (usually from the --config CLI argument) -local function prepare_nginx_working_dir(args_config) - local kong_config = get_kong_config(args_config) - - -- Create nginx folder if needed - local _, err = IO.path:mkdir(IO.path:join(kong_config.nginx_working_dir, "logs")) - if err then - 
cutils.logger:error_exit(err) - end - - -- Create logs files - os.execute("touch "..IO.path:join(kong_config.nginx_working_dir, "logs", "error.log")) - os.execute("touch "..IO.path:join(kong_config.nginx_working_dir, "logs", "access.log")) - - -- Create SSL folder if needed - local _, err = IO.path:mkdir(IO.path:join(kong_config.nginx_working_dir, "ssl")) - if err then - cutils.logger:error_exit(err) - end - - ssl.prepare_ssl(kong_config) - local ssl_cert_path, ssl_key_path = ssl.get_ssl_cert_and_key(kong_config) - local trusted_ssl_cert_path = kong_config.dao_config.ssl_certificate -- DAO ssl cert - - -- Extract nginx config from kong config, replace any needed value - local nginx_config = kong_config.nginx - local nginx_inject = { - proxy_port = kong_config.proxy_port, - proxy_ssl_port = kong_config.proxy_ssl_port, - admin_api_port = kong_config.admin_api_port, - dns_resolver = kong_config.dns_resolver.address, - memory_cache_size = kong_config.memory_cache_size, - ssl_cert = ssl_cert_path, - ssl_key = ssl_key_path, - lua_ssl_trusted_certificate = trusted_ssl_cert_path ~= nil and "lua_ssl_trusted_certificate \""..trusted_ssl_cert_path.."\";" or "" - } - - -- Auto-tune - local res, code = IO.os_execute("ulimit -n") - if code == 0 then - nginx_inject.auto_worker_rlimit_nofile = res - nginx_inject.auto_worker_connections = tonumber(res) > 16384 and 16384 or res - else - cutils.logger:error_exit("Can't determine ulimit") - end - - -- Inject properties - for k, v in pairs(nginx_inject) do - nginx_config = nginx_config:gsub("{{"..k.."}}", v) - end - - -- Inject additional configurations - nginx_inject = { - nginx_plus_status = kong_config.nginx_plus_status and "location /status { status; }" or nil - } - - for _, v in pairs(nginx_inject) do - nginx_config = nginx_config:gsub("# {{additional_configuration}}", "# {{additional_configuration}}\n "..v) - end - - -- Inject anonymous reports - if kong_config.send_anonymous_reports then - -- If there is no internet connection, 
disable this feature - if socket.dns.toip(constants.SYSLOG.ADDRESS) then - nginx_config = "error_log syslog:server="..constants.SYSLOG.ADDRESS..":"..tostring(constants.SYSLOG.PORT).." error;\n"..nginx_config - else - cutils.logger:warn("The internet connection might not be available, cannot resolve "..constants.SYSLOG.ADDRESS) - end - end - - -- Write nginx config - local ok, err = IO.write_to_file(IO.path:join(kong_config.nginx_working_dir, constants.CLI.NGINX_CONFIG), nginx_config) - if not ok then - cutils.logger:error_exit(err) - end -end - --- Prepare the database keyspace if needed (run schema migrations) --- @param args_config Path to the desired configuration (usually from the --config CLI argument) -local function prepare_database(args_config) - local kong_config = get_kong_config(args_config) - local dao_factory = dao.load(kong_config) - local migrations = require("kong.tools.migrations")(dao_factory, kong_config) - - local keyspace_exists, err = dao_factory.migrations:keyspace_exists() - if err then - cutils.logger:error_exit(err) - elseif not keyspace_exists then - cutils.logger:info("Database not initialized. 
Running migrations...") - end - - local function before(identifier) - cutils.logger:info(string.format( - "Migrating %s on keyspace \"%s\" (%s)", - cutils.colors.yellow(identifier), - cutils.colors.yellow(dao_factory.properties.keyspace), - dao_factory.type - )) - end - - local function on_each_success(identifier, migration) - cutils.logger:info(string.format( - "%s migrated up to: %s", - identifier, - cutils.colors.yellow(migration.name) - )) - end - - local err = migrations:run_all_migrations(before, on_each_success) - if err then - cutils.logger:error_exit(err) - end -end - --- --- PUBLIC --- - -local _M = {} - --- Constants -local START = "start" -local RESTART = "restart" -local RELOAD = "reload" -local STOP = "stop" -local QUIT = "quit" - -_M.RELOAD = RELOAD -_M.STOP = STOP -_M.QUIT = QUIT - -function _M.prepare_kong(args_config, signal) - local kong_config = get_kong_config(args_config) - local dao_config = kong_config.dao_config - - local printable_mt = require "kong.tools.printable" - setmetatable(dao_config, printable_mt) - - -- Print important informations - cutils.logger:info(string.format([[Kong version.......%s - Proxy HTTP port....%s - Proxy HTTPS port...%s - Admin API port.....%s - DNS resolver.......%s - Database...........%s %s - ]], - constants.VERSION, - kong_config.proxy_port, - kong_config.proxy_ssl_port, - kong_config.admin_api_port, - kong_config.dns_resolver.address, - kong_config.database, - tostring(dao_config))) - - cutils.logger:info("Connecting to the database...") - prepare_database(args_config) - prepare_nginx_working_dir(args_config, signal) -end - --- Checks whether a port is available. Exits the application if not available. --- @param port The port to check --- @param name Functional name the port is used for (display name) --- @param timeout (optional) Timeout in seconds after which a failure is logged --- and application exit is performed, if not provided then it will fail at once without retries. 
-local function check_port(port, name, timeout) - local expire = socket.gettime() + (timeout or 0) - local msg = tostring(port) .. (name and " ("..tostring(name)..")") - local warned - while not cutils.is_port_bindable(port) do - if expire <= socket.gettime() then - cutils.logger:error_exit("Port "..msg.." is being blocked by another process.") - else - if not warned then - cutils.logger:warn("Port "..msg.." is unavailable, retrying for "..tostring(timeout).." seconds") - warned = true - end - end - socket.sleep(0.5) - end -end - --- Send a signal to `nginx`. No signal will start the process --- This function wraps the control of the `nginx` execution. --- @see http://nginx.org/en/docs/beginners_guide.html#control --- @param args_config Path to the desired configuration (usually from the --config CLI argument) --- @param signal Signal to send. Ignoring this argument will try to start `nginx` --- @return A boolean: true for success, false otherwise -function _M.send_signal(args_config, signal) - -- Make sure nginx is there and is openresty - local port_timeout = 1 -- OPT: make timeout configurable (note: this is a blocking timeout!) 
- local nginx_path = find_nginx() - if not nginx_path then - cutils.logger:error_exit(string.format("Kong cannot find an 'nginx' executable.\nMake sure it is in your $PATH or in one of the following directories:\n%s", table.concat(NGINX_SEARCH_PATHS, "\n"))) - end - - local kong_config, kong_config_path = get_kong_config(args_config) - if not signal then signal = START end - - if signal == START then - local ports = { - ["Kong proxy"] = kong_config.proxy_port, - ["Kong proxy ssl"] = kong_config.proxy_ssl_port, - ["Kong admin api"] = kong_config.admin_api_port - } - for name, port in pairs(ports) do - check_port(port, name, port_timeout) - end - end - - -- Build nginx signal command - local cmd = string.format("KONG_CONF=%s %s -p %s -c %s -g 'pid %s;' %s", - kong_config_path, - nginx_path, - kong_config.nginx_working_dir, - constants.CLI.NGINX_CONFIG, - constants.CLI.NGINX_PID, - signal == START and "" or "-s "..signal) - - -- dnsmasq start/stop - if signal == START then - dnsmasq.stop(kong_config) - if kong_config.dns_resolver.dnsmasq then - local dnsmasq_port = kong_config.dns_resolver.port - check_port(dnsmasq_port, "dnsmasq", port_timeout) - dnsmasq.start(kong_config.nginx_working_dir, dnsmasq_port) - end - elseif signal == STOP or signal == QUIT then - dnsmasq.stop(kong_config) - end - - -- Check ulimit value - if signal == START or signal == RESTART or signal == RELOAD then - local res, code = IO.os_execute("ulimit -n") - if code == 0 and tonumber(res) < 4096 then - cutils.logger:warn('ulimit is currently set to "'..res..'". 
For better performance set it to at least "4096" using "ulimit -n"') - end - end - - -- Report signal action - if kong_config.send_anonymous_reports then - syslog.log({signal=signal}) - end - - -- Start failure handler - local success = os.execute(cmd) == 0 - - if signal == START and not success then - dnsmasq.stop(kong_config) -- If the start failed, then stop dnsmasq - end - - if signal == STOP and success then - if IO.file_exists(kong_config.pid_file) then - os.execute("while [ -f "..kong_config.pid_file.." ]; do sleep 0.5; done") - end - end - - return success -end - --- Test if Kong is already running by detecting a pid file. --- --- Note: --- If the pid file exists but no process seem to be running, will assume the pid --- is obsolete and try to delete it. --- --- @param args_config Path to the desired configuration (usually from the --config CLI argument) --- @return true is running, false otherwise --- @return If not running, an error containing the path where the pid was supposed to be -function _M.is_running(args_config) - -- Get configuration from default or given path - local kong_config = get_kong_config(args_config) - - if IO.file_exists(kong_config.pid_file) then - local pid = IO.read_file(kong_config.pid_file) - local _, code = IO.os_execute("kill -0 "..pid) - if code == 0 then - return true - else - cutils.logger:warn("It seems like Kong crashed the last time it was started!") - cutils.logger:info("Removing pid at: "..kong_config.pid_file) - local _, err = os.remove(kong_config.pid_file) - if err then - error(err) - end - return false, "Not running. Could not find pid: "..pid - end - else - return false, "Not running. 
Could not find pid at: "..kong_config.pid_file - end -end - -return _M diff --git a/kong/cli/utils/ssl.lua b/kong/cli/utils/ssl.lua index 880dfe9a665..fdec6deab8f 100644 --- a/kong/cli/utils/ssl.lua +++ b/kong/cli/utils/ssl.lua @@ -1,4 +1,4 @@ -local cutils = require "kong.cli.utils" +local logger = require "kong.cli.utils.logger" local utils = require "kong.tools.utils" local IO = require "kong.tools.io" @@ -9,7 +9,7 @@ function _M.get_ssl_cert_and_key(kong_config) if (kong_config.ssl_cert_path and not kong_config.ssl_key_path) or (kong_config.ssl_key_path and not kong_config.ssl_cert_path) then - cutils.logger:error_exit("Both \"ssl_cert_path\" and \"ssl_key_path\" need to be specified in the configuration, or none of them") + return false, "Both \"ssl_cert_path\" and \"ssl_key_path\" need to be specified in the configuration, or none of them" elseif kong_config.ssl_cert_path and kong_config.ssl_key_path then ssl_cert_path = kong_config.ssl_cert_path ssl_key_path = kong_config.ssl_key_path @@ -20,13 +20,13 @@ function _M.get_ssl_cert_and_key(kong_config) -- Check that the file exists if ssl_cert_path and not IO.file_exists(ssl_cert_path) then - cutils.logger:error_exit("Can't find default Kong SSL certificate at: "..ssl_cert_path) + return false, "Can't find default Kong SSL certificate at: "..ssl_cert_path end if ssl_key_path and not IO.file_exists(ssl_key_path) then - cutils.logger:error_exit("Can't find default Kong SSL key at: "..ssl_key_path) + return false, "Can't find default Kong SSL key at: "..ssl_key_path end - return ssl_cert_path, ssl_key_path + return { ssl_cert_path = ssl_cert_path, ssl_key_path = ssl_key_path } end function _M.prepare_ssl(kong_config) @@ -35,7 +35,7 @@ function _M.prepare_ssl(kong_config) if not (IO.file_exists(ssl_cert_path) and IO.file_exists(ssl_key_path)) then -- Autogenerating the certificates for the first time - cutils.logger:info("Auto-generating the default SSL certificate and key...") + logger:info("Auto-generating the 
default SSL certificate and key...") local file_name = os.tmpname() local passphrase = utils.random_string() @@ -51,8 +51,10 @@ function _M.prepare_ssl(kong_config) mv ]]..file_name..[[.key ]]..ssl_key_path) if code ~= 0 then - cutils.logger:error_exit("There was an error when auto-generating the default SSL certificate: "..res) + return false, "There was an error when auto-generating the default SSL certificate: "..res end + + return true end end diff --git a/kong/cli/version.lua b/kong/cli/version.lua deleted file mode 100644 index 10b5450c562..00000000000 --- a/kong/cli/version.lua +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env luajit - -local cutils = require "kong.cli.utils" -local constants = require "kong.constants" - -cutils.logger:print(string.format("Kong version: %s", constants.VERSION)) diff --git a/kong/constants.lua b/kong/constants.lua index 1eac8496559..043141f0067 100644 --- a/kong/constants.lua +++ b/kong/constants.lua @@ -1,4 +1,4 @@ -local VERSION = "0.5.4" +local VERSION = "0.6.0rc3" return { NAME = "kong", @@ -11,9 +11,7 @@ return { }, CLI = { GLOBAL_KONG_CONF = "/etc/kong/kong.yml", - NGINX_CONFIG = "nginx.conf", - NGINX_PID = "kong.pid", - DNSMASQ_PID = "dnsmasq.pid", + NGINX_CONFIG = "nginx.conf" }, DATABASE_NULL_ID = "00000000-0000-0000-0000-000000000000", DATABASE_ERROR_TYPES = setmetatable ({ diff --git a/kong/core/cluster.lua b/kong/core/cluster.lua new file mode 100644 index 00000000000..ac78f2aca7b --- /dev/null +++ b/kong/core/cluster.lua @@ -0,0 +1,102 @@ +local cluster_utils = require "kong.tools.cluster" +local Serf = require "kong.cli.services.serf" +local cache = require "kong.tools.database_cache" +local cjson = require "cjson" + +local resty_lock +local status, res = pcall(require, "resty.lock") +if status then + resty_lock = res +end + +local KEEPALIVE_INTERVAL = 30 +local ASYNC_AUTOJOIN_INTERVAL = 3 +local ASYNC_AUTOJOIN_RETRIES = 20 -- Try for max a minute (3s * 20) + +local function create_timer(at, cb) + local ok, err = 
ngx.timer.at(at, cb) + if not ok then + ngx.log(ngx.ERR, "[cluster] failed to create timer: ", err) + end +end + +local function async_autojoin(premature) + if premature then return end + + -- If this node is the only node in the cluster, but other nodes are present, then try to join them + -- This usually happens when two nodes are started very fast, and the first node didn't write his + -- information into the datastore yet. When the second node starts up, there is nothing to join yet. + if not configuration.cluster["auto-join"] then return end + + local lock = resty_lock:new("cluster_autojoin_locks", { + exptime = ASYNC_AUTOJOIN_INTERVAL - 0.001 + }) + local elapsed = lock:lock("async_autojoin") + if elapsed and elapsed == 0 then + -- If the current member count on this node's cluster is 1, but there are more than 1 active nodes in + -- the DAO, then try to join them + local count, err = dao.nodes:count_by_keys() + if err then + ngx.log(ngx.ERR, tostring(err)) + elseif count > 1 then + local serf = Serf(configuration) + local res, err = serf:invoke_signal("members", {["-format"] = "json"}) + if err then + ngx.log(ngx.ERR, tostring(err)) + end + + local members = cjson.decode(res).members + if #members < 2 then + -- Trigger auto-join + local _, err = serf:_autojoin(cluster_utils.get_node_name(configuration)) + if err then + ngx.log(ngx.ERR, tostring(err)) + end + else + return -- The node is already in the cluster and no need to continue + end + end + + -- Create retries counter key if it doesn't exist + if not cache.get(cache.autojoin_retries_key()) then + cache.rawset(cache.autojoin_retries_key(), 0) + end + + local autojoin_retries = cache.incr(cache.autojoin_retries_key(), 1) -- Increment retries counter + if (autojoin_retries < ASYNC_AUTOJOIN_RETRIES) then + create_timer(ASYNC_AUTOJOIN_INTERVAL, async_autojoin) + end + end +end + +local function send_keepalive(premature) + if premature then return end + + local lock = resty_lock:new("cluster_locks", { + 
exptime = KEEPALIVE_INTERVAL - 0.001 + }) + local elapsed = lock:lock("keepalive") + if elapsed and elapsed == 0 then + -- Send keepalive + local node_name = cluster_utils.get_node_name(configuration) + local nodes, err = dao.nodes:find_by_keys({name = node_name}) + if err then + ngx.log(ngx.ERR, tostring(err)) + elseif #nodes == 1 then + local node = table.remove(nodes, 1) + local _, err = dao.nodes:update(node) + if err then + ngx.log(ngx.ERR, tostring(err)) + end + end + end + + create_timer(KEEPALIVE_INTERVAL, send_keepalive) +end + +return { + init_worker = function() + create_timer(KEEPALIVE_INTERVAL, send_keepalive) + create_timer(ASYNC_AUTOJOIN_INTERVAL, async_autojoin) -- Only execute one time + end +} diff --git a/kong/core/events.lua b/kong/core/events.lua new file mode 100644 index 00000000000..ed8ca771681 --- /dev/null +++ b/kong/core/events.lua @@ -0,0 +1,37 @@ +local Object = require "classic" +local Mediator = require "mediator" + +local Events = Object:extend() + +Events.TYPES = { + CLUSTER_PROPAGATE = "CLUSTER_PROPAGATE", + ENTITY_CREATED = "ENTITY_CREATED", + ENTITY_UPDATED = "ENTITY_UPDATED", + ENTITY_DELETED = "ENTITY_DELETED", + ["MEMBER-JOIN"] = "MEMBER-JOIN", + ["MEMBER-LEAVE"] = "MEMBER-LEAVE", + ["MEMBER-FAILED"] = "MEMBER-FAILED", + ["MEMBER-UPDATE"] = "MEMBER-UPDATE", + ["MEMBER-REAP"] = "MEMBER-REAP" +} + +function Events:new(plugins) + self._mediator = Mediator() +end + +function Events:subscribe(event_name, fn) + if fn then + self._mediator:subscribe({event_name}, function(message_t) + fn(message_t) + return nil, true -- Required to tell mediator to continue processing other events + end) + end +end + +function Events:publish(event_name, message_t) + if event_name then + self._mediator:publish({string.upper(event_name)}, message_t) + end +end + +return Events \ No newline at end of file diff --git a/kong/core/handler.lua b/kong/core/handler.lua index 9861eb144a7..5ebb90de271 100644 --- a/kong/core/handler.lua +++ 
b/kong/core/handler.lua @@ -17,9 +17,9 @@ -- `KONG__TIME`: time taken by Kong to execute all the plugins for this context -- -- @see https://github.com/openresty/lua-nginx-module#ngxctx - -local utils = require "kong.tools.utils" local reports = require "kong.core.reports" +local utils = require "kong.tools.utils" +local cluster = require "kong.core.cluster" local resolver = require "kong.core.resolver" local constants = require "kong.constants" local certificate = require "kong.core.certificate" @@ -35,6 +35,7 @@ return { init_worker = { before = function() reports.init_worker() + cluster.init_worker() end }, certificate = { diff --git a/kong/core/hooks.lua b/kong/core/hooks.lua new file mode 100644 index 00000000000..f183c772de7 --- /dev/null +++ b/kong/core/hooks.lua @@ -0,0 +1,162 @@ +local events = require "kong.core.events" +local cache = require "kong.tools.database_cache" +local stringy = require "stringy" +local cjson = require "cjson" +local Serf = require "kong.cli.services.serf" + +local function invalidate_plugin(entity) + cache.delete(cache.plugin_key(entity.name, entity.api_id, entity.consumer_id)) +end + +local function invalidate(message_t) + if message_t.collection == "consumers" then + cache.delete(cache.consumer_key(message_t.entity.id)) + elseif message_t.collection == "apis" then + if message_t.entity then + cache.delete(cache.api_key(message_t.entity.id)) + end + cache.delete(cache.all_apis_by_dict_key()) + elseif message_t.collection == "plugins" then + -- Handles both the update and the delete + invalidate_plugin(message_t.old_entity and message_t.old_entity or message_t.entity) + end +end + +local function get_cluster_members() + local serf = require("kong.cli.services.serf")(configuration) + local res, err = serf:invoke_signal("members", { ["-format"] = "json" }) + if err then + ngx.log(ngx.ERR, err) + else + return cjson.decode(res).members + end +end + +local function retrieve_member_address(name) + local members = get_cluster_members() 
+ for _, member in ipairs(members) do + if member.name == name then + return member.addr + end + end +end + +local function parse_member(member_str) + if member_str and stringy.strip(member_str) ~= "" then + local result = {} + local index = 1 + for v in member_str:gmatch("%S+") do + if index == 1 then + result.name = v + elseif index == 2 then + result.cluster_listening_address = retrieve_member_address(result.name) + end + index = index + 1 + end + return result + end +end + +local function member_leave(message_t) + local member = parse_member(message_t.entity) + + local _, err = dao.nodes:delete({ + name = member.name + }) + if err then + ngx.log(ngx.ERR, tostring(err)) + end +end + +local function member_update(message_t, is_reap) + local member = parse_member(message_t.entity) + + local nodes, err = dao.nodes:find_by_keys({ + name = member.name + }) + if err then + ngx.log(ngx.ERR, tostring(err)) + return + end + + if #nodes == 1 then + local node = table.remove(nodes, 1) + node.cluster_listening_address = member.cluster_listening_address + local _, err = dao.nodes:update(node) + if err then + ngx.log(ngx.ERR, tostring(err)) + return + end + end + + if is_reap and dao.nodes:count_by_keys({}) > 1 then + -- Purge the cache when a failed node re-appears + cache.delete_all() + end +end + +local function member_join(message_t) + local member = parse_member(message_t.entity) + + local nodes, err = dao.nodes:find_by_keys({ + name = member.name + }) + if err then + ngx.log(ngx.ERR, tostring(err)) + return + end + + if #nodes == 0 then -- Insert + local _, err = dao.nodes:insert({ + name = stringy.strip(member.name), + cluster_listening_address = stringy.strip(member.cluster_listening_address) + }) + if err then + ngx.log(ngx.ERR, tostring(err)) + return + end + elseif #nodes == 1 then -- Update + member_update(message_t) + else + error("Inconsistency error. 
More than one node found with name "..member.name) + end + + -- Purge the cache when a new node joins + if dao.nodes:count_by_keys({}) > 1 then -- If it's only one node, no need to delete the cache + cache.delete_all() + end +end + +return { + [events.TYPES.ENTITY_UPDATED] = function(message_t) + invalidate(message_t) + end, + [events.TYPES.ENTITY_DELETED] = function(message_t) + invalidate(message_t) + end, + [events.TYPES.ENTITY_CREATED] = function(message_t) + invalidate(message_t) + end, + [events.TYPES.CLUSTER_PROPAGATE] = function(message_t) + local serf = Serf(configuration) + local ok, err = serf:event(message_t) + if not ok then + ngx.log(ngx.ERR, err) + end + end, + [events.TYPES["MEMBER-JOIN"]] = function(message_t) + member_join(message_t) + end, + [events.TYPES["MEMBER-LEAVE"]] = function(message_t) + member_leave(message_t) + end, + [events.TYPES["MEMBER-FAILED"]] = function(message_t) + member_update(message_t) + end, + [events.TYPES["MEMBER-UPDATE"]] = function(message_t) + member_update(message_t) + end, + [events.TYPES["MEMBER-REAP"]] = function(message_t) + member_update(message_t, true) + end +} \ No newline at end of file diff --git a/kong/core/reports.lua b/kong/core/reports.lua index ba9805f6f24..0332fdaa830 100644 --- a/kong/core/reports.lua +++ b/kong/core/reports.lua @@ -1,5 +1,14 @@ local syslog = require "kong.tools.syslog" local cache = require "kong.tools.database_cache" +local utils = require "kong.tools.utils" +local unique_str = utils.random_string() +local enabled = false + +local resty_lock +local status, res = pcall(require, "resty.lock") +if status then + resty_lock = res +end local INTERVAL = 3600 @@ -11,15 +20,16 @@ local function create_timer(at, cb) end local function send_ping(premature) - local resty_lock = require "resty.lock" - local lock = resty_lock:new("locks", { + if premature then return end + + local lock = resty_lock:new("reports_locks", { exptime = INTERVAL - 0.001 }) local elapsed = lock:lock("ping") if elapsed 
and elapsed == 0 then local reqs = cache.get(cache.requests_key()) if not reqs then reqs = 0 end - syslog.log({signal = "ping", requests=reqs, process_id=process_id}) + syslog.log({signal = "ping", requests = reqs, unique_id = unique_str}) cache.incr(cache.requests_key(), -reqs) -- Reset counter end create_timer(INTERVAL, send_ping) @@ -27,10 +37,17 @@ end return { init_worker = function() - cache.rawset(cache.requests_key(), 0, 0) -- Initializing the counter - create_timer(INTERVAL, send_ping) + if enabled then + cache.rawset(cache.requests_key(), 0, 0) -- Initializing the counter + create_timer(INTERVAL, send_ping) + end end, log = function() - cache.incr(cache.requests_key(), 1) + if enabled then + cache.incr(cache.requests_key(), 1) + end + end, + enable = function() + enabled = true end } diff --git a/kong/core/resolver.lua b/kong/core/resolver.lua index f76bb4c7e52..ea3b5ef32f7 100644 --- a/kong/core/resolver.lua +++ b/kong/core/resolver.lua @@ -191,7 +191,7 @@ local function find_api(uri, headers) local api, matched_host, hosts_list, strip_request_path_pattern -- Retrieve all APIs - local apis_dics, err = cache.get_or_set("ALL_APIS_BY_DIC", _M.load_apis_in_memory, 60) -- 60 seconds cache, longer than usual + local apis_dics, err = cache.get_or_set(cache.all_apis_by_dict_key(), _M.load_apis_in_memory) if err then return err end diff --git a/kong/dao/cassandra/apis.lua b/kong/dao/cassandra/apis.lua index 7e5d5d5d02b..3ed91f6c62c 100644 --- a/kong/dao/cassandra/apis.lua +++ b/kong/dao/cassandra/apis.lua @@ -7,10 +7,10 @@ local table_insert = table.insert local Apis = BaseDao:extend() -function Apis:new(properties) +function Apis:new(properties, events_handler) self._table = "apis" self._schema = apis_schema - Apis.super.new(self, properties) + Apis.super.new(self, properties, events_handler) end function Apis:find_all() diff --git a/kong/dao/cassandra/base_dao.lua b/kong/dao/cassandra/base_dao.lua index c300222fff7..05885d2d1c1 100644 --- 
a/kong/dao/cassandra/base_dao.lua +++ b/kong/dao/cassandra/base_dao.lua @@ -15,6 +15,7 @@ local stringy = require "stringy" local Object = require "classic" local utils = require "kong.tools.utils" local uuid = require "lua_uuid" +local event_types = require("kong.core.events").TYPES local table_remove = table.remove local error_types = constants.DATABASE_ERROR_TYPES @@ -155,7 +156,9 @@ function BaseDao:insert(t) if stmt_err then return nil, stmt_err else - return self:_unmarshall(t) + local res = self:_unmarshall(t) + self:event(event_types.ENTITY_CREATED, res) + return res end end @@ -285,7 +288,9 @@ function BaseDao:update(t, full, where_t) if err then return nil, err else - return self:_unmarshall(t) + local res = self:_unmarshall(t) + self:event(event_types.ENTITY_UPDATED, old_entity) + return res end end @@ -401,6 +406,8 @@ function BaseDao:delete(primary_key_t, where_t) end end + self:event(event_types.ENTITY_DELETED, row) + return true end @@ -423,7 +430,7 @@ end -- child class and called once the child class has a schema set. -- @param properties Cassandra properties from the configuration file. -- @treturn table Instanciated DAO. -function BaseDao:new(properties) +function BaseDao:new(properties, events_handler) if self._schema then self._primary_key = self._schema.primary_key self._clustering_key = self._schema.clustering_key @@ -442,6 +449,7 @@ function BaseDao:new(properties) end self.properties = properties + self.events_handler = events_handler self.cascade_delete_hooks = {} end @@ -639,4 +647,24 @@ function BaseDao:add_delete_hook(foreign_dao_name, foreign_column, parent_column table.insert(self.cascade_delete_hooks, delete_hook) end -return BaseDao +-- Publishes an event, if an event handler has been specified. +-- Currently this propagates the events cluster-wide. 
+-- @param[type=string] type The event type to publish +-- @param[type=table] data_t The payload to publish in the event +function BaseDao:event(type, data_t) + if self.events_handler then + if self._schema.marshall_event then + data_t = self._schema.marshall_event(self._schema, data_t) + end + + local payload = { + collection = self._table, + type = type, + entity = data_t + } + + self.events_handler:publish(self.events_handler.TYPES.CLUSTER_PROPAGATE, payload) + end +end + +return BaseDao \ No newline at end of file diff --git a/kong/dao/cassandra/consumers.lua b/kong/dao/cassandra/consumers.lua index 9eda80991ab..e3143233193 100644 --- a/kong/dao/cassandra/consumers.lua +++ b/kong/dao/cassandra/consumers.lua @@ -3,11 +3,11 @@ local consumers_schema = require "kong.dao.schemas.consumers" local Consumers = BaseDao:extend() -function Consumers:new(properties) +function Consumers:new(properties, events_handler) self._table = "consumers" self._schema = consumers_schema - Consumers.super.new(self, properties) + Consumers.super.new(self, properties, events_handler) end return {consumers = Consumers} diff --git a/kong/dao/cassandra/factory.lua b/kong/dao/cassandra/factory.lua index e801c097b9b..d2a7fe7bc73 100644 --- a/kong/dao/cassandra/factory.lua +++ b/kong/dao/cassandra/factory.lua @@ -30,7 +30,8 @@ end -- Instantiate a Cassandra Factory and all its DAOs for various entities -- @param `properties` Cassandra properties -function CassandraFactory:new(properties, plugins, spawn_cluster) +function CassandraFactory:new(properties, plugins, spawn_cluster, events_handler) + self.events_handler = events_handler self.properties = properties self.type = "cassandra" self.daos = {} @@ -43,7 +44,7 @@ function CassandraFactory:new(properties, plugins, spawn_cluster) end -- Load core entities DAOs - for _, entity in ipairs({"apis", "consumers", "plugins"}) do + for _, entity in ipairs({"apis", "consumers", "plugins", "nodes"}) do 
self:load_daos(require("kong.dao.cassandra."..entity)) end @@ -76,7 +77,7 @@ end function CassandraFactory:load_daos(plugin_daos) local dao for name, plugin_dao in pairs(plugin_daos) do - dao = plugin_dao(self.properties) + dao = plugin_dao(self.properties, self.events_handler) dao._factory = self self.daos[name] = dao if dao._schema then diff --git a/kong/dao/cassandra/migrations.lua b/kong/dao/cassandra/migrations.lua index d11467802b4..317adde1e81 100644 --- a/kong/dao/cassandra/migrations.lua +++ b/kong/dao/cassandra/migrations.lua @@ -4,7 +4,7 @@ local BaseDao = require "kong.dao.cassandra.base_dao" local Migrations = BaseDao:extend() -function Migrations:new(properties) +function Migrations:new(properties, events_handler) self._table = "schema_migrations" self.queries = { get_keyspace = [[ @@ -24,7 +24,7 @@ function Migrations:new(properties) ]] } - Migrations.super.new(self, properties) + Migrations.super.new(self, properties, events_handler) end function Migrations:keyspace_exists(keyspace) diff --git a/kong/dao/cassandra/nodes.lua b/kong/dao/cassandra/nodes.lua new file mode 100644 index 00000000000..52fdf011e9a --- /dev/null +++ b/kong/dao/cassandra/nodes.lua @@ -0,0 +1,33 @@ +local BaseDao = require "kong.dao.cassandra.base_dao" +local nodes_schema = require "kong.dao.schemas.nodes" +local query_builder = require "kong.dao.cassandra.query_builder" + +local ipairs = ipairs +local table_insert = table.insert + +local Nodes = BaseDao:extend() + +function Nodes:new(properties, events_handler) + self._table = "nodes" + self._schema = nodes_schema + Nodes.super.new(self, properties, events_handler) +end + +function Nodes:find_all() + local nodes = {} + local select_q = query_builder.select(self._table) + + for rows, err in self:execute(select_q, nil, {auto_paging = true}) do + if err then + return nil, err + elseif rows ~= nil then + for _, row in ipairs(rows) do + table_insert(nodes, row) + end + end + end + + return nodes +end + +return {nodes = Nodes} \ No 
newline at end of file diff --git a/kong/dao/cassandra/plugins.lua b/kong/dao/cassandra/plugins.lua index fa61085877c..3ef53a18deb 100644 --- a/kong/dao/cassandra/plugins.lua +++ b/kong/dao/cassandra/plugins.lua @@ -10,11 +10,11 @@ local table_insert = table.insert local Plugins = BaseDao:extend() -function Plugins:new(properties) +function Plugins:new(properties, events_handler) self._table = "plugins" self._schema = plugins_schema - Plugins.super.new(self, properties) + Plugins.super.new(self, properties, events_handler) end -- @override diff --git a/kong/dao/cassandra/schema/migrations.lua b/kong/dao/cassandra/schema/migrations.lua index a2b009ed4fa..9b371e2b83d 100644 --- a/kong/dao/cassandra/schema/migrations.lua +++ b/kong/dao/cassandra/schema/migrations.lua @@ -101,7 +101,27 @@ local Migrations = { DROP TABLE plugins; ]] end + }, + -- Clustering nodes + { + name = "2015-11-23-817313_nodes", + up = function(options, dao_factory) + return dao_factory:execute_queries [[ + CREATE TABLE IF NOT EXISTS nodes( + name text, + cluster_listening_address text, + created_at timestamp, + PRIMARY KEY (name) + ) WITH default_time_to_live = 3600; + CREATE INDEX IF NOT EXISTS ON nodes(cluster_listening_address); + ]] + end, + down = function(options, dao_factory) + return dao_factory:execute_queries [[ + DROP TABLE nodes; + ]] + end } } -return Migrations +return Migrations \ No newline at end of file diff --git a/kong/dao/schemas/nodes.lua b/kong/dao/schemas/nodes.lua new file mode 100644 index 00000000000..add89dc4caa --- /dev/null +++ b/kong/dao/schemas/nodes.lua @@ -0,0 +1,9 @@ +return { + name = "Node", + primary_key = {"name"}, + fields = { + name = { type = "string" }, + created_at = { type = "timestamp", dao_insert_value = true }, + cluster_listening_address = { type = "string", queryable = true, required = true } + } +} diff --git a/kong/dao/schemas/plugins.lua b/kong/dao/schemas/plugins.lua index c2d3522e994..75d4253ed95 100644 --- a/kong/dao/schemas/plugins.lua +++ 
b/kong/dao/schemas/plugins.lua @@ -55,6 +55,20 @@ return { default = true } }, + marshall_event = function(self, plugin_t) + if plugin_t and plugin_t.config then + local config_schema, err = self.fields.config.schema(plugin_t) + if err then + return false, DaoError(err, constants.DATABASE_ERROR_TYPES.SCHEMA) + end + + if config_schema.marshall_event and type(config_schema.marshall_event) == "function" then + plugin_t.config = config_schema.marshall_event(plugin_t.config) + end + end + + return plugin_t + end, self_check = function(self, plugin_t, dao, is_update) -- Load the config schema local config_schema, err = self.fields.config.schema(plugin_t) diff --git a/kong/kong.lua b/kong/kong.lua index e5edab3ec14..4a0ca981b96 100644 --- a/kong/kong.lua +++ b/kong/kong.lua @@ -29,6 +29,7 @@ local utils = require "kong.tools.utils" local dao_loader = require "kong.tools.dao_loader" local config_loader = require "kong.tools.config_loader" local plugins_iterator = require "kong.core.plugins_iterator" +local Events = require "kong.core.events" local ipairs = ipairs local table_insert = table.insert @@ -39,6 +40,13 @@ local loaded_plugins = {} -- local configuration -- local dao_factory +--- Attach a hooks table to the event bus +local function attach_hooks(events, hooks) + for k, v in pairs(hooks) do + events:subscribe(k, v) + end +end + --- Load enabled plugins on the node. -- Get plugins in the DB (distinct by `name`), compare them with plugins -- in `configuration.plugins`. 
If both lists match, return a list @@ -71,6 +79,12 @@ local function load_node_plugins(configuration) handler = plugin_handler_mod() }) end + + -- Attaching hooks + local loaded, plugin_hooks = utils.load_module_if_exists("kong.plugins."..v..".hooks") + if loaded then + attach_hooks(events, plugin_hooks) + end end table_sort(sorted_plugins, function(a, b) @@ -108,9 +122,19 @@ local Kong = {} function Kong.init() local status, err = pcall(function() configuration = config_loader.load(os.getenv("KONG_CONF")) - dao = dao_loader.load(configuration, true) + events = Events() + dao = dao_loader.load(configuration, true, events) loaded_plugins = load_node_plugins(configuration) - process_id = utils.random_string() + + -- Attach core hooks + attach_hooks(events, require("kong.core.hooks")) + + if configuration.send_anonymous_reports then + -- Generate the unique_str inside the module + local reports = require "kong.core.reports" + reports.enable() + end + ngx.update_time() end) if not status then @@ -171,4 +195,4 @@ function Kong.log() core.log.after() end -return Kong +return Kong \ No newline at end of file diff --git a/kong/plugins/acl/api.lua b/kong/plugins/acl/api.lua index a5731a69035..5b7b1068f0b 100644 --- a/kong/plugins/acl/api.lua +++ b/kong/plugins/acl/api.lua @@ -38,4 +38,4 @@ return { crud.delete(self.params, dao_factory.acls) end } -} +} \ No newline at end of file diff --git a/kong/plugins/acl/daos.lua b/kong/plugins/acl/daos.lua index a5aaeffa9c2..2938a8d14b8 100644 --- a/kong/plugins/acl/daos.lua +++ b/kong/plugins/acl/daos.lua @@ -1,22 +1,37 @@ local BaseDao = require "kong.dao.cassandra.base_dao" +local function check_unique(group, acl) + -- If dao required to make this work in integration tests when adding fixtures + if dao and acl.consumer_id and group then + local res, err = dao.acls:find_by_keys({consumer_id=acl.consumer_id, group=group}) + if not err and #res > 0 then + return false, "ACL group already exist for this consumer" + elseif not err then 
+ return true + end + end +end + local SCHEMA = { primary_key = {"id"}, fields = { id = { type = "id", dao_insert_value = true }, created_at = { type = "timestamp", dao_insert_value = true }, consumer_id = { type = "id", required = true, foreign = "consumers:id", queryable = true }, - group = { type = "string", required = true } - } + group = { type = "string", required = true, func = check_unique } + }, + marshall_event = function(self, t) + return { id = t.id, consumer_id = t.consumer_id } -- We don't need any data in the event + end } local ACLs = BaseDao:extend() -function ACLs:new(properties) +function ACLs:new(properties, events_handler) self._table = "acls" self._schema = SCHEMA - ACLs.super.new(self, properties) + ACLs.super.new(self, properties, events_handler) end return { acls = ACLs } diff --git a/kong/plugins/acl/hooks.lua b/kong/plugins/acl/hooks.lua new file mode 100644 index 00000000000..3c82b1aaa43 --- /dev/null +++ b/kong/plugins/acl/hooks.lua @@ -0,0 +1,23 @@ +local events = require "kong.core.events" +local cache = require "kong.tools.database_cache" + +local function invalidate_cache(consumer_id) + cache.delete(cache.acls_key(consumer_id)) +end + +local function invalidate(message_t) + if message_t.collection == "consumers" then + invalidate_cache(message_t.entity.id) + elseif message_t.collection == "acls" then + invalidate_cache(message_t.entity.consumer_id) + end +end + +return { + [events.TYPES.ENTITY_UPDATED] = function(message_t) + invalidate(message_t) + end, + [events.TYPES.ENTITY_DELETED] = function(message_t) + invalidate(message_t) + end +} \ No newline at end of file diff --git a/kong/plugins/basic-auth/daos.lua b/kong/plugins/basic-auth/daos.lua index 6b95d0c675f..9fd5085bb8f 100644 --- a/kong/plugins/basic-auth/daos.lua +++ b/kong/plugins/basic-auth/daos.lua @@ -2,6 +2,18 @@ local BaseDao = require "kong.dao.cassandra.base_dao" local crypto = require "kong.plugins.basic-auth.crypto" local function encrypt_password(password, 
credential) + -- Don't re-encrypt the password digest on update, if the password hasn't changed + -- This causes a bug when a new password is effectively equal the to previous digest + -- TODO: Better handle this scenario + if credential.id then + if dao then -- Check to make this work with tests + local result = dao.basicauth_credentials:find_by_primary_key({id=credential.id}) + if result and result.password == credential.password then + return true + end + end + end + credential.password = crypto.encrypt(credential) return true end @@ -14,16 +26,19 @@ local SCHEMA = { consumer_id = {type = "id", required = true, queryable = true, foreign = "consumers:id"}, username = {type = "string", required = true, unique = true, queryable = true}, password = {type = "string", func = encrypt_password} - } + }, + marshall_event = function(self, t) + return { id = t.id, consumer_id = t.consumer_id, username = t.username } + end } local BasicAuthCredentials = BaseDao:extend() -function BasicAuthCredentials:new(properties) +function BasicAuthCredentials:new(properties, events_handler) self._table = "basicauth_credentials" self._schema = SCHEMA - BasicAuthCredentials.super.new(self, properties) + BasicAuthCredentials.super.new(self, properties, events_handler) end return {basicauth_credentials = BasicAuthCredentials} diff --git a/kong/plugins/basic-auth/hooks.lua b/kong/plugins/basic-auth/hooks.lua new file mode 100644 index 00000000000..a4a4f44195c --- /dev/null +++ b/kong/plugins/basic-auth/hooks.lua @@ -0,0 +1,17 @@ +local events = require "kong.core.events" +local cache = require "kong.tools.database_cache" + +local function invalidate(message_t) + if message_t.collection == "basicauth_credentials" then + cache.delete(cache.basicauth_credential_key(message_t.old_entity and message_t.old_entity.username or message_t.entity.username)) + end +end + +return { + [events.TYPES.ENTITY_UPDATED] = function(message_t) + invalidate(message_t) + end, + [events.TYPES.ENTITY_DELETED] = 
function(message_t) + invalidate(message_t) + end +} \ No newline at end of file diff --git a/kong/plugins/hmac-auth/access.lua b/kong/plugins/hmac-auth/access.lua index 5bac6ec459d..ce778242a0e 100644 --- a/kong/plugins/hmac-auth/access.lua +++ b/kong/plugins/hmac-auth/access.lua @@ -102,14 +102,10 @@ local function validate_signature(request, hmac_params, headers) end end -local function hmacauth_credential_key(username) - return "hmacauth_credentials/"..username -end - local function load_credential(username) local credential if username then - credential = cache.get_or_set(hmacauth_credential_key(username), function() + credential = cache.get_or_set(cache.hmacauth_credential_key(username), function() local keys, err = dao.hmacauth_credentials:find_by_keys { username = username } local result if err then diff --git a/kong/plugins/hmac-auth/daos.lua b/kong/plugins/hmac-auth/daos.lua index d6cb4a6b096..6d2e2d0d35b 100644 --- a/kong/plugins/hmac-auth/daos.lua +++ b/kong/plugins/hmac-auth/daos.lua @@ -8,15 +8,18 @@ local SCHEMA = { consumer_id = { type = "id", required = true, queryable = true, foreign = "consumers:id" }, username = { type = "string", required = true, unique = true, queryable = true }, secret = { type = "string" } - } + }, + marshall_event = function(self, t) + return { id = t.id, consumer_id = t.consumer_id, username = t.username } + end } local HMACAuthCredentials = BaseDao:extend() -function HMACAuthCredentials:new(properties) +function HMACAuthCredentials:new(properties, events_handler) self._table = "hmacauth_credentials" self._schema = SCHEMA - HMACAuthCredentials.super.new(self, properties) + HMACAuthCredentials.super.new(self, properties, events_handler) end return { hmacauth_credentials = HMACAuthCredentials } diff --git a/kong/plugins/hmac-auth/hooks.lua b/kong/plugins/hmac-auth/hooks.lua new file mode 100644 index 00000000000..e3eaa110f5b --- /dev/null +++ b/kong/plugins/hmac-auth/hooks.lua @@ -0,0 +1,17 @@ +local events = require 
"kong.core.events" +local cache = require "kong.tools.database_cache" + +local function invalidate(message_t) + if message_t.collection == "hmacauth_credentials" then + cache.delete(cache.hmacauth_credential_key(message_t.old_entity and message_t.old_entity.username or message_t.entity.username)) + end +end + +return { + [events.TYPES.ENTITY_UPDATED] = function(message_t) + invalidate(message_t) + end, + [events.TYPES.ENTITY_DELETED] = function(message_t) + invalidate(message_t) + end +} \ No newline at end of file diff --git a/kong/plugins/http-log/handler.lua b/kong/plugins/http-log/handler.lua index d596482e04a..952fc2ffcbf 100644 --- a/kong/plugins/http-log/handler.lua +++ b/kong/plugins/http-log/handler.lua @@ -45,6 +45,8 @@ end -- @param `conf` Configuration table, holds http endpoint details -- @param `message` Message to be logged local function log(premature, conf, message) + if premature then return end + local ok, err local parsed_url = parse_url(conf.http_endpoint) local host = parsed_url.host diff --git a/kong/plugins/jwt/daos.lua b/kong/plugins/jwt/daos.lua index 1c1a6505191..62bc03f972c 100644 --- a/kong/plugins/jwt/daos.lua +++ b/kong/plugins/jwt/daos.lua @@ -9,16 +9,19 @@ local SCHEMA = { consumer_id = {type = "id", required = true, queryable = true, foreign = "consumers:id"}, key = {type = "string", unique = true, queryable = true, default = utils.random_string}, secret = {type = "string", unique = true, default = utils.random_string} - } + }, + marshall_event = function(self, t) + return { id = t.id, consumer_id = t.consumer_id, key = t.key } + end } local Jwt = BaseDao:extend() -function Jwt:new(properties) +function Jwt:new(properties, events_handler) self._table = "jwt_secrets" self._schema = SCHEMA - Jwt.super.new(self, properties) + Jwt.super.new(self, properties, events_handler) end return {jwt_secrets = Jwt} diff --git a/kong/plugins/jwt/handler.lua b/kong/plugins/jwt/handler.lua index c9c294e6423..faf4850c9a8 100644 --- 
a/kong/plugins/jwt/handler.lua +++ b/kong/plugins/jwt/handler.lua @@ -45,10 +45,6 @@ local function retrieve_token(request, conf) end end -local function jwt_secret_cache_key(consumer_id) - return "jwt_secret/"..consumer_id -end - function JwtHandler:new() JwtHandler.super.new(self, "jwt") end @@ -78,7 +74,7 @@ function JwtHandler:access(conf) end -- Retrieve the secret - local jwt_secret = cache.get_or_set(jwt_secret_cache_key(jwt_secret_key), function() + local jwt_secret = cache.get_or_set(cache.jwtauth_credential_key(jwt_secret_key), function() local rows, err = dao.jwt_secrets:find_by_keys {key = jwt_secret_key} if err then return responses.send_HTTP_INTERNAL_SERVER_ERROR() diff --git a/kong/plugins/jwt/hooks.lua b/kong/plugins/jwt/hooks.lua new file mode 100644 index 00000000000..05f2c1b7a8a --- /dev/null +++ b/kong/plugins/jwt/hooks.lua @@ -0,0 +1,17 @@ +local events = require "kong.core.events" +local cache = require "kong.tools.database_cache" + +local function invalidate(message_t) + if message_t.collection == "jwt_secrets" then + cache.delete(cache.jwtauth_credential_key(message_t.old_entity and message_t.old_entity.key or message_t.entity.key)) + end +end + +return { + [events.TYPES.ENTITY_UPDATED] = function(message_t) + invalidate(message_t) + end, + [events.TYPES.ENTITY_DELETED] = function(message_t) + invalidate(message_t) + end +} \ No newline at end of file diff --git a/kong/plugins/key-auth/daos.lua b/kong/plugins/key-auth/daos.lua index 2cfbf540cf5..f07ca2277f5 100644 --- a/kong/plugins/key-auth/daos.lua +++ b/kong/plugins/key-auth/daos.lua @@ -16,16 +16,19 @@ local SCHEMA = { created_at = { type = "timestamp", immutable = true, dao_insert_value = true }, consumer_id = { type = "id", required = true, queryable = true, foreign = "consumers:id" }, key = { type = "string", required = false, unique = true, queryable = true, func = generate_if_missing } - } + }, + marshall_event = function(self, t) + return { id = t.id, consumer_id = t.consumer_id, 
key = t.key } + end } local KeyAuth = BaseDao:extend() -function KeyAuth:new(properties) +function KeyAuth:new(properties, events_handler) self._table = "keyauth_credentials" self._schema = SCHEMA - KeyAuth.super.new(self, properties) + KeyAuth.super.new(self, properties, events_handler) end return { keyauth_credentials = KeyAuth } diff --git a/kong/plugins/key-auth/hooks.lua b/kong/plugins/key-auth/hooks.lua new file mode 100644 index 00000000000..336d02ef6f8 --- /dev/null +++ b/kong/plugins/key-auth/hooks.lua @@ -0,0 +1,17 @@ +local events = require "kong.core.events" +local cache = require "kong.tools.database_cache" + +local function invalidate(message_t) + if message_t.collection == "keyauth_credentials" then + cache.delete(cache.keyauth_credential_key(message_t.old_entity and message_t.old_entity.key or message_t.entity.key)) + end +end + +return { + [events.TYPES.ENTITY_UPDATED] = function(message_t) + invalidate(message_t) + end, + [events.TYPES.ENTITY_DELETED] = function(message_t) + invalidate(message_t) + end +} \ No newline at end of file diff --git a/kong/plugins/loggly/handler.lua b/kong/plugins/loggly/handler.lua index ee3939a9719..d05c2bbcec5 100644 --- a/kong/plugins/loggly/handler.lua +++ b/kong/plugins/loggly/handler.lua @@ -91,6 +91,8 @@ local function decide_severity(conf, severity, message) end local function log(premature, conf, message) + if premature then return end + if message.response.status >= 500 then return decide_severity(conf.log_level, conf.server_errors_severity, message) elseif message.response.status >= 400 then diff --git a/kong/plugins/mashape-analytics/buffer.lua b/kong/plugins/mashape-analytics/buffer.lua index 045abaeed68..7e5d2ef7f03 100644 --- a/kong/plugins/mashape-analytics/buffer.lua +++ b/kong/plugins/mashape-analytics/buffer.lua @@ -62,6 +62,8 @@ buffer_mt.MAX_COLLECTOR_PAYLOAD_SIZE = MAX_COLLECTOR_PAYLOAD_SIZE -- as possible. 
local delayed_send_handler delayed_send_handler = function(premature, buffer) + if premature then return end + if ngx_now() - buffer.latest_call < buffer.auto_flush_delay then -- If the latest call was received during the wait delay, abort the delayed send and -- report it for X more seconds. @@ -191,7 +193,8 @@ end -- If the queue still has payloads to be sent, keep on sending them. -- If the connection to the collector fails, use the retry policy. function buffer_mt.send_batch(premature, self) - if self.lock_sending then return end + if premature or self.lock_sending then return end + self.lock_sending = true -- simple lock if table_getn(self.sending_queue) < 1 then @@ -280,4 +283,4 @@ function buffer_mt.send_batch(premature, self) end end -return buffer_mt +return buffer_mt \ No newline at end of file diff --git a/kong/plugins/oauth2/api.lua b/kong/plugins/oauth2/api.lua index 5f0236be8d4..74ebbda6437 100644 --- a/kong/plugins/oauth2/api.lua +++ b/kong/plugins/oauth2/api.lua @@ -1,8 +1,36 @@ local crud = require "kong.api.crud_helpers" return { + ["/oauth2_tokens/"] = { + GET = function(self, dao_factory) + crud.paginated_set(self, dao_factory.oauth2_tokens) + end, + + PUT = function(self, dao_factory) + crud.put(self.params, dao_factory.oauth2_tokens) + end, + + POST = function(self, dao_factory) + crud.post(self.params, dao_factory.oauth2_tokens) + end + }, + + ["/oauth2_tokens/:id"] = { + GET = function(self, dao_factory) + crud.get(self.params, dao_factory.oauth2_tokens) + end, + + PATCH = function(self, dao_factory) + crud.patch(self.params, dao_factory.oauth2_tokens) + end, + + DELETE = function(self, dao_factory) + crud.delete(self.params, dao_factory.oauth2_tokens) + end + }, + ["/oauth2/"] = { - GET = function(self, dao_factory, helpers) + GET = function(self, dao_factory) crud.paginated_set(self, dao_factory.oauth2_credentials) end }, diff --git a/kong/plugins/oauth2/daos.lua b/kong/plugins/oauth2/daos.lua index f109d1d64d9..d57bcce7719 100644 --- 
a/kong/plugins/oauth2/daos.lua +++ b/kong/plugins/oauth2/daos.lua @@ -10,7 +10,7 @@ local function generate_if_missing(v, t, column) end local function generate_refresh_token(v, t, column) - if t.expires_in > 0 then + if t.expires_in and t.expires_in > 0 then return generate_if_missing(v, t, column) end return true @@ -26,7 +26,10 @@ local OAUTH2_CREDENTIALS_SCHEMA = { client_secret = { type = "string", required = false, unique = true, func = generate_if_missing }, redirect_uri = { type = "url", required = true }, created_at = { type = "timestamp", immutable = true, dao_insert_value = true } - } + }, + marshall_event = function(self, t) + return { id = t.id, consumer_id = t.consumer_id, client_id = t.client_id } + end } local OAUTH2_AUTHORIZATION_CODES_SCHEMA = { @@ -47,37 +50,40 @@ local OAUTH2_TOKENS_SCHEMA = { id = { type = "id", dao_insert_value = true }, credential_id = { type = "id", required = true, queryable = true, foreign = "oauth2_credentials:id" }, token_type = { type = "string", required = true, enum = { BEARER }, default = BEARER }, - access_token = { type = "string", required = false, unique = true, queryable = true, immutable = true, func = generate_if_missing }, - refresh_token = { type = "string", required = false, unique = true, queryable = true, immutable = true, func = generate_refresh_token }, expires_in = { type = "number", required = true }, + access_token = { type = "string", required = false, unique = true, queryable = true, func = generate_if_missing }, + refresh_token = { type = "string", required = false, unique = true, queryable = true, func = generate_refresh_token }, authenticated_userid = { type = "string", required = false, queryable = true }, scope = { type = "string" }, created_at = { type = "timestamp", immutable = true, dao_insert_value = true } - } + }, + marshall_event = function(self, t) + return { id = t.id, credential_id = t.credential_id, access_token = t.access_token } + end } local OAuth2Credentials = BaseDao:extend() 
-function OAuth2Credentials:new(properties) +function OAuth2Credentials:new(properties, events_handler) self._table = "oauth2_credentials" self._schema = OAUTH2_CREDENTIALS_SCHEMA - OAuth2Credentials.super.new(self, properties) + OAuth2Credentials.super.new(self, properties, events_handler) end local OAuth2AuthorizationCodes = BaseDao:extend() -function OAuth2AuthorizationCodes:new(properties) +function OAuth2AuthorizationCodes:new(properties, events_handler) self._table = "oauth2_authorization_codes" self._schema = OAUTH2_AUTHORIZATION_CODES_SCHEMA - OAuth2AuthorizationCodes.super.new(self, properties) + OAuth2AuthorizationCodes.super.new(self, properties, events_handler) end local OAuth2Tokens = BaseDao:extend() -function OAuth2Tokens:new(properties) +function OAuth2Tokens:new(properties, events_handler) self._table = "oauth2_tokens" self._schema = OAUTH2_TOKENS_SCHEMA - OAuth2Tokens.super.new(self, properties) + OAuth2Tokens.super.new(self, properties, events_handler) end return { oauth2_credentials = OAuth2Credentials, oauth2_authorization_codes = OAuth2AuthorizationCodes, oauth2_tokens = OAuth2Tokens } diff --git a/kong/plugins/oauth2/hooks.lua b/kong/plugins/oauth2/hooks.lua new file mode 100644 index 00000000000..9f296166172 --- /dev/null +++ b/kong/plugins/oauth2/hooks.lua @@ -0,0 +1,20 @@ +local events = require "kong.core.events" +local cache = require "kong.tools.database_cache" + +local function invalidate(message_t) + if message_t.collection == "oauth2_credentials" then + cache.delete(cache.oauth2_credential_key(message_t.old_entity and message_t.old_entity.client_id or message_t.entity.client_id)) + cache.delete(cache.oauth2_credential_key(message_t.entity.id)) + elseif message_t.collection == "oauth2_tokens" then + cache.delete(cache.oauth2_token_key(message_t.old_entity and message_t.old_entity.access_token or message_t.entity.access_token)) + end +end + +return { + [events.TYPES.ENTITY_UPDATED] = function(message_t) + invalidate(message_t) + end, + 
[events.TYPES.ENTITY_DELETED] = function(message_t) + invalidate(message_t) + end +} \ No newline at end of file diff --git a/kong/plugins/rate-limiting/daos.lua b/kong/plugins/rate-limiting/daos.lua index 37ac937851d..84e62ed1a65 100644 --- a/kong/plugins/rate-limiting/daos.lua +++ b/kong/plugins/rate-limiting/daos.lua @@ -8,7 +8,7 @@ local tostring = tostring local RateLimitingMetrics = BaseDao:extend() -function RateLimitingMetrics:new(properties) +function RateLimitingMetrics:new(properties, events_handler) self._table = "ratelimiting_metrics" self.queries = { increment_counter = [[ UPDATE ratelimiting_metrics SET value = value + ? WHERE api_id = ? AND @@ -25,7 +25,7 @@ function RateLimitingMetrics:new(properties) period = ?; ]] } - RateLimitingMetrics.super.new(self, properties) + RateLimitingMetrics.super.new(self, properties, events_handler) end function RateLimitingMetrics:increment(api_id, identifier, current_timestamp, value) diff --git a/kong/plugins/response-ratelimiting/daos.lua b/kong/plugins/response-ratelimiting/daos.lua index e8cdeb03f50..386655c4996 100644 --- a/kong/plugins/response-ratelimiting/daos.lua +++ b/kong/plugins/response-ratelimiting/daos.lua @@ -8,7 +8,7 @@ local tostring = tostring local ResponseRateLimitingMetrics = BaseDao:extend() -function ResponseRateLimitingMetrics:new(properties) +function ResponseRateLimitingMetrics:new(properties, events_handler) self._table = "response_ratelimiting_metrics" self.queries = { increment_counter = [[ UPDATE response_ratelimiting_metrics SET value = value + ? WHERE api_id = ? 
AND @@ -25,7 +25,7 @@ function ResponseRateLimitingMetrics:new(properties) period = ?; ]] } - ResponseRateLimitingMetrics.super.new(self, properties) + ResponseRateLimitingMetrics.super.new(self, properties, events_handler) end function ResponseRateLimitingMetrics:increment(api_id, identifier, current_timestamp, value, name) diff --git a/kong/plugins/response-ratelimiting/log.lua b/kong/plugins/response-ratelimiting/log.lua index 638c2d95a2b..af9542dbccb 100644 --- a/kong/plugins/response-ratelimiting/log.lua +++ b/kong/plugins/response-ratelimiting/log.lua @@ -9,6 +9,8 @@ local function increment(api_id, identifier, current_timestamp, value, name) end local function log(premature, api_id, identifier, current_timestamp, increments, usage) + if premature then return end + -- Increment metrics for all periods if the request goes through for k, v in pairs(usage) do if increments[k] and increments[k] ~= 0 then diff --git a/kong/plugins/ssl/hooks.lua b/kong/plugins/ssl/hooks.lua new file mode 100644 index 00000000000..264125a5379 --- /dev/null +++ b/kong/plugins/ssl/hooks.lua @@ -0,0 +1,19 @@ +local events = require "kong.core.events" +local cache = require "kong.tools.database_cache" + +local function invalidate(message_t) + if message_t.collection == "apis" then + cache.delete(cache.ssl_data(message_t.entity.id)) + elseif message_t.collection == "plugins" then + cache.delete(cache.ssl_data(message_t.old_entity and message_t.old_entity.api_id or message_t.entity.api_id)) + end +end + +return { + [events.TYPES.ENTITY_UPDATED] = function(message_t) + invalidate(message_t) + end, + [events.TYPES.ENTITY_DELETED] = function(message_t) + invalidate(message_t) + end +} \ No newline at end of file diff --git a/kong/plugins/ssl/schema.lua b/kong/plugins/ssl/schema.lua index 7bcb0c2a327..67a56eca43b 100644 --- a/kong/plugins/ssl/schema.lua +++ b/kong/plugins/ssl/schema.lua @@ -2,19 +2,19 @@ local ssl_util = require "kong.plugins.ssl.ssl_util" local base64 = require "base64" 
local function validate_cert(v) - local der = ssl_util.cert_to_der(v) + local der, err = ssl_util.cert_to_der(v) if der then return true, nil, { _cert_der_cache = base64.encode(der) } end - return false, "Invalid data" + return false, "Invalid data: "..err end local function validate_key(v) - local der = ssl_util.key_to_der(v) + local der, err = ssl_util.key_to_der(v) if der then return true, nil, { _key_der_cache = base64.encode(der) } end - return false, "Invalid data" + return false, "Invalid data: "..err end return { @@ -26,7 +26,10 @@ return { accept_http_if_already_terminated = { required = false, type = "boolean", default = false }, -- Internal use - _cert_der_cache = { type = "string", immutable = true }, - _key_der_cache = { type = "string", immutable = true } - } + _cert_der_cache = { type = "string" }, + _key_der_cache = { type = "string" } + }, + marshall_event = function(self, t) + return {} -- We don't need any value in the cache event + end } diff --git a/kong/plugins/ssl/ssl_util.lua b/kong/plugins/ssl/ssl_util.lua index d5fcbcf2202..25c649a1ef7 100644 --- a/kong/plugins/ssl/ssl_util.lua +++ b/kong/plugins/ssl/ssl_util.lua @@ -12,16 +12,18 @@ local function execute_openssl(data, cmd) IO.write_to_file(input, data) -- Execute OpenSSL command - local _, code = IO.os_execute(string.format(cmd, input, output)) + local res, code = IO.os_execute(string.format(cmd, input, output)) if code == 0 then result = IO.read_file(output) - end - -- Remove temp files - os.remove(input) - os.remove(output) + -- Remove temp files + os.remove(input) + os.remove(output) - return result + return result + else + return false, res + end end function _M.cert_to_der(data) diff --git a/kong/plugins/syslog/handler.lua b/kong/plugins/syslog/handler.lua index f6f3b731bf1..5144d982287 100644 --- a/kong/plugins/syslog/handler.lua +++ b/kong/plugins/syslog/handler.lua @@ -34,6 +34,8 @@ local function send_to_syslog(log_level, severity, message) end local function log(premature, conf, 
message) + if premature then return end + if message.response.status >= 500 then send_to_syslog(conf.log_level, conf.server_errors_severity, message) elseif message.response.status >= 400 then diff --git a/kong/plugins/tcp-log/handler.lua b/kong/plugins/tcp-log/handler.lua index 3823eb74d27..ee5182b59ec 100644 --- a/kong/plugins/tcp-log/handler.lua +++ b/kong/plugins/tcp-log/handler.lua @@ -5,6 +5,8 @@ local cjson = require "cjson" local TcpLogHandler = BasePlugin:extend() local function log(premature, conf, message) + if premature then return end + local ok, err local host = conf.host local port = conf.port diff --git a/kong/plugins/udp-log/handler.lua b/kong/plugins/udp-log/handler.lua index 90bd2844593..8ba04be506d 100644 --- a/kong/plugins/udp-log/handler.lua +++ b/kong/plugins/udp-log/handler.lua @@ -7,6 +7,8 @@ local UdpLogHandler = BasePlugin:extend() UdpLogHandler.PRIORITY = 1 local function log(premature, conf, message) + if premature then return end + local host = conf.host local port = conf.port local timeout = conf.timeout diff --git a/kong/tools/cluster.lua b/kong/tools/cluster.lua new file mode 100644 index 00000000000..613067ca7db --- /dev/null +++ b/kong/tools/cluster.lua @@ -0,0 +1,9 @@ +local utils = require "kong.tools.utils" + +local _M = {} + +function _M.get_node_name(conf) + return utils.get_hostname().."_"..conf.cluster_listen +end + +return _M \ No newline at end of file diff --git a/kong/tools/config_defaults.lua b/kong/tools/config_defaults.lua index 4d3eb2474bb..b47d6ba6f65 100644 --- a/kong/tools/config_defaults.lua +++ b/kong/tools/config_defaults.lua @@ -1,9 +1,11 @@ return { ["custom_plugins"] = {type = "array", default = {}}, ["nginx_working_dir"] = {type = "string", default = "/usr/local/kong"}, - ["proxy_port"] = {type = "number", default = 8000}, - ["proxy_ssl_port"] = {type = "number", default = 8443}, - ["admin_api_port"] = {type = "number", default = 8001}, + ["proxy_listen"] = {type = "string", default = "0.0.0.0:8000"}, + 
["proxy_listen_ssl"] = {type = "string", default = "0.0.0.0:8443"}, + ["admin_api_listen"] = {type = "string", default = "0.0.0.0:8001"}, + ["cluster_listen"] = {type = "string", default = "0.0.0.0:7946"}, + ["cluster_listen_rpc"] = {type = "string", default = "127.0.0.1:7373"}, ["dns_resolver"] = {type = "string", default = "dnsmasq", enum = {"server", "dnsmasq"}}, ["dns_resolvers_available"] = { type = "table", @@ -22,6 +24,14 @@ return { } } }, + ["cluster"] = { + type = "table", + content = { + ["auto-join"] = {type = "boolean", default = true}, + ["advertise"] = {type = "string", nullable = true}, + ["encrypt"] = {type = "string", nullable = true} + } + }, ["database"] = {type = "string", default = "cassandra", enum = {"cassandra"}}, ["cassandra"] = { type = "table", @@ -43,10 +53,9 @@ return { } } }, - ["database_cache_expiration"] = {type = "number", default = 5}, ["ssl_cert_path"] = {type = "string", nullable = true}, ["ssl_key_path"] = {type = "string", nullable = true}, - ["send_anonymous_reports"] = {type = "boolean", default = false}, + ["send_anonymous_reports"] = {type = "boolean", default = true}, ["memory_cache_size"] = {type = "number", default = 128, min = 32}, ["nginx"] = {type = "string", nullable = true} } diff --git a/kong/tools/config_loader.lua b/kong/tools/config_loader.lua index 7012ec40329..25be6509065 100644 --- a/kong/tools/config_loader.lua +++ b/kong/tools/config_loader.lua @@ -1,7 +1,8 @@ local yaml = require "yaml" local IO = require "kong.tools.io" local utils = require "kong.tools.utils" -local cutils = require "kong.cli.utils" +local logger = require "kong.cli.utils.logger" +local luarocks = require "kong.cli.utils.luarocks" local stringy = require "stringy" local constants = require "kong.constants" local config_defaults = require "kong.tools.config_defaults" @@ -14,6 +15,37 @@ local function get_type(value, val_type) end end +local function is_valid_IPv4(ip) + if not ip or stringy.strip(ip) == "" then return false end + + local 
a, b, c, d = ip:match("^(%d%d?%d?)%.(%d%d?%d?)%.(%d%d?%d?)%.(%d%d?%d?)$") + a = tonumber(a) + b = tonumber(b) + c = tonumber(c) + d = tonumber(d) + if not a or not b or not c or not d then return false end + if a < 0 or 255 < a then return false end + if b < 0 or 255 < b then return false end + if c < 0 or 255 < c then return false end + if d < 0 or 255 < d then return false end + + return true +end + +local function is_valid_address(value, only_IPv4) + if not value or stringy.strip(value) == "" then return false end + + local parts = stringy.split(value, ":") + if #parts ~= 2 then return false end + if stringy.strip(parts[1]) == "" then return false end + if only_IPv4 and not is_valid_IPv4(parts[1]) then return false end + local port = tonumber(parts[2]) + if not port then return false end + if not (port > 0 and port <= 65535) then return false end + + return true +end + local checks = { type = function(value, key_infos, value_type) if value_type ~= key_infos.type then @@ -38,7 +70,10 @@ local function validate_config_schema(config, config_schema) for config_key, key_infos in pairs(config_schema) do -- Default value - property = config[config_key] or key_infos.default + property = config[config_key] + if property == nil then + property = key_infos.default + end -- Recursion on table values if key_infos.type == "table" and key_infos.content ~= nil then @@ -81,7 +116,27 @@ function _M.validate(config) return false, errors end - -- Perform complex validations here if needed + -- Check listen addresses + if config.proxy_listen and not is_valid_address(config.proxy_listen) then + return false, {proxy_listen = config.proxy_listen.." is not a valid \"host:port\" value"} + end + if config.proxy_listen_ssl and not is_valid_address(config.proxy_listen_ssl) then + return false, {proxy_listen_ssl = config.proxy_listen_ssl.." 
is not a valid \"host:port\" value"} + end + if config.admin_api_listen and not is_valid_address(config.admin_api_listen) then + return false, {admin_api_listen = config.admin_api_listen.." is not a valid \"host:port\" value"} + end + -- Cluster listen addresses must have an IPv4 host (no hostnames) + if config.cluster_listen and not is_valid_address(config.cluster_listen, true) then + return false, {cluster_listen = config.cluster_listen.." is not a valid \"ip:port\" value"} + end + if config.cluster_listen_rpc and not is_valid_address(config.cluster_listen_rpc, true) then + return false, {cluster_listen_rpc = config.cluster_listen_rpc.." is not a valid \"ip:port\" value"} + end + -- Same for the cluster.advertise value + if config.cluster and config.cluster.advertise and stringy.strip(config.cluster.advertise) ~= "" and not is_valid_address(config.cluster.advertise, true) then + return false, {["cluster.advertise"] = config.cluster.advertise.." is not a valid \"ip:port\" value"} + end return true end @@ -89,7 +144,8 @@ end function _M.load(config_path) local config_contents = IO.read_file(config_path) if not config_contents then - cutils.logger:error_exit("No configuration file at: "..config_path) + logger:error("No configuration file at: "..config_path) + os.exit(1) end local config = yaml.load(config_contents) @@ -100,9 +156,10 @@ function _M.load(config_path) if type(config_error) == "table" then config_error = table.concat(config_error, ", ") end - cutils.logger:warn(string.format("%s: %s", config_key, config_error)) + logger:warn(string.format("%s: %s", config_key, config_error)) end - cutils.logger:error_exit("Invalid properties in given configuration file") + logger:error("Invalid properties in given configuration file") + os.exit(1) end -- Adding computed properties @@ -125,10 +182,20 @@ function _M.load(config_path) config.nginx_working_dir = fs.current_dir().."/"..config.nginx_working_dir end - -- Load all plugins config.plugins = 
utils.table_merge(constants.PLUGINS_AVAILABLE, config.custom_plugins) - return config + return config, config_path +end + +function _M.load_default(config_path) + if not IO.file_exists(config_path) then + logger:warn("No configuration at: "..config_path.." using default config instead.") + config_path = IO.path:join(luarocks.get_config_dir(), "kong.yml") + end + + logger:info("Using configuration: "..config_path) + + return _M.load(config_path) end return _M diff --git a/kong/tools/dao_loader.lua b/kong/tools/dao_loader.lua index d38b843c5fc..cd3a6ccb034 100644 --- a/kong/tools/dao_loader.lua +++ b/kong/tools/dao_loader.lua @@ -1,8 +1,8 @@ local _M = {} -function _M.load(config, spawn_cluster) +function _M.load(config, spawn_cluster, events_handler) local DaoFactory = require("kong.dao."..config.database..".factory") - return DaoFactory(config.dao_config, config.plugins, spawn_cluster) + return DaoFactory(config.dao_config, config.plugins, spawn_cluster, events_handler) end -return _M +return _M \ No newline at end of file diff --git a/kong/tools/database_cache.lua b/kong/tools/database_cache.lua index 18dcd86a288..546791dfaa8 100644 --- a/kong/tools/database_cache.lua +++ b/kong/tools/database_cache.lua @@ -1,106 +1,119 @@ local cjson = require "cjson" +local cache = ngx.shared.cache local CACHE_KEYS = { APIS = "apis", CONSUMERS = "consumers", PLUGINS = "plugins", BASICAUTH_CREDENTIAL = "basicauth_credentials", + HMACAUTH_CREDENTIAL = "hmacauth_credentials", KEYAUTH_CREDENTIAL = "keyauth_credentials", OAUTH2_CREDENTIAL = "oauth2_credentials", + JWTAUTH_CREDENTIAL = "jwtauth_credentials", OAUTH2_TOKEN = "oauth2_token", ACLS = "acls", SSL = "ssl", REQUESTS = "requests", - TIMERS = "timers" + AUTOJOIN_RETRIES = "autojoin_retries", + TIMERS = "timers", + ALL_APIS_BY_DIC = "ALL_APIS_BY_DIC" } local _M = {} -function _M.rawset(key, value, exptime) - local cache = ngx.shared.cache - return cache:set(key, value, exptime or 0) +function _M.rawset(key, value) + return 
cache:set(key, value) end -function _M.set(key, value, exptime) - if exptime == nil then - exptime = configuration and configuration.database_cache_expiration or 0 - end - +function _M.set(key, value) if value then value = cjson.encode(value) - ngx.log(ngx.DEBUG, " saving cache key \""..key.."\": "..value) end - return _M.rawset(key, value, exptime) + return _M.rawset(key, value) end function _M.rawget(key) - ngx.log(ngx.DEBUG, " Try to get cache key \""..key.."\"") - local cache = ngx.shared.cache return cache:get(key) - end function _M.get(key) local value, flags = _M.rawget(key) if value then - ngx.log(ngx.DEBUG, " Found cache value for key \""..key.."\": "..value) value = cjson.decode(value) end return value, flags end function _M.incr(key, value) - local cache = ngx.shared.cache return cache:incr(key, value) end function _M.delete(key) - local cache = ngx.shared.cache cache:delete(key) end +function _M.delete_all() + cache:flush_all() -- This does not free up the memory, only marks the items as expired +end + function _M.requests_key() return CACHE_KEYS.REQUESTS end +function _M.autojoin_retries_key() + return CACHE_KEYS.AUTOJOIN_RETRIES +end + function _M.api_key(host) - return CACHE_KEYS.APIS.."/"..host + return CACHE_KEYS.APIS..":"..host end function _M.consumer_key(id) - return CACHE_KEYS.CONSUMERS.."/"..id + return CACHE_KEYS.CONSUMERS..":"..id end function _M.plugin_key(name, api_id, consumer_id) - return CACHE_KEYS.PLUGINS.."/"..name.."/"..api_id..(consumer_id and "/"..consumer_id or "") + return CACHE_KEYS.PLUGINS..":"..name..":"..api_id..(consumer_id and ":"..consumer_id or "") end function _M.basicauth_credential_key(username) - return CACHE_KEYS.BASICAUTH_CREDENTIAL.."/"..username + return CACHE_KEYS.BASICAUTH_CREDENTIAL..":"..username end function _M.oauth2_credential_key(client_id) - return CACHE_KEYS.OAUTH2_CREDENTIAL.."/"..client_id + return CACHE_KEYS.OAUTH2_CREDENTIAL..":"..client_id end function _M.oauth2_token_key(access_token) - return 
CACHE_KEYS.OAUTH2_TOKEN.."/"..access_token + return CACHE_KEYS.OAUTH2_TOKEN..":"..access_token end function _M.keyauth_credential_key(key) - return CACHE_KEYS.KEYAUTH_CREDENTIAL.."/"..key + return CACHE_KEYS.KEYAUTH_CREDENTIAL..":"..key +end + +function _M.hmacauth_credential_key(username) + return CACHE_KEYS.HMACAUTH_CREDENTIAL..":"..username +end + +function _M.jwtauth_credential_key(secret) + return CACHE_KEYS.JWTAUTH_CREDENTIAL..":"..secret end function _M.acls_key(consumer_id) - return CACHE_KEYS.ACLS.."/"..consumer_id + return CACHE_KEYS.ACLS..":"..consumer_id end function _M.ssl_data(api_id) - return CACHE_KEYS.SSL.."/"..api_id + return CACHE_KEYS.SSL..":"..api_id +end + +function _M.all_apis_by_dict_key() + return CACHE_KEYS.ALL_APIS_BY_DIC end -function _M.get_or_set(key, cb, exptime) +function _M.get_or_set(key, cb) local value, err -- Try to get value = _M.get(key) @@ -110,7 +123,7 @@ function _M.get_or_set(key, cb, exptime) if err then return nil, err elseif value then - local ok, err = _M.set(key, value, exptime) + local ok, err = _M.set(key, value) if not ok then ngx.log(ngx.ERR, err) end diff --git a/kong/tools/faker.lua b/kong/tools/faker.lua index 555980527fe..02eaffe6a09 100644 --- a/kong/tools/faker.lua +++ b/kong/tools/faker.lua @@ -27,6 +27,11 @@ function Faker:fake_entity(type) name = "rate-limiting", config = { second = 10 } } + elseif type == "node" then + return { + name = "random_name_"..r, + cluster_listening_address = "random_address_"..r + } else error("Entity of type "..type.." cannot be generated.") end @@ -39,7 +44,7 @@ function Faker:seed(random_amount) local random_entities = {} - for _, type in ipairs({ "api", "consumer" }) do + for _, type in ipairs({ "api", "consumer", "node" }) do random_entities[type] = {} for i = 1, random_amount do table.insert(random_entities[type], self:fake_entity(type)) @@ -57,7 +62,7 @@ function Faker:insert_from_table(entities_to_insert) -- Insert in order (for foreign relashionships) -- 1. 
consumers and APIs -- 2. credentials, which need references to inserted apis and consumers - for _, type in ipairs({ "api", "consumer", "plugin", "oauth2_credential", "basicauth_credential", "keyauth_credential", "acl", "jwt_secret", "hmacauth_credential" }) do + for _, type in ipairs({ "api", "consumer", "plugin", "node", "oauth2_credential", "basicauth_credential", "keyauth_credential", "acl", "jwt_secret", "hmacauth_credential" }) do if entities_to_insert[type] then for i, entity in ipairs(entities_to_insert[type]) do diff --git a/kong/tools/http_client.lua b/kong/tools/http_client.lua index 5b8e9ccfa7b..6b85fd2bf5f 100644 --- a/kong/tools/http_client.lua +++ b/kong/tools/http_client.lua @@ -52,7 +52,7 @@ local function with_body(method) if type(body) == "table" then body = json.encode(body) end - else + elseif headers["content-type"] ~= "text/plain" then headers["content-type"] = "application/x-www-form-urlencoded" if type(body) == "table" then body = ngx.encode_args(body, true) diff --git a/kong/tools/io.lua b/kong/tools/io.lua index 2c8c521d6d7..7b10d3afb63 100644 --- a/kong/tools/io.lua +++ b/kong/tools/io.lua @@ -26,7 +26,7 @@ end -- @param command OS command to execute -- @return string containing command output (both stdout and stderr) -- @return exitcode -function _M.os_execute(command) +function _M.os_execute(command, preserve_output) local n = os.tmpname() -- get a temporary file name to store output local f = os.tmpname() -- get a temporary file name to store script _M.write_to_file(f, command) @@ -34,7 +34,7 @@ function _M.os_execute(command) local result = _M.read_file(n) os.remove(n) os.remove(f) - return string.gsub(string.gsub(result, "^"..f..":[%s%w]+:%s*", ""), "[%\r%\n]", ""), exit_code / 256 + return preserve_output and result or string.gsub(string.gsub(result, "^"..f..":[%s%w]+:%s*", ""), "[%\r%\n]", ""), exit_code / 256 end --- @@ -85,6 +85,7 @@ function _M.write_to_file(path, value) return true end + --- Get the filesize. 
-- @param path path to file to check -- @return size of file, or `nil` on failure diff --git a/kong/tools/migrations.lua b/kong/tools/migrations.lua index e66b310e582..47b978e27b4 100644 --- a/kong/tools/migrations.lua +++ b/kong/tools/migrations.lua @@ -165,4 +165,4 @@ function Migrations:run_rollback(identifier, before, on_success) end end -return Migrations +return Migrations \ No newline at end of file diff --git a/kong/tools/printable.lua b/kong/tools/printable.lua index 59809fd1b51..2be2be8149c 100644 --- a/kong/tools/printable.lua +++ b/kong/tools/printable.lua @@ -20,7 +20,7 @@ function printable_mt:__tostring() end end - table.insert(t, k.."="..tostring(v)) + table.insert(t, (type(k) == "string" and k.."=" or "")..tostring(v)) end return table.concat(t, " ") end diff --git a/kong/tools/utils.lua b/kong/tools/utils.lua index 8bccb3b205c..c33417a9a4d 100644 --- a/kong/tools/utils.lua +++ b/kong/tools/utils.lua @@ -20,6 +20,18 @@ local string_format = string.format local _M = {} + +--- Retrieves the hostname of the local machine +-- @return string The hostname +function _M.get_hostname() + local f = io.popen ("/bin/hostname") + local hostname = f:read("*a") or "" + f:close() + hostname = string.gsub(hostname, "\n$", "") + return hostname +end + + --- Generates a random unique string -- @return string The random string (a uuid without hyphens) function _M.random_string() diff --git a/spec/integration/admin_api/cache_routes_spec.lua b/spec/integration/admin_api/cache_routes_spec.lua new file mode 100644 index 00000000000..11e34bc74a0 --- /dev/null +++ b/spec/integration/admin_api/cache_routes_spec.lua @@ -0,0 +1,90 @@ +local json = require "cjson" +local http_client = require "kong.tools.http_client" +local spec_helper = require "spec.spec_helpers" +local cache = require "kong.tools.database_cache" + +local GET_URL = spec_helper.STUB_GET_URL + +describe("Admin API", function() + + setup(function() + spec_helper.prepare_db() + spec_helper.insert_fixtures { + api 
= { + {name = "api-cache", request_host = "cache.com", upstream_url = "http://mockbin.org/"}, + } + } + + spec_helper.start_kong() + end) + + teardown(function() + spec_helper.stop_kong() + end) + + describe("/cache/", function() + local BASE_URL = spec_helper.API_URL.."/cache/" + + describe("GET", function() + + it("[FAILURE] should return an error when the key is invalid", function() + local _, status = http_client.get(BASE_URL.."hello") + assert.equal(404, status) + end) + + it("[SUCCESS] should get the value of a cache item", function() + -- Populating cache + local _, status = http_client.get(GET_URL, {}, {host = "cache.com"}) + assert.equal(200, status) + + -- Retrieving cache + local response, status = http_client.get(BASE_URL..cache.all_apis_by_dict_key()) + assert.equal(200, status) + assert.truthy(json.decode(response).by_dns) + end) + + end) + + describe("DELETE", function() + + it("[SUCCESS] should invalidate an entity", function() + -- Populating cache + local _, status = http_client.get(GET_URL, {}, {host = "cache.com"}) + assert.equal(200, status) + + -- Retrieving cache + local response, status = http_client.get(BASE_URL..cache.all_apis_by_dict_key()) + assert.equal(200, status) + assert.truthy(json.decode(response).by_dns) + + -- Delete + local _, status = http_client.delete(BASE_URL..cache.all_apis_by_dict_key()) + assert.equal(204, status) + + -- Make sure it doesn't exist + local _, status = http_client.get(BASE_URL..cache.all_apis_by_dict_key()) + assert.equal(404, status) + end) + + it("[SUCCESS] should invalidate all entities", function() + -- Populating cache + local _, status = http_client.get(GET_URL, {}, {host = "cache.com"}) + assert.equal(200, status) + + -- Retrieving cache + local response, status = http_client.get(BASE_URL..cache.all_apis_by_dict_key()) + assert.equal(200, status) + assert.truthy(json.decode(response).by_dns) + + -- Delete + local _, status = http_client.delete(BASE_URL) + assert.equal(204, status) + + -- Make sure 
it doesn't exist + local _, status = http_client.get(BASE_URL..cache.all_apis_by_dict_key()) + assert.equal(404, status) + end) + end) + + end) +end) \ No newline at end of file diff --git a/spec/integration/admin_api/cluster_routes_spec.lua b/spec/integration/admin_api/cluster_routes_spec.lua new file mode 100644 index 00000000000..66dea0cd594 --- /dev/null +++ b/spec/integration/admin_api/cluster_routes_spec.lua @@ -0,0 +1,106 @@ +local json = require "cjson" +local http_client = require "kong.tools.http_client" +local spec_helper = require "spec.spec_helpers" +local utils = require "kong.tools.utils" + +describe("Admin API", function() + + setup(function() + spec_helper.prepare_db() + spec_helper.start_kong() + end) + + teardown(function() + spec_helper.stop_kong() + end) + + describe("/cluster/events/", function() + local BASE_URL = spec_helper.API_URL.."/cluster/events" + + describe("POST", function() + it("[SUCCESS] should post a new event", function() + local _, status = http_client.post(BASE_URL, {}, {}) + assert.equal(200, status) + end) + end) + + end) + + describe("/cluster/", function() + + local BASE_URL = spec_helper.API_URL.."/cluster/" + + describe("GET", function() + it("[SUCCESS] should get the list of members", function() + os.execute("sleep 2") -- Let's wait for serf to register the node + + local response, status = http_client.get(BASE_URL, {}, {}) + assert.equal(200, status) + local body = json.decode(response) + assert.truthy(body) + assert.equal(1, #body.data) + assert.equal(1, body.total) + + local member = table.remove(body.data, 1) + assert.equal(3, utils.table_size(member)) + assert.truthy(member.address) + assert.truthy(member.name) + assert.truthy(member.status) + + assert.equal("alive", member.status) + end) + end) + + describe("DELETE", function() + + setup(function() + os.execute([[nohup serf agent -rpc-addr=127.0.0.1:20000 -bind=127.0.0.1:20001 -node=helloworld > serf.log 2>&1 & echo $! 
> serf.pid]]) + -- Wait for agent to start + while (os.execute("cat serf.log | grep running > /dev/null") / 256 == 1) do + -- Wait + end + end) + + teardown(function() + os.execute("kill -9 $(cat serf.pid) && rm serf.pid && rm serf.log") + end) + + it("[SUCCESS] should force-leave a node", function() + -- Join node + os.execute("serf join -rpc-addr=127.0.0.1:9101 127.0.0.1:20001 > /dev/null") + + os.execute("sleep 2") -- Let's wait for serf to register the node + + local response, status = http_client.get(BASE_URL, {}, {}) + assert.equal(200, status) + local body = json.decode(response) + assert.truthy(body) + assert.equal(2, #body.data) + assert.equal(2, body.total) + for _, v in ipairs(body.data) do + assert.equal("alive", v.status) + end + + local _, status = http_client.delete(BASE_URL, {name="helloworld"}, {}) + assert.equal(200, status) + os.execute("sleep 2") -- Let's wait for serf to propagate the event + + response, status = http_client.get(BASE_URL, {}, {}) + assert.equal(200, status) + local body = json.decode(response) + assert.truthy(body) + assert.equal(2, #body.data) + assert.equal(2, body.total) + local not_alive + for _, v in ipairs(body.data) do + if v.name == "helloworld" then + assert.equal("leaving", v.status) + not_alive = true + end + end + assert.truthy(not_alive) + end) + end) + + end) +end) \ No newline at end of file diff --git a/spec/integration/admin_api/route_helpers_spec.lua b/spec/integration/admin_api/route_helpers_spec.lua index c35d23419d9..e817179b086 100644 --- a/spec/integration/admin_api/route_helpers_spec.lua +++ b/spec/integration/admin_api/route_helpers_spec.lua @@ -1,9 +1,10 @@ local route_helpers = require "kong.api.route_helpers" +local utils = require "kong.tools.utils" describe("Route Helpers", function() it("should return the hostname", function() - assert.truthy(route_helpers.get_hostname()) + assert.truthy(utils.get_hostname()) end) it("should return parse the nginx status", function() diff --git 
a/spec/integration/cli/quit_spec.lua b/spec/integration/cli/cmds/quit_spec.lua similarity index 100% rename from spec/integration/cli/quit_spec.lua rename to spec/integration/cli/cmds/quit_spec.lua diff --git a/spec/integration/cli/reload_spec.lua b/spec/integration/cli/cmds/reload_spec.lua similarity index 100% rename from spec/integration/cli/reload_spec.lua rename to spec/integration/cli/cmds/reload_spec.lua diff --git a/spec/integration/cli/restart_spec.lua b/spec/integration/cli/cmds/restart_spec.lua similarity index 53% rename from spec/integration/cli/restart_spec.lua rename to spec/integration/cli/cmds/restart_spec.lua index 4003bbbd07a..bd31bcc0092 100644 --- a/spec/integration/cli/restart_spec.lua +++ b/spec/integration/cli/cmds/restart_spec.lua @@ -1,5 +1,4 @@ local spec_helper = require "spec.spec_helpers" -local IO = require "kong.tools.io" describe("CLI", function() @@ -26,24 +25,15 @@ describe("CLI", function() end) it("should restart kong when it's crashed", function() - local kong_pid = IO.read_file(spec_helper.get_env().configuration.pid_file) - if not kong_pid then - -- we might be to quick, so wait and retry - os.execute("sleep 1") - kong_pid = IO.read_file(spec_helper.get_env().configuration.pid_file) - if not kong_pid then error("Could not read Kong pid") end - end - os.execute("pkill -9 nginx") + local _, code = spec_helper.restart_kong() + assert.are.same(0, code) + end) - repeat - -- Wait till it's really over - local _, code = IO.os_execute("kill -0 "..kong_pid) - until(code ~= 0) - - local res, code = spec_helper.restart_kong() + it("should restart when a service has crashed", function() + os.execute("pkill -9 serf") + local _, code = spec_helper.restart_kong() assert.are.same(0, code) - assert.truthy(res:match("It seems like Kong crashed the last time it was started")) end) -end) +end) \ No newline at end of file diff --git a/spec/integration/cli/start_spec.lua b/spec/integration/cli/cmds/start_spec.lua similarity index 69% rename from 
spec/integration/cli/start_spec.lua rename to spec/integration/cli/cmds/start_spec.lua index f7f93066d25..09191836c11 100644 --- a/spec/integration/cli/start_spec.lua +++ b/spec/integration/cli/cmds/start_spec.lua @@ -1,23 +1,33 @@ local spec_helper = require "spec.spec_helpers" local yaml = require "yaml" local IO = require "kong.tools.io" +local http_client = require "kong.tools.http_client" local TEST_CONF = spec_helper.get_env().conf_file local SERVER_CONF = "kong_TEST_SERVER.yml" +local API_URL = spec_helper.API_URL + local function replace_conf_property(key, value) local yaml_value = yaml.load(IO.read_file(TEST_CONF)) yaml_value[key] = value - local ok = IO.write_to_file(SERVER_CONF, yaml.dump(yaml_value)) + local new_config_content = yaml.dump(yaml_value) + + -- Workaround for https://github.com/lubyk/yaml/issues/2 + -- This workaround is in two places. To remove it "Find and replace" in the code + new_config_content = string.gsub(new_config_content, "(%w+:%s*)([%w%.]+:%d+)", "%1\"%2\"") + + local ok = IO.write_to_file(SERVER_CONF, new_config_content) assert.truthy(ok) end describe("CLI", function() setup(function() + spec_helper.prepare_db() + os.execute("cp "..TEST_CONF.." 
"..SERVER_CONF) spec_helper.add_env(SERVER_CONF) - spec_helper.prepare_db(SERVER_CONF) end) teardown(function() @@ -28,7 +38,66 @@ describe("CLI", function() after_each(function() pcall(spec_helper.stop_kong, SERVER_CONF) end) + + describe("Generic", function() + it("should start up all the services", function() + assert.has_no.errors(function() + spec_helper.start_kong(TEST_CONF, true) + end) + + local _, status = http_client.get(API_URL) + assert.equal(200, status) -- is running + + assert.has.errors(function() + spec_helper.start_kong(TEST_CONF, true) + end) + + local _, status = http_client.get(API_URL) + assert.equal(200, status) -- is running + end) + end) + + describe("Nodes", function() + + it("should register and de-register the node into the datastore", function() + assert.has_no.errors(function() + spec_helper.start_kong(TEST_CONF, true) + end) + + local env = spec_helper.get_env() -- test environment + local dao_factory = env.dao_factory + + local nodes = {} + local err + + local start = os.time() + while(#nodes < 1 and (os.time() - start < 10)) do -- 10 seconds timeout + nodes, err = dao_factory.nodes:find_all() + assert.falsy(err) + assert.truthy(nodes) + end + + assert.truthy(#nodes > 0) + + assert.has_no.errors(function() + spec_helper.stop_kong(TEST_CONF, true) + end) + + nodes = {} + + start = os.time() + while(#nodes > 0 and (os.time() - start < 10)) do -- 10 seconds timeout + nodes, err = dao_factory.nodes:find_all() + assert.falsy(err) + assert.truthy(nodes) + end + + assert.truthy(#nodes == 0) + end) + + end) + describe("Startup plugins check", function() it("should start with the default configuration", function() @@ -125,8 +194,8 @@ describe("CLI", function() assert.error_matches(function() spec_helper.start_kong(SERVER_CONF, true) end, "You are using a plugin that has not been enabled in the configuration: custom-rate-limiting", nil, true) - end) end) + end) diff --git a/spec/integration/cli/cmds/status_spec.lua 
b/spec/integration/cli/cmds/status_spec.lua new file mode 100644 index 00000000000..1702fe77c71 --- /dev/null +++ b/spec/integration/cli/cmds/status_spec.lua @@ -0,0 +1,40 @@ +local spec_helper = require "spec.spec_helpers" + +describe("CLI", function() + + setup(function() + pcall(spec_helper.stop_kong) + end) + + teardown(function() + pcall(spec_helper.stop_kong) + end) + + it("the status check should fail when Kong is not running", function() + assert.error_matches(function() + spec_helper.status_kong() + end, "Kong is not running", nil, true) + end) + + it("the status check should not fail when Kong is running", function() + local _, code = spec_helper.start_kong() + assert.are.same(0, code) + local ok = pcall(spec_helper.status_kong) + assert.truthy(ok) + local ok = pcall(spec_helper.stop_kong) + assert.truthy(ok) + end) + + it("the status check should fail when some services are not running", function() + local _, code = spec_helper.start_kong() + assert.are.same(0, code) + + os.execute("pkill serf") + + assert.error_matches(function() + spec_helper.status_kong() + end, "Some services required by Kong are not running. 
Please execute \"kong restart\"!", nil, true) + end) + + +end) diff --git a/spec/integration/cli/version_spec.lua b/spec/integration/cli/cmds/version_spec.lua similarity index 100% rename from spec/integration/cli/version_spec.lua rename to spec/integration/cli/cmds/version_spec.lua diff --git a/spec/integration/cli/services/dnsmasq_spec.lua b/spec/integration/cli/services/dnsmasq_spec.lua new file mode 100644 index 00000000000..2f6f07a87f3 --- /dev/null +++ b/spec/integration/cli/services/dnsmasq_spec.lua @@ -0,0 +1,37 @@ +require("kong.cli.utils.logger"):set_silent(true) -- Set silent for test + +local spec_helper = require "spec.spec_helpers" +local configuration = require "kong.tools.config_loader".load(spec_helper.get_env().conf_file) +local dnsmasq = require("kong.cli.services.dnsmasq")(configuration) + +describe("Dnsmasq", function() + + setup(function() + dnsmasq:prepare() + end) + + it("should start and stop", function() + local ok, err = dnsmasq:start() + assert.truthy(ok) + assert.falsy(err) + + assert.truthy(dnsmasq:is_running()) + + -- Trying again will fail + local ok, err = dnsmasq:start() + assert.falsy(ok) + assert.truthy(err) + assert.equal("dnsmasq is already running", err) + + dnsmasq:stop() + + assert.falsy(dnsmasq:is_running()) + end) + + it("should stop even when not running", function() + assert.falsy(dnsmasq:is_running()) + dnsmasq:stop() + assert.falsy(dnsmasq:is_running()) + end) + +end) diff --git a/spec/integration/cli/services/nginx_spec.lua b/spec/integration/cli/services/nginx_spec.lua new file mode 100644 index 00000000000..22fdd851ede --- /dev/null +++ b/spec/integration/cli/services/nginx_spec.lua @@ -0,0 +1,169 @@ +require("kong.cli.utils.logger"):set_silent(true) -- Set silent for test + +local spec_helper = require "spec.spec_helpers" +local configuration, configuration_path = require("kong.tools.config_loader").load(spec_helper.get_env().conf_file) +local nginx = require("kong.cli.services.nginx")(configuration, 
configuration_path) + +local TIMEOUT = 10 + +describe("Nginx", function() + + setup(function() + spec_helper.prepare_db() + nginx:prepare() + end) + + after_each(function() + local prepare_res, err = nginx:prepare() + assert.falsy(err) + assert.truthy(prepare_res) + + nginx:stop(prepare_res) + + -- Wait for process to quit, with a timeout + local start = os.time() + while (nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + end) + + it("should prepare", function() + local ok, err = nginx:prepare() + assert.falsy(err) + assert.truthy(ok) + + assert.truthy(nginx._configuration) + assert.truthy(type(nginx._configuration) == "table") + + assert.truthy(nginx._configuration_path) + end) + + it("should start and stop", function() + local ok, err = nginx:prepare() + assert.falsy(err) + assert.truthy(ok) + + local ok, err = nginx:start() + assert.truthy(ok) + assert.falsy(err) + + -- Wait for process to start, with a timeout + local start = os.time() + while (not nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + + assert.truthy(nginx:is_running()) + + -- Trying again will fail + local ok, err = nginx:start() + assert.falsy(ok) + assert.truthy(err) + assert.equal("nginx is already running", err) + + nginx:stop() + + -- Wait for process to quit, with a timeout + local start = os.time() + while (nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + + assert.falsy(nginx:is_running()) + end) + + it("should stop even when not running", function() + local ok, err = nginx:prepare() + assert.falsy(err) + assert.truthy(ok) + + assert.falsy(nginx:is_running()) + nginx:stop() + + -- Wait for process to quit, with a timeout + local start = os.time() + while (nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + + assert.falsy(nginx:is_running()) + end) + + it("should quit", function() + local ok, err = nginx:prepare() + assert.falsy(err) + assert.truthy(ok) + + assert.falsy(nginx:is_running()) + + 
local ok, err = nginx:start() + assert.truthy(ok) + assert.falsy(err) + + -- Wait for process to start, with a timeout + local start = os.time() + while (not nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + + assert.truthy(nginx:is_running()) + local ok, err = nginx:quit() + assert.truthy(ok) + assert.falsy(err) + + -- Wait for process to quit, with a timeout + local start = os.time() + while (nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + assert.falsy(nginx:is_running()) + end) + + it("should not quit when not running", function() + local ok, err = nginx:prepare() + assert.falsy(err) + assert.truthy(ok) + + assert.falsy(nginx:is_running()) + local ok, err = nginx:quit() + assert.falsy(ok) + assert.truthy(err) + + -- Wait for process to quit, with a timeout + local start = os.time() + while (nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + assert.falsy(nginx:is_running()) + end) + + it("should reload", function() + local ok, err = nginx:prepare() + assert.falsy(err) + assert.truthy(ok) + + assert.falsy(nginx:is_running()) + + local ok, err = nginx:start() + assert.truthy(ok) + assert.falsy(err) + + -- Wait for process to start, with a timeout + local start = os.time() + while (not nginx:is_running() and os.time() < (start + TIMEOUT)) do + -- Wait + end + + local pid = nginx:is_running() + assert.truthy(pid) + + local ok, err = nginx:reload() + assert.truthy(ok) + assert.falsy(err) + + local new_pid = nginx:is_running() + assert.truthy(new_pid) + assert.truthy(pid == new_pid) + end) + +end) diff --git a/spec/integration/cli/services/serf_spec.lua b/spec/integration/cli/services/serf_spec.lua new file mode 100644 index 00000000000..cf3cb1df494 --- /dev/null +++ b/spec/integration/cli/services/serf_spec.lua @@ -0,0 +1,37 @@ +require("kong.cli.utils.logger"):set_silent(true) -- Set silent for test + +local spec_helper = require "spec.spec_helpers" +local configuration = 
require("kong.tools.config_loader").load(spec_helper.get_env().conf_file) +local serf = require("kong.cli.services.serf")(configuration) + +describe("Serf", function() + + setup(function() + serf:prepare() + end) + + it("should start and stop", function() + local ok, err = serf:start() + assert.truthy(ok) + assert.falsy(err) + + assert.truthy(serf:is_running()) + + -- Trying again will fail + local ok, err = serf:start() + assert.falsy(ok) + assert.truthy(err) + assert.equal("serf is already running", err) + + serf:stop() + + assert.falsy(serf:is_running()) + end) + + it("should stop even when not running", function() + assert.falsy(serf:is_running()) + serf:stop() + assert.falsy(serf:is_running()) + end) + +end) diff --git a/spec/integration/cli/utils/luarocks_spec.lua b/spec/integration/cli/utils/luarocks_spec.lua new file mode 100644 index 00000000000..0024cf3731a --- /dev/null +++ b/spec/integration/cli/utils/luarocks_spec.lua @@ -0,0 +1,21 @@ +local luarocks = require "kong.cli.utils.luarocks" + +describe("Luarocks", function() + + it("should get luarocks dir", function() + local res = luarocks.get_dir() + assert.truthy(res.name) + assert.truthy(res.root) + end) + + it("should get luarocks config dir", function() + local res = luarocks.get_config_dir() + assert.truthy(res) + end) + + it("should get luarocks install dir", function() + local res = luarocks.get_install_dir() + assert.truthy(res) + end) + +end) diff --git a/spec/integration/cluster/cluster_spec.lua b/spec/integration/cluster/cluster_spec.lua new file mode 100644 index 00000000000..e6a7785ddc8 --- /dev/null +++ b/spec/integration/cluster/cluster_spec.lua @@ -0,0 +1,347 @@ +local spec_helper = require "spec.spec_helpers" +local yaml = require "yaml" +local IO = require "kong.tools.io" +local http_client = require "kong.tools.http_client" +local cjson = require "cjson" + +local TEST_CONF = spec_helper.get_env().conf_file +local SERVER_CONF = "kong_TEST_SERVER.yml" + +local API_URL = 
spec_helper.API_URL +local PROXY_URL = spec_helper.PROXY_URL + +local SECOND_API_PORT = 9001 +local SECOND_API_URL = "http://127.0.0.1:"..SECOND_API_PORT + +local SECOND_PROXY_PORT = 9000 +local SECOND_PROXY_URL = "http://127.0.0.1:"..SECOND_PROXY_PORT + +local SECOND_SERVER_PROPERTIES = { + nginx_working_dir = "nginx_tmp_2", + proxy_listen = "0.0.0.0:"..SECOND_PROXY_PORT, + proxy_listen_ssl = "0.0.0.0:9443", + admin_api_listen = "0.0.0.0:"..SECOND_API_PORT, + cluster_listen = "0.0.0.0:9946", + cluster_listen_rpc = "0.0.0.0:9373", + dns_resolvers_available = { + dnsmasq = {port = 8054} + } +} + +local function replace_property(configuration, new_key, new_value) + if type(new_value) == "table" then + for k, v in pairs(new_value) do + if not configuration[new_key] then configuration[new_key] = {} end + configuration[new_key][k] = v + end + else + configuration[new_key] = new_value + end + return configuration +end + +local function replace_conf_property(t, output_file) + if not output_file then output_file = SERVER_CONF end + + local yaml_value = yaml.load(IO.read_file(TEST_CONF)) + for k, v in pairs(t) do + yaml_value = replace_property(yaml_value, k, v) + end + local new_config_content = yaml.dump(yaml_value) + + -- Workaround for https://github.com/lubyk/yaml/issues/2 + -- This workaround is in two places. To remove it "Find and replace" in the code + new_config_content = string.gsub(new_config_content, "(%w+:%s*)([%w%.]+:%d+)", "%1\"%2\"") + + local ok = IO.write_to_file(output_file, new_config_content) + assert.truthy(ok) +end + +describe("Cluster", function() + + local SECOND_WORKING_DIR = "nginx_tmp_2" + + setup(function() + pcall(spec_helper.stop_kong, TEST_CONF) + + spec_helper.prepare_db() + + os.execute("cp "..TEST_CONF.." 
"..SERVER_CONF) + os.execute("mkdir -p "..SECOND_WORKING_DIR) + spec_helper.add_env(SERVER_CONF) + spec_helper.prepare_db(SERVER_CONF) + replace_conf_property(SECOND_SERVER_PROPERTIES) + end) + + teardown(function() + os.remove(SERVER_CONF) + os.execute("rm -rf "..SECOND_WORKING_DIR) + spec_helper.remove_env(SERVER_CONF) + end) + + after_each(function() + pcall(spec_helper.stop_kong, TEST_CONF) + pcall(spec_helper.stop_kong, SERVER_CONF) + end) + + it("should register the node on startup", function() + local _, exit_code = spec_helper.start_kong(TEST_CONF, true) + assert.are.same(0, exit_code) + + local _, status = http_client.get(API_URL) + assert.equal(200, status) -- is running + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 1) do + -- Wait + end + + local res, err = spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) + assert.falsy(err) + assert.equal(1, #res) + assert.truthy(res[1].created_at) + assert.truthy(res[1].name) + assert.truthy(res[1].cluster_listening_address) + + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + assert.equal(1, cjson.decode(res).total) + end) + + it("should register the node on startup with the advertised address", function() + SECOND_SERVER_PROPERTIES.cluster = {advertise = "5.5.5.5:1234"} + replace_conf_property(SECOND_SERVER_PROPERTIES) + + local _, exit_code = spec_helper.start_kong(SERVER_CONF, true) + assert.are.same(0, exit_code) + + local _, status = http_client.get(SECOND_API_URL) + assert.equal(200, status) -- is running + + while(#spec_helper.envs[SERVER_CONF].dao_factory.nodes:find_by_keys({}) ~= 1) do + -- Wait + end + + local res, err = spec_helper.envs[SERVER_CONF].dao_factory.nodes:find_by_keys({}) + assert.falsy(err) + assert.equal(1, #res) + assert.truthy(res[1].created_at) + assert.truthy(res[1].name) + assert.truthy(res[1].cluster_listening_address) + assert.equal("5.5.5.5:1234", res[1].cluster_listening_address) + + local res, status = 
http_client.get(SECOND_API_URL.."/cluster") + assert.equal(200, status) + assert.equal(1, cjson.decode(res).total) + assert.equal("5.5.5.5:1234", cjson.decode(res).data[1].address) + + SECOND_SERVER_PROPERTIES.cluster = {advertise = ""} + replace_conf_property(SECOND_SERVER_PROPERTIES) + end) + + it("should register the second node on startup and auto-join sequentially", function() + SECOND_SERVER_PROPERTIES.cluster = {["auto-join"] = true} + replace_conf_property(SECOND_SERVER_PROPERTIES) + + local _, exit_code = spec_helper.start_kong(TEST_CONF, true) + assert.are.same(0, exit_code) + + local _, status = http_client.get(API_URL) + assert.equal(200, status) -- is running + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 1) do + -- Wait + end + + local _, exit_code = spec_helper.start_kong(SERVER_CONF, true) + assert.are.same(0, exit_code) + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 2) do + -- Wait + end + + local res, err = spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) + assert.falsy(err) + assert.equal(2, #res) + assert.truthy(res[1].created_at) + assert.truthy(res[1].name) + assert.truthy(res[1].cluster_listening_address) + assert.truthy(res[2].created_at) + assert.truthy(res[2].name) + assert.truthy(res[2].cluster_listening_address) + + local total + repeat + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + total = cjson.decode(res).total + until(total == 2) + + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + assert.equal(2, cjson.decode(res).total) + + local res, status = http_client.get(SECOND_API_URL.."/cluster") + assert.equal(200, status) + assert.equal(2, cjson.decode(res).total) + end) + + it("should register the second node on startup and auto-join asyncronously", function() + local _, exit_code = spec_helper.start_kong(TEST_CONF, true) + assert.are.same(0, exit_code) + + local _, exit_code = 
spec_helper.start_kong(SERVER_CONF, true) + assert.are.same(0, exit_code) + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 2) do + -- Wait + end + + os.execute("sleep 5") + + local res, err = spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) + assert.falsy(err) + assert.equal(2, #res) + assert.truthy(res[1].created_at) + assert.truthy(res[1].name) + assert.truthy(res[1].cluster_listening_address) + assert.truthy(res[2].created_at) + assert.truthy(res[2].name) + assert.truthy(res[2].cluster_listening_address) + + local total + repeat + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + total = cjson.decode(res).total + until(total == 2) + + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + assert.equal(2, cjson.decode(res).total) + + local res, status = http_client.get(SECOND_API_URL.."/cluster") + assert.equal(200, status) + assert.equal(2, cjson.decode(res).total) + end) + + it("should not join the second node on startup when auto-join is false", function() + SECOND_SERVER_PROPERTIES.cluster = {["auto-join"] = false} + replace_conf_property(SECOND_SERVER_PROPERTIES) + + local _, exit_code = spec_helper.start_kong(TEST_CONF, true) + assert.are.same(0, exit_code) + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 1) do + -- Wait + end + + local _, exit_code = spec_helper.start_kong(SERVER_CONF, true) + assert.are.same(0, exit_code) + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 2) do + -- Wait + end + + local res, err = spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) + assert.falsy(err) + assert.equal(2, #res) + assert.truthy(res[1].created_at) + assert.truthy(res[1].name) + assert.truthy(res[1].cluster_listening_address) + assert.truthy(res[2].created_at) + assert.truthy(res[2].name) + assert.truthy(res[2].cluster_listening_address) + + local total + repeat + local res, status = 
http_client.get(API_URL.."/cluster") + assert.equal(200, status) + total = cjson.decode(res).total + until(total == 1) + + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + assert.equal(1, cjson.decode(res).total) + + local res, status = http_client.get(SECOND_API_URL.."/cluster") + assert.equal(200, status) + assert.equal(1, cjson.decode(res).total) + end) + + it("cache should be purged on the node that joins", function() + replace_conf_property({cluster = {["auto-join"] = false}}, TEST_CONF) + SECOND_SERVER_PROPERTIES.cluster = {["auto-join"] = false} + replace_conf_property(SECOND_SERVER_PROPERTIES) + + -- Start the nodes + local _, exit_code = spec_helper.start_kong(TEST_CONF, true) + assert.are.same(0, exit_code) + local _, exit_code = spec_helper.start_kong(SERVER_CONF, true) + assert.are.same(0, exit_code) + + while(#spec_helper.envs[TEST_CONF].dao_factory.nodes:find_by_keys({}) ~= 2) do + -- Wait + end + + -- The nodes are sharing the same datastore, but not the same cluster + + -- Adding an API + local res, status = http_client.post(API_URL.."/apis", {request_host="test.com", upstream_url="http://mockbin.org"}) + assert.equal(201, status) + local api = cjson.decode(res) + + -- Populating the cache on both nodes + local _, status = http_client.get(PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(200, status) + local _, status = http_client.get(SECOND_PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(200, status) + + -- Updating API on first node + local _, status = http_client.patch(API_URL.."/apis/"..api.id, {request_host="test2.com"}) + assert.equal(200, status) + + -- Making the request again on both nodes (the second node still process correctly the request) + local _, status = http_client.get(PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(404, status) + local _, status = http_client.get(SECOND_PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(200, status) + + 
-- Making the request again with the updated property (only the first node processes this correctly) + local _, status = http_client.get(PROXY_URL.."/request", {}, {host = "test2.com"}) + assert.equal(200, status) + local _, status = http_client.get(SECOND_PROXY_URL.."/request", {}, {host = "test2.com"}) + assert.equal(404, status) + + -- Joining the nodes in the same cluster + local _, exit_code = IO.os_execute("serf join -rpc-addr=127.0.0.1:9101 join 127.0.0.1:9946") + assert.are.same(0, exit_code) + -- Wait for join to complete + local total + repeat + local res, status = http_client.get(API_URL.."/cluster") + assert.equal(200, status) + total = cjson.decode(res).total + until(total == 2) + + -- Wait for cache purge to be executed by the hooks + os.execute("sleep 5") + + -- Making the request again on the new property, and now both nodes should work + local _, status = http_client.get(PROXY_URL.."/request", {}, {host = "test2.com"}) + assert.equal(200, status) + local _, status = http_client.get(SECOND_PROXY_URL.."/request", {}, {host = "test2.com"}) + assert.equal(200, status) + + -- And it should not work on both on the old DNS + local _, status = http_client.get(PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(404, status) + local _, status = http_client.get(SECOND_PROXY_URL.."/request", {}, {host = "test.com"}) + assert.equal(404, status) + + -------------------------------------------------------- + -- Bring back the auto-join for the default test FILE -- + -------------------------------------------------------- + replace_conf_property({cluster = {["auto-join"] = true}}, TEST_CONF) + end) + +end) \ No newline at end of file diff --git a/spec/integration/core/hooks_spec.lua b/spec/integration/core/hooks_spec.lua new file mode 100644 index 00000000000..9a40eae43d0 --- /dev/null +++ b/spec/integration/core/hooks_spec.lua @@ -0,0 +1,484 @@ +local json = require "cjson" +local http_client = require "kong.tools.http_client" +local spec_helper = 
require "spec.spec_helpers" +local cache = require "kong.tools.database_cache" +local utils = require "kong.tools.utils" +local IO = require "kong.tools.io" + +local STUB_GET_URL = spec_helper.STUB_GET_URL +local API_URL = spec_helper.API_URL + +describe("Core Hooks", function() + + setup(function() + spec_helper.prepare_db() + end) + + teardown(function() + spec_helper.stop_kong() + end) + + before_each(function() + spec_helper.restart_kong() + + spec_helper.drop_db() + spec_helper.insert_fixtures { + api = { + {request_host = "hooks1.com", upstream_url = "http://mockbin.com"}, + {request_host = "hooks-consumer.com", upstream_url = "http://mockbin.com"}, + {request_host = "hooks-plugins.com", upstream_url = "http://mockbin.com"} + }, + consumer = { + {username = "consumer1"} + }, + plugin = { + {name = "basic-auth", config = {}, __api = 2}, + {name = "basic-auth", config = {}, __api = 3}, + {name = "rate-limiting", config = {minute=10}, __api = 3}, + {name = "rate-limiting", config = {minute=3}, __api = 3, __consumer = 1} + }, + basicauth_credential = { + {username = "user123", password = "pass123", __consumer = 1} + } + } + end) + + describe("Plugin entity invalidation", function() + + it("should invalidate a plugin when deleting", function() + -- Making a request to populate the cache + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks-consumer.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + + -- Make sure the cache is populated + local response, status = http_client.get(API_URL.."/apis", {request_host="hooks-consumer.com"}) + assert.equals(200, status) + local api_id = table.remove(json.decode(response).data).id + assert.truthy(api_id) + + local _, status = http_client.get(API_URL.."/cache/"..cache.plugin_key("basic-auth", api_id, nil)) + assert.equals(200, status) + + -- Delete plugin + local response, status = http_client.get(API_URL.."/apis/"..api_id.."/plugins/", {name="basic-auth"}) + assert.equals(200, 
status) + local plugin_id = table.remove(json.decode(response).data, 1).id + assert.truthy(plugin_id) + + local _, status = http_client.delete(API_URL.."/apis/"..api_id.."/plugins/"..plugin_id) + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache.plugin_key("basic-auth", api_id, nil)) + if status ~= 200 then + exists = false + end + end + + -- Consuming the API again without any authorization + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks-consumer.com"}) + assert.equals(200, status) + end) + it("should invalidate a consumer-specific plugin when deleting", function() + -- Making a request to populate the cache + local _, status, headers = http_client.get(STUB_GET_URL, {}, {host = "hooks-plugins.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + assert.equals(3, tonumber(headers["x-ratelimit-limit-minute"])) + + -- Make sure the cache is populated + local response, status = http_client.get(API_URL.."/apis", {request_host="hooks-plugins.com"}) + assert.equals(200, status) + local api_id = table.remove(json.decode(response).data).id + assert.truthy(api_id) + + local response, status = http_client.get(API_URL.."/consumers/consumer1") + assert.equals(200, status) + local consumer_id = json.decode(response).id + assert.truthy(consumer_id) + + local _, status = http_client.get(API_URL.."/cache/"..cache.plugin_key("rate-limiting", api_id, consumer_id)) + assert.equals(200, status) + + -- Delete plugin + local response, status = http_client.get(API_URL.."/apis/"..api_id.."/plugins/", {name="rate-limiting", consumer_id=consumer_id}) + assert.equals(200, status) + local plugin_id = table.remove(json.decode(response).data, 1).id + assert.truthy(plugin_id) + + local _, status = http_client.delete(API_URL.."/apis/"..api_id.."/plugins/"..plugin_id) + assert.equals(204, status) + + -- Wait for cache to be 
invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache.plugin_key("rate-limiting", api_id, consumer_id)) + if status ~= 200 then + exists = false + end + end + + -- Consuming the API again + local _, status, headers = http_client.get(STUB_GET_URL, {}, {host = "hooks-plugins.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + assert.equals(10, tonumber(headers["x-ratelimit-limit-minute"])) + end) + + it("should invalidate a consumer-specific plugin when updating", function() + -- Making a request to populate the cache + local _, status, headers = http_client.get(STUB_GET_URL, {}, {host = "hooks-plugins.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + assert.equals(3, tonumber(headers["x-ratelimit-limit-minute"])) + + -- Make sure the cache is populated + local response, status = http_client.get(API_URL.."/apis", {request_host="hooks-plugins.com"}) + assert.equals(200, status) + local api_id = table.remove(json.decode(response).data).id + assert.truthy(api_id) + + local response, status = http_client.get(API_URL.."/consumers/consumer1") + assert.equals(200, status) + local consumer_id = json.decode(response).id + assert.truthy(consumer_id) + + local _, status = http_client.get(API_URL.."/cache/"..cache.plugin_key("rate-limiting", api_id, consumer_id)) + assert.equals(200, status) + + -- Update plugin + local response, status = http_client.get(API_URL.."/apis/"..api_id.."/plugins/", {name="rate-limiting", consumer_id=consumer_id}) + assert.equals(200, status) + local plugin_id = table.remove(json.decode(response).data, 1).id + assert.truthy(plugin_id) + + local _, status = http_client.patch(API_URL.."/apis/"..api_id.."/plugins/"..plugin_id, {enabled=false}) + assert.equals(200, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = 
http_client.get(API_URL.."/cache/"..cache.plugin_key("rate-limiting", api_id, consumer_id)) + if status ~= 200 then + exists = false + end + end + + -- Consuming the API again + local _, status, headers = http_client.get(STUB_GET_URL, {}, {host = "hooks-plugins.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + assert.equals(10, tonumber(headers["x-ratelimit-limit-minute"])) + end) + + it("should invalidate a plugin when updating", function() + -- Making a request to populate the cache + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks-consumer.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + + -- Make sure the cache is populated + local response, status = http_client.get(API_URL.."/apis", {request_host="hooks-consumer.com"}) + assert.equals(200, status) + local api_id = table.remove(json.decode(response).data).id + assert.truthy(api_id) + + local _, status = http_client.get(API_URL.."/cache/"..cache.plugin_key("basic-auth", api_id, nil)) + assert.equals(200, status) + + -- Delete plugin + local response, status = http_client.get(API_URL.."/apis/"..api_id.."/plugins/", {name="basic-auth"}) + assert.equals(200, status) + local plugin_id = table.remove(json.decode(response).data, 1).id + assert.truthy(plugin_id) + + local _, status = http_client.patch(API_URL.."/apis/"..api_id.."/plugins/"..plugin_id, {enabled=false}) + assert.equals(200, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache.plugin_key("basic-auth", api_id, nil)) + if status ~= 200 then + exists = false + end + end + + -- Consuming the API again without any authorization + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks-consumer.com"}) + assert.equals(200, status) + end) + + end) + + describe("Consumer entity invalidation", function() + it("should invalidate a consumer when deleting", function() + -- 
Making a request to populate the cache + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks-consumer.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + + -- Make sure the cache is populated + local response, status = http_client.get(API_URL.."/consumers/consumer1") + assert.equals(200, status) + local consumer_id = json.decode(response).id + assert.truthy(consumer_id) + + local response, status = http_client.get(API_URL.."/cache/"..cache.consumer_key(consumer_id)) + assert.equals(200, status) + assert.equals("consumer1", json.decode(response).username) + + -- Delete consumer + local _, status = http_client.delete(API_URL.."/consumers/consumer1") + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache.consumer_key(consumer_id)) + if status ~= 200 then + exists = false + end + end + + -- Consuming the API again + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks-consumer.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(403, status) + end) + + it("should invalidate a consumer when updating", function() + -- Making a request to populate the cache + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks-consumer.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + + -- Make sure the cache is populated + local response, status = http_client.get(API_URL.."/consumers/consumer1") + assert.equals(200, status) + local consumer_id = json.decode(response).id + assert.truthy(consumer_id) + + local response, status = http_client.get(API_URL.."/cache/"..cache.consumer_key(consumer_id)) + assert.equals(200, status) + assert.equals("consumer1", json.decode(response).username) + + -- Update consumer + local _, status = http_client.patch(API_URL.."/consumers/consumer1", {username="updated_consumer1"}) + assert.equals(200, status) + + -- Wait 
for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache.consumer_key(consumer_id)) + if status ~= 200 then + exists = false + end + end + + -- Consuming the API again + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks-consumer.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + + -- Making sure the cache is updated + local response, status = http_client.get(API_URL.."/cache/"..cache.consumer_key(consumer_id)) + assert.equals(200, status) + assert.equals("updated_consumer1", json.decode(response).username) + end) + end) + + describe("API entity invalidation", function() + it("should invalidate ALL_APIS_BY_DICT when adding a new API", function() + -- Making a request to populate ALL_APIS_BY_DICT + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks1.com"}) + assert.equals(200, status) + + -- Make sure the cache is populated + local response, status = http_client.get(API_URL.."/cache/"..cache.all_apis_by_dict_key()) + assert.equals(200, status) + assert.truthy(json.decode(response).by_dns["hooks1.com"]) + assert.falsy(json.decode(response).by_dns["dynamic-hooks.com"]) + + -- Adding a new API + local _, status = http_client.post(API_URL.."/apis", {request_host="dynamic-hooks.com", upstream_url="http://mockbin.org"}) + assert.equals(201, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache.all_apis_by_dict_key()) + if status ~= 200 then + exists = false + end + end + + -- Consuming the API again + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks1.com"}) + assert.equals(200, status) + + -- Make sure the cache is populated + local response, status = http_client.get(API_URL.."/cache/"..cache.all_apis_by_dict_key()) + assert.equals(200, status) + assert.truthy(json.decode(response).by_dns["hooks1.com"]) + 
assert.truthy(json.decode(response).by_dns["dynamic-hooks.com"]) + end) + + it("should invalidate ALL_APIS_BY_DICT when updating an API", function() + -- Making a request to populate ALL_APIS_BY_DICT + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks1.com"}) + assert.equals(200, status) + + -- Make sure the cache is populated + local response, status = http_client.get(API_URL.."/cache/"..cache.all_apis_by_dict_key()) + assert.equals(200, status) + assert.truthy(json.decode(response).by_dns["hooks1.com"]) + assert.equals("http://mockbin.com", json.decode(response).by_dns["hooks1.com"].upstream_url) + + -- Updating API + local response, status = http_client.get(API_URL.."/apis", {request_host="hooks1.com"}) + assert.equals(200, status) + local api_id = table.remove(json.decode(response).data).id + assert.truthy(api_id) + + local _, status = http_client.patch(API_URL.."/apis/"..api_id, {upstream_url="http://mockbin.org"}) + assert.equals(200, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache.all_apis_by_dict_key()) + if status ~= 200 then + exists = false + end + end + + -- Consuming the API again + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks1.com"}) + assert.equals(200, status) + + -- Make sure the cache is populated with updated value + local response, status = http_client.get(API_URL.."/cache/"..cache.all_apis_by_dict_key()) + assert.equals(200, status) + assert.truthy(json.decode(response).by_dns["hooks1.com"]) + assert.equals("http://mockbin.org", json.decode(response).by_dns["hooks1.com"].upstream_url) + end) + + it("should invalidate ALL_APIS_BY_DICT when deleting an API", function() + -- Making a request to populate ALL_APIS_BY_DICT + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks1.com"}) + assert.equals(200, status) + + -- Make sure the cache is populated + local response, status = 
http_client.get(API_URL.."/cache/"..cache.all_apis_by_dict_key()) + assert.equals(200, status) + assert.truthy(1, utils.table_size(json.decode(response).by_dns)) + assert.truthy(json.decode(response).by_dns["hooks1.com"]) + + -- Deleting API + local response, status = http_client.get(API_URL.."/apis", {request_host="hooks1.com"}) + assert.equals(200, status) + local api_id = table.remove(json.decode(response).data).id + assert.truthy(api_id) + + local _, status = http_client.delete(API_URL.."/apis/"..api_id) + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache.all_apis_by_dict_key()) + if status ~= 200 then + exists = false + end + end + + -- Consuming the API again + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hooks1.com"}) + assert.equals(404, status) + + -- Make sure the cache is populated + local response, status = http_client.get(API_URL.."/cache/"..cache.all_apis_by_dict_key()) + assert.equals(200, status) + assert.truthy(0, utils.table_size(json.decode(response).by_dns)) + end) + end) + + describe("Serf events", function() + + local PID_FILE = "/tmp/serf_test.pid" + + local function start_serf() + local cmd_args = { + ["-node"] = "test_node", + ["-bind"] = "127.0.0.1:9000", + ["-profile"] = "wan", + ["-rpc-addr"] = "127.0.0.1:9001" + } + setmetatable(cmd_args, require "kong.tools.printable") + + local res, code = IO.os_execute("nohup serf agent "..tostring(cmd_args).." 2>&1 & echo $! 
> "..PID_FILE) + if code ~= 0 then + error("Error starting serf: "..res) + end + end + + local function stop_serf() + local pid = IO.read_file(PID_FILE) + IO.os_execute("kill "..pid) + end + + it("should syncronize nodes on members events", function() + start_serf() + + os.execute("sleep 5") -- Wait for both the first member to join, and for the seconday serf to start + + -- Tell Kong to join the new serf + local _, code = http_client.post(API_URL.."/cluster/", {address = "127.0.0.1:9000"}) + assert.equals(200, code) + + os.execute("sleep 3") + + local res, code = http_client.get(API_URL.."/cluster/") + local body = json.decode(res) + assert.equals(200, code) + assert.equals(2, #body.data) + + local found + for _, v in ipairs(body.data) do + if v.address == "127.0.0.1:9000" then + found = true + assert.equal("test_node", v.name) + assert.equal("alive", v.status) + else + assert.truthy(v.name) + assert.equal("alive", v.status) + end + end + assert.True(found) + + -- Killing serf + stop_serf() + + -- Triggering the status check + local _, code = IO.os_execute("serf reachability") + assert.equals(1, code) + + -- Wait a little bit to propagate the data + os.execute("sleep 45") + + -- Check again + local res, code = http_client.get(API_URL.."/cluster/") + local body = json.decode(res) + assert.equals(200, code) + assert.equals(2, #body.data) + + local found + for _, v in ipairs(body.data) do + if v.address == "127.0.0.1:9000" then + found = true + assert.equal("test_node", v.name) + assert.equal("failed", v.status) + else + assert.truthy(v.name) + assert.equal("alive", v.status) + end + end + assert.True(found) + end) + end) + +end) \ No newline at end of file diff --git a/spec/integration/dao/cassandra/base_dao_spec.lua b/spec/integration/dao/cassandra/base_dao_spec.lua index 2ecfd6d6f20..a2b8d2c9aa9 100644 --- a/spec/integration/dao/cassandra/base_dao_spec.lua +++ b/spec/integration/dao/cassandra/base_dao_spec.lua @@ -734,11 +734,6 @@ describe("Cassandra", function() 
local ok, err = dao_factory.plugins:delete(rows[1]) assert.falsy(err) assert.True(ok) - - rows, err = session:execute("SELECT * FROM plugins WHERE id = ?", {cassandra.uuid(rows[1].id)}) - assert.falsy(err) - assert.truthy(rows) - assert.equal(0, #rows) end) it("should delete an entity when it can be found without its primay key", function() local ok, err = dao_factory.consumers:delete(nil, { @@ -786,6 +781,53 @@ describe("Cassandra", function() end) end) + -- + -- Nodes tests + -- + + describe("Nodes", function() + + setup(function() + spec_helper.drop_db() + spec_helper.seed_db(100) + end) + + describe(":insert()", function() + local node, err = dao_factory.nodes:insert({ + cluster_listening_address = "wot.hello.com:1111", + name = "wot" + }) + assert.falsy(err) + assert.truthy(node) + assert.equal("wot.hello.com:1111", node.cluster_listening_address) + end) + + describe(":find_by_keys() and :delete()", function() + local nodes, err = dao_factory.nodes:find_by_keys({ + cluster_listening_address = "wot.hello.com:1111" + }) + + assert.falsy(err) + assert.truthy(nodes) + assert.equal(1, #nodes) + + local ok, err = dao_factory.nodes:delete({ + name = table.remove(nodes, 1).name + }) + + assert.True(ok) + assert.falsy(err) + end) + + describe(":find_all()", function() + local nodes, err = dao_factory.nodes:find_all() + assert.falsy(err) + assert.truthy(nodes) + assert.equal(100, #nodes) + end) + + end) + -- -- Plugins configuration additional behaviour -- @@ -899,4 +941,4 @@ describe("Cassandra", function() end) end) -- describe plugins configurations end) -- describe Base DAO -end) -- describe Cassandra +end) -- describe Cassandra \ No newline at end of file diff --git a/spec/integration/dao/cassandra/cascade_spec.lua b/spec/integration/dao/cassandra/cascade_spec.lua index c28b0d14566..e6f1256f03a 100644 --- a/spec/integration/dao/cassandra/cascade_spec.lua +++ b/spec/integration/dao/cassandra/cascade_spec.lua @@ -124,9 +124,9 @@ describe("Cassandra cascade delete", 
function() spec_helper.drop_db() end) it("should delete foreign keyauth_credentials when deleting a Consumer", function() - local ok, err = dao_factory.consumers:delete(consumer) + local res, err = dao_factory.consumers:delete(consumer) assert.falsy(err) - assert.True(ok) + assert.truthy(res) local results, err = dao_factory.keyauth_credentials:find_by_keys { consumer_id = consumer.id diff --git a/spec/integration/dao/cassandra/events_spec.lua b/spec/integration/dao/cassandra/events_spec.lua new file mode 100644 index 00000000000..0382fd1565e --- /dev/null +++ b/spec/integration/dao/cassandra/events_spec.lua @@ -0,0 +1,123 @@ +local event_types = require "kong.core.events".TYPES +local spec_helper = require "spec.spec_helpers" +local utils = require "kong.tools.utils" + +local env = spec_helper.get_env() -- test environment +local dao_factory = env.dao_factory +local events = env.events + +describe("Events", function() + + setup(function() + spec_helper.start_kong() + end) + + teardown(function() + spec_helper.stop_kong() + end) + + before_each(function() + spec_helper.prepare_db() + end) + + it("should fire event on insert", function() + local received = false + + events:subscribe(event_types.CLUSTER_PROPAGATE, function(message_t) + if message_t.type == event_types.ENTITY_CREATED then + assert.equals(event_types.ENTITY_CREATED, message_t.type) + assert.equals("apis", message_t.collection) + assert.truthy(message_t.entity) + assert.equals(5, utils.table_size(message_t.entity)) + assert.equals("test.com", message_t.entity.request_host) + assert.equals("http://mockbin.org", message_t.entity.upstream_url) + + received = true + end + end) + + local res, err = dao_factory.apis:insert({ + request_host = "test.com", + upstream_url = "http://mockbin.org" + }) + + assert.truthy(res) + assert.falsy(err) + + while not received do + -- Wait + end + assert.True(received) + end) + + it("should fire event on update", function() + local received = false + + 
events:subscribe(event_types.CLUSTER_PROPAGATE, function(message_t) + + if message_t.type == event_types.ENTITY_UPDATED then + assert.equals(event_types.ENTITY_UPDATED, message_t.type) + assert.equals("apis", message_t.collection) + assert.truthy(message_t.entity) + assert.equals(5, utils.table_size(message_t.entity)) + assert.equals("test.com", message_t.entity.request_host) + assert.equals("http://mockbin.org", message_t.entity.upstream_url) + + local new_entity = dao_factory.apis:find_by_primary_key({id=message_t.entity.id}) + assert.equals("http://mockbin2.org", new_entity.upstream_url) + + received = true + end + end) + + local res, err = dao_factory.apis:insert({ + request_host = "test.com", + upstream_url = "http://mockbin.org" + }) + assert.truthy(res) + assert.falsy(err) + + -- Update entity + res.upstream_url = "http://mockbin2.org" + local res, err = dao_factory.apis:update(res) + assert.truthy(res) + assert.falsy(err) + + while not received do + -- Wait + end + assert.True(received) + end) + + it("should fire event on delete", function() + local received = false + + events:subscribe(event_types.CLUSTER_PROPAGATE, function(message_t) + if message_t.type == event_types.ENTITY_DELETED then + assert.equals(event_types.ENTITY_DELETED, message_t.type) + assert.equals("apis", message_t.collection) + assert.truthy(message_t.entity) + assert.equals(5, utils.table_size(message_t.entity)) + assert.equals("test.com", message_t.entity.request_host) + assert.equals("http://mockbin.org", message_t.entity.upstream_url) + + received = true + end + end) + + local res, err = dao_factory.apis:insert({ + request_host = "test.com", + upstream_url = "http://mockbin.org" + }) + assert.truthy(res) + assert.falsy(err) + + dao_factory.apis:delete({id=res.id}) + + while not received do + -- Wait + end + assert.True(received) + end) + +end) \ No newline at end of file diff --git a/spec/integration/proxy/database_cache_spec.lua b/spec/integration/proxy/database_cache_spec.lua 
deleted file mode 100644 index 55cf9d81e71..00000000000 --- a/spec/integration/proxy/database_cache_spec.lua +++ /dev/null @@ -1,67 +0,0 @@ -local spec_helper = require "spec.spec_helpers" -local http_client = require "kong.tools.http_client" - -local env = spec_helper.get_env() - -describe("Database cache", function() - local fixtures - - setup(function() - spec_helper.prepare_db() - fixtures = spec_helper.insert_fixtures { - api = { - {name = "tests-database-cache", request_host = "cache.test", upstream_url = "http://httpbin.org"} - } - } - - spec_helper.start_kong() - end) - - teardown(function() - spec_helper.stop_kong() - end) - - it("should expire cache after five seconds", function() - -- trigger a db fetch for this API's plugins - http_client.get(spec_helper.PROXY_URL.."/get", {}, {host = "cache.test"}) - - -- Let's add the authentication plugin configuration - local _, err = env.dao_factory.plugins:insert { - name = "key-auth", - api_id = fixtures.api[1].id, - config = { - key_names = {"x-key"} - } - } - assert.falsy(err) - - -- Making the request immediately after will succeed - local _, status = http_client.get(spec_helper.PROXY_URL.."/get", {}, {host = "cache.test"}) - assert.are.equal(200, status) - - -- But waiting after the cache expiration (5 seconds) should block the request - os.execute("sleep "..tonumber(5)) - - local _, status = http_client.get(spec_helper.PROXY_URL.."/get", {}, {host = "cache.test"}) - assert.are.equal(401, status) - - -- Create a consumer and a key will make it work again - local consumer, err = env.dao_factory.consumers:insert {username = "john"} - assert.falsy(err) - - local _, err = env.dao_factory.keyauth_credentials:insert { - consumer_id = consumer.id, - key = "secret_key_123" - } - assert.falsy(err) - - -- This should fail, wrong key - local _, status = http_client.get(spec_helper.PROXY_URL.."/get", {}, {host = "cache.test", ["x-key"] = "secret_key"}) - assert.are.equal(403, status) - - -- This should work, right key - 
local _, status = http_client.get(spec_helper.PROXY_URL.."/get", {}, {host = "cache.test", ["x-key"] = "secret_key_123"}) - assert.are.equal(200, status) - end) - -end) diff --git a/spec/integration/proxy/resolver_spec.lua b/spec/integration/proxy/resolver_spec.lua index 597b0386ef4..98454c4ad6f 100644 --- a/spec/integration/proxy/resolver_spec.lua +++ b/spec/integration/proxy/resolver_spec.lua @@ -1,10 +1,10 @@ +local spec_helper = require "spec.spec_helpers" local ssl = require "ssl" local url = require "socket.url" local cjson = require "cjson" local utils = require "kong.tools.utils" local socket = require "socket" local constants = require "kong.constants" -local spec_helper = require "spec.spec_helpers" local http_client = require "kong.tools.http_client" local STUB_GET_URL = spec_helper.STUB_GET_URL diff --git a/spec/integration/tools/http_client_spec.lua b/spec/integration/tools/http_client_spec.lua index b1d2e7e6adc..e1aaa325e1e 100644 --- a/spec/integration/tools/http_client_spec.lua +++ b/spec/integration/tools/http_client_spec.lua @@ -85,6 +85,15 @@ describe("http_client", function() assert.are.equal("pippo", parsed_response.headers.Custom) end) + it("should send a valid POST request with plain text body", function() + local response, status, headers = http_client.post("http://httpbin.org/post", "Hello World", {["content-type"] = "text/plain"}) + assert.are.equal(200, status) + assert.truthy(headers) + assert.truthy(response) + local parsed_response = cjson.decode(response) + assert.are.equal("Hello World", parsed_response.data) + end) + end) describe("PUT", function() diff --git a/spec/plugins/acl/api_spec.lua b/spec/plugins/acl/api_spec.lua index 13a4cc2a2ce..1f828c3254a 100644 --- a/spec/plugins/acl/api_spec.lua +++ b/spec/plugins/acl/api_spec.lua @@ -115,5 +115,4 @@ describe("ACLs API", function() end) end) - -end) +end) \ No newline at end of file diff --git a/spec/plugins/acl/hooks_spec.lua b/spec/plugins/acl/hooks_spec.lua new file mode 100644 
index 00000000000..640b3197d17 --- /dev/null +++ b/spec/plugins/acl/hooks_spec.lua @@ -0,0 +1,163 @@ +local json = require "cjson" +local http_client = require "kong.tools.http_client" +local spec_helper = require "spec.spec_helpers" +local cache = require "kong.tools.database_cache" + +local STUB_GET_URL = spec_helper.STUB_GET_URL +local API_URL = spec_helper.API_URL + +describe("ACL Hooks", function() + + setup(function() + spec_helper.prepare_db() + end) + + teardown(function() + spec_helper.stop_kong() + end) + + before_each(function() + spec_helper.restart_kong() + + spec_helper.drop_db() + spec_helper.insert_fixtures { + api = { + {request_host = "acl1.com", upstream_url = "http://mockbin.com"}, + {request_host = "acl2.com", upstream_url = "http://mockbin.com"} + }, + consumer = { + {username = "consumer1"}, + {username = "consumer2"} + }, + plugin = { + {name = "key-auth", config = {key_names = {"apikey"}}, __api = 1}, + {name = "acl", config = { whitelist = {"admin"}}, __api = 1}, + {name = "key-auth", config = {key_names = {"apikey"}}, __api = 2}, + {name = "acl", config = { whitelist = {"ya"}}, __api = 2} + }, + keyauth_credential = { + {key = "apikey123", __consumer = 1}, + {key = "apikey124", __consumer = 2} + }, + acl = { + {group="admin", __consumer = 1}, + {group="pro", __consumer = 1}, + {group="admin", __consumer = 2} + } + } + + end) + + local function get_consumer_id(username) + local response, status = http_client.get(API_URL.."/consumers/consumer1") + assert.equals(200, status) + local consumer_id = json.decode(response).id + assert.truthy(consumer_id) + return consumer_id + end + + local function get_acl_id(consumer_id_or_name, group_name) + local response, status = http_client.get(API_URL.."/consumers/"..consumer_id_or_name.."/acls/", {group=group_name}) + assert.equals(200, status) + local body = json.decode(response) + if #body.data == 1 then + return table.remove(body.data, 1).id + end + end + + describe("ACL entity invalidation", 
function() + it("should invalidate when ACL entity is deleted", function() + -- It should work + local _, status = http_client.get(STUB_GET_URL, {apikey = "apikey123"}, {host="acl1.com"}) + assert.equals(200, status) + + -- Check that cache is populated + local cache_key = cache.acls_key(get_consumer_id("consumer1")) + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Delete ACL group (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/consumer1/acls/"..get_acl_id("consumer1", "admin")) + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, {apikey = "apikey123"}, {host="acl1.com"}) + assert.equals(403, status) + end) + it("should invalidate when ACL entity is updated", function() + -- It should work + local _, status = http_client.get(STUB_GET_URL, {apikey = "apikey123"}, {host="acl1.com"}) + assert.equals(200, status) + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, {apikey = "apikey123"}, {host="acl2.com"}) + assert.equals(403, status) + + -- Check that cache is populated + local cache_key = cache.acls_key(get_consumer_id("consumer1")) + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Update ACL group (which triggers invalidation) + local _, status = http_client.patch(API_URL.."/consumers/consumer1/acls/"..get_acl_id("consumer1", "admin"), {group="ya"}) + assert.equals(200, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local _, status = 
http_client.get(STUB_GET_URL, {apikey = "apikey123"}, {host="acl1.com"}) + assert.equals(403, status) + + -- It should work now + local _, status = http_client.get(STUB_GET_URL, {apikey = "apikey123"}, {host="acl2.com"}) + assert.equals(200, status) + end) + end) + + describe("Consumer entity invalidation", function() + it("should invalidate when Consumer entity is deleted", function() + -- It should work + local _, status = http_client.get(STUB_GET_URL, {apikey = "apikey123"}, {host="acl1.com"}) + assert.equals(200, status) + + -- Check that cache is populated + local cache_key = cache.acls_key(get_consumer_id("consumer1")) + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Delete Consumer (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/consumer1") + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, {apikey = "apikey123"}, {host="acl1.com"}) + assert.equals(403, status) + end) + end) + +end) diff --git a/spec/plugins/basic-auth/crypto_spec.lua b/spec/plugins/basic-auth/crypto_spec.lua new file mode 100644 index 00000000000..5359d4c6198 --- /dev/null +++ b/spec/plugins/basic-auth/crypto_spec.lua @@ -0,0 +1,15 @@ +local crypto = require "kong.plugins.basic-auth.crypto" + +describe("Basic Authentication Crypt", function() + it("should encrypt", function() + local credential = { + consumer_id = "id123", + password = "pass123" + } + + local value = crypto.encrypt(credential) + assert.truthy(value) + assert.equals(40, string.len(value)) + assert.equals(crypto.encrypt(credential), crypto.encrypt(credential)) + end) +end) \ No newline at end of file diff --git a/spec/plugins/basic-auth/hooks_spec.lua 
b/spec/plugins/basic-auth/hooks_spec.lua new file mode 100644 index 00000000000..7cce20e42cc --- /dev/null +++ b/spec/plugins/basic-auth/hooks_spec.lua @@ -0,0 +1,144 @@ +local json = require "cjson" +local http_client = require "kong.tools.http_client" +local spec_helper = require "spec.spec_helpers" +local cache = require "kong.tools.database_cache" + +local STUB_GET_URL = spec_helper.STUB_GET_URL +local API_URL = spec_helper.API_URL + +describe("Basic Authentication Hooks", function() + + setup(function() + spec_helper.prepare_db() + end) + + teardown(function() + spec_helper.stop_kong() + end) + + before_each(function() + spec_helper.restart_kong() + + spec_helper.drop_db() + spec_helper.insert_fixtures { + api = { + {request_host = "basicauth.com", upstream_url = "http://mockbin.com"} + }, + consumer = { + {username = "consumer1"} + }, + plugin = { + {name = "basic-auth", config = {}, __api = 1} + }, + basicauth_credential = { + {username = "user123", password = "pass123", __consumer = 1} + } + } + end) + + describe("Basic Auth Credentials entity invalidation", function() + it("should invalidate when Basic Auth Credential entity is deleted", function() + -- It should work + local _, status = http_client.get(STUB_GET_URL, {}, {host="basicauth.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + + -- Check that cache is populated + local cache_key = cache.basicauth_credential_key("user123") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve credential ID + local response, status = http_client.get(API_URL.."/consumers/consumer1/basic-auth/") + assert.equals(200, status) + local credential_id = table.remove(json.decode(response).data, 1).id + assert.truthy(credential_id) + + -- Delete Basic Auth credential (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/consumer1/basic-auth/"..credential_id) + assert.equals(204, status) + + -- Wait 
for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, {}, {host="basicauth.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(403, status) + end) + it("should invalidate when Basic Auth Credential entity is updated", function() + -- It should work + local _, status = http_client.get(STUB_GET_URL, {}, {host="basicauth.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, {}, {host="basicauth.com", authorization = "Basic aGVsbG8xMjM6cGFzczEyMw=="}) + assert.equals(403, status) + + -- Check that cache is populated + local cache_key = cache.basicauth_credential_key("user123") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve credential ID + local response, status = http_client.get(API_URL.."/consumers/consumer1/basic-auth/") + assert.equals(200, status) + local credential_id = table.remove(json.decode(response).data, 1).id + assert.truthy(credential_id) + + -- Delete Basic Auth credential (which triggers invalidation) + local _, status = http_client.patch(API_URL.."/consumers/consumer1/basic-auth/"..credential_id, {username="hello123"}) + assert.equals(200, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should work + local _, status = http_client.get(STUB_GET_URL, {}, {host="basicauth.com", authorization = "Basic aGVsbG8xMjM6cGFzczEyMw=="}) + assert.equals(200, status) + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, {}, {host="basicauth.com", authorization = "Basic 
dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(403, status) + end) + end) + describe("Consumer entity invalidation", function() + it("should invalidate when Consumer entity is deleted", function() + -- It should work + local _, status = http_client.get(STUB_GET_URL, {}, {host="basicauth.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(200, status) + + -- Check that cache is populated + local cache_key = cache.basicauth_credential_key("user123") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Delete Consumer (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/consumer1") + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, {}, {host="basicauth.com", authorization = "Basic dXNlcjEyMzpwYXNzMTIz"}) + assert.equals(403, status) + end) + end) +end) diff --git a/spec/plugins/hmac-auth/hooks_spec.lua b/spec/plugins/hmac-auth/hooks_spec.lua new file mode 100644 index 00000000000..b8ce4c3423b --- /dev/null +++ b/spec/plugins/hmac-auth/hooks_spec.lua @@ -0,0 +1,164 @@ +local json = require "cjson" +local http_client = require "kong.tools.http_client" +local spec_helper = require "spec.spec_helpers" +local cache = require "kong.tools.database_cache" +local base64 = require "base64" +local crypto = require "crypto" + +local STUB_GET_URL = spec_helper.STUB_GET_URL +local API_URL = spec_helper.API_URL + +describe("HMAC Authentication Hooks", function() + + setup(function() + spec_helper.prepare_db() + end) + + teardown(function() + spec_helper.stop_kong() + end) + + before_each(function() + spec_helper.restart_kong() + + spec_helper.drop_db() + spec_helper.insert_fixtures { + api = { + {request_host = "hmacauth.com", 
upstream_url = "http://mockbin.com"} + }, + consumer = { + {username = "consumer1"} + }, + plugin = { + {name = "hmac-auth", config = {clock_skew = 3000}, __api = 1} + }, + hmacauth_credential = { + {username = "bob", secret = "secret", __consumer = 1} + } + } + end) + + local function hmac_sha1_binary(secret, data) + return crypto.hmac.digest("sha1", data, secret, true) + end + + local function get_authorization(username) + local date = os.date("!%a, %d %b %Y %H:%M:%S GMT") + local encodedSignature = base64.encode(hmac_sha1_binary("secret", "date: "..date)) + return [["hmac username="]]..username..[[",algorithm="hmac-sha1",headers="date",signature="]]..encodedSignature..[["]], date + end + + describe("HMAC Auth Credentials entity invalidation", function() + it("should invalidate when Hmac Auth Credential entity is deleted", function() + -- It should work + local authorization, date = get_authorization("bob") + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hmacauth.com", date = date, authorization = authorization}) + assert.equals(200, status) + + -- Check that cache is populated + local cache_key = cache.hmacauth_credential_key("bob") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve credential ID + local response, status = http_client.get(API_URL.."/consumers/consumer1/hmac-auth/") + assert.equals(200, status) + local credential_id = table.remove(json.decode(response).data, 1).id + assert.truthy(credential_id) + + -- Delete Hmac Auth credential (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/consumer1/hmac-auth/"..credential_id) + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local authorization, date = get_authorization("bob") + local _, 
status = http_client.get(STUB_GET_URL, {}, {host = "hmacauth.com", date = date, authorization = authorization}) + assert.equals(403, status) + end) + it("should invalidate when Hmac Auth Credential entity is updated", function() + -- It should work + local authorization, date = get_authorization("bob") + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hmacauth.com", date = date, authorization = authorization}) + assert.equals(200, status) + + -- It should not work + local authorization, date = get_authorization("hello123") + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hmacauth.com", date = date, authorization = authorization}) + assert.equals(403, status) + + -- Check that cache is populated + local cache_key = cache.hmacauth_credential_key("bob") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve credential ID + local response, status = http_client.get(API_URL.."/consumers/consumer1/hmac-auth/") + assert.equals(200, status) + local credential_id = table.remove(json.decode(response).data, 1).id + assert.truthy(credential_id) + + -- Delete Hmac Auth credential (which triggers invalidation) + local _, status = http_client.patch(API_URL.."/consumers/consumer1/hmac-auth/"..credential_id, {username="hello123"}) + assert.equals(200, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should work + local authorization, date = get_authorization("hello123") + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hmacauth.com", date = date, authorization = authorization}) + assert.equals(200, status) + + -- It should not work + local authorization, date = get_authorization("bob") + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hmacauth.com", date = date, authorization = authorization}) + 
assert.equals(403, status) + end) + end) + describe("Consumer entity invalidation", function() + it("should invalidate when Consumer entity is deleted", function() + -- It should work + local authorization, date = get_authorization("bob") + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hmacauth.com", date = date, authorization = authorization}) + assert.equals(200, status) + + -- Check that cache is populated + local cache_key = cache.hmacauth_credential_key("bob") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Delete Consumer (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/consumer1") + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local authorization, date = get_authorization("bob") + local _, status = http_client.get(STUB_GET_URL, {}, {host = "hmacauth.com", date = date, authorization = authorization}) + assert.equals(403, status) + end) + end) +end) diff --git a/spec/plugins/jwt/hooks_spec.lua b/spec/plugins/jwt/hooks_spec.lua new file mode 100644 index 00000000000..3c06a656e24 --- /dev/null +++ b/spec/plugins/jwt/hooks_spec.lua @@ -0,0 +1,158 @@ +local json = require "cjson" +local http_client = require "kong.tools.http_client" +local spec_helper = require "spec.spec_helpers" +local cache = require "kong.tools.database_cache" +local jwt_encoder = require "kong.plugins.jwt.jwt_parser" + +local STUB_GET_URL = spec_helper.STUB_GET_URL +local API_URL = spec_helper.API_URL + +describe("JWT Authentication Hooks", function() + + setup(function() + spec_helper.prepare_db() + end) + + teardown(function() + spec_helper.stop_kong() + end) + + before_each(function() + spec_helper.restart_kong() + + spec_helper.drop_db() + spec_helper.insert_fixtures { + 
api = { + {request_host = "jwt.com", upstream_url = "http://mockbin.com"} + }, + consumer = { + {username = "consumer1"} + }, + plugin = { + {name = "jwt", config = {}, __api = 1} + }, + jwt_secret = { + {key = "key123", secret = "secret123", __consumer = 1} + } + } + end) + + local PAYLOAD = { + iss = nil, + nbf = os.time(), + iat = os.time(), + exp = os.time() + 3600 + } + + local function get_authorization(key, secret) + PAYLOAD.iss = key + local jwt = jwt_encoder.encode(PAYLOAD, secret) + return "Bearer "..jwt + end + + describe("JWT Credentials entity invalidation", function() + it("should invalidate when JWT Auth Credential entity is deleted", function() + -- It should work + local _, status = http_client.get(STUB_GET_URL, nil, {host = "jwt.com", authorization = get_authorization("key123", "secret123")}) + assert.equal(200, status) + + -- Check that cache is populated + local cache_key = cache.jwtauth_credential_key("key123") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve credential ID + local response, status = http_client.get(API_URL.."/consumers/consumer1/jwt/") + assert.equals(200, status) + local credential_id = table.remove(json.decode(response).data, 1).id + assert.truthy(credential_id) + + -- Delete JWT credential (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/consumer1/jwt/"..credential_id) + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, nil, {host = "jwt.com", authorization = get_authorization("key123", "secret123")}) + assert.equal(403, status) + end) + it("should invalidate when JWT Auth Credential entity is updated", function() + -- It should work + local _, status = 
http_client.get(STUB_GET_URL, nil, {host = "jwt.com", authorization = get_authorization("key123", "secret123")}) + assert.equal(200, status) + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, nil, {host = "jwt.com", authorization = get_authorization("keyhello", "secret123")}) + assert.equal(403, status) + + -- Check that cache is populated + local cache_key = cache.jwtauth_credential_key("key123") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve credential ID + local response, status = http_client.get(API_URL.."/consumers/consumer1/jwt/") + assert.equals(200, status) + local credential_id = table.remove(json.decode(response).data, 1).id + assert.truthy(credential_id) + + -- Delete JWT credential (which triggers invalidation) + local _, status = http_client.patch(API_URL.."/consumers/consumer1/jwt/"..credential_id, {key="keyhello"}) + assert.equals(200, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should work + local _, status = http_client.get(STUB_GET_URL, nil, {host = "jwt.com", authorization = get_authorization("keyhello", "secret123")}) + assert.equal(200, status) + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, nil, {host = "jwt.com", authorization = get_authorization("key123", "secret123")}) + assert.equal(403, status) + end) + end) + describe("Consumer entity invalidation", function() + it("should invalidate when Consumer entity is deleted", function() + -- It should work + local _, status = http_client.get(STUB_GET_URL, nil, {host = "jwt.com", authorization = get_authorization("key123", "secret123")}) + assert.equal(200, status) + + -- Check that cache is populated + local cache_key = cache.jwtauth_credential_key("key123") + local _, status = 
http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Delete Consumer (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/consumer1") + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, nil, {host = "jwt.com", authorization = get_authorization("key123", "secret123")}) + assert.equal(403, status) + end) + end) +end) diff --git a/spec/plugins/key-auth/hooks_spec.lua b/spec/plugins/key-auth/hooks_spec.lua new file mode 100644 index 00000000000..ed94fdd89ae --- /dev/null +++ b/spec/plugins/key-auth/hooks_spec.lua @@ -0,0 +1,145 @@ +local json = require "cjson" +local http_client = require "kong.tools.http_client" +local spec_helper = require "spec.spec_helpers" +local cache = require "kong.tools.database_cache" + +local STUB_GET_URL = spec_helper.STUB_GET_URL +local API_URL = spec_helper.API_URL + +describe("Key Authentication Hooks", function() + + setup(function() + spec_helper.prepare_db() + end) + + teardown(function() + spec_helper.stop_kong() + end) + + before_each(function() + spec_helper.restart_kong() + + spec_helper.drop_db() + spec_helper.insert_fixtures { + api = { + {request_host = "keyauth.com", upstream_url = "http://mockbin.com"} + }, + consumer = { + {username = "consumer1"} + }, + plugin = { + {name = "key-auth", config = {}, __api = 1} + }, + keyauth_credential = { + {key = "key123", __consumer = 1} + } + } + end) + + describe("Key Auth Credentials entity invalidation", function() + it("should invalidate when Key Auth Credential entity is deleted", function() + -- It should work + local _, status = http_client.get(STUB_GET_URL, {apikey="key123"}, {host="keyauth.com"}) + assert.equals(200, status) + + -- Check that cache is populated + 
local cache_key = cache.keyauth_credential_key("key123") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve credential ID + local response, status = http_client.get(API_URL.."/consumers/consumer1/key-auth/") + assert.equals(200, status) + local credential_id = table.remove(json.decode(response).data, 1).id + assert.truthy(credential_id) + + -- Delete Key Auth credential (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/consumer1/key-auth/"..credential_id) + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, {apikey="key123"}, {host="keyauth.com"}) + assert.equals(403, status) + end) + it("should invalidate when Key Auth Credential entity is updated", function() + -- It should work + local _, status = http_client.get(STUB_GET_URL, {apikey="key123"}, {host="keyauth.com"}) + assert.equals(200, status) + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, {apikey="updkey123"}, {host="keyauth.com"}) + assert.equals(403, status) + + -- Check that cache is populated + local cache_key = cache.keyauth_credential_key("key123") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve credential ID + local response, status = http_client.get(API_URL.."/consumers/consumer1/key-auth/") + assert.equals(200, status) + local credential_id = table.remove(json.decode(response).data, 1).id + assert.truthy(credential_id) + + -- Delete Key Auth credential (which triggers invalidation) + local _, status = http_client.patch(API_URL.."/consumers/consumer1/key-auth/"..credential_id, {key="updkey123"}) + assert.equals(200, status) + + -- Wait for cache to be invalidated 
+ local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should work + local _, status = http_client.get(STUB_GET_URL, {apikey="updkey123"}, {host="keyauth.com"}) + assert.equals(200, status) + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, {apikey="key123"}, {host="keyauth.com"}) + assert.equals(403, status) + end) + end) + + describe("Consumer entity invalidation", function() + it("should invalidate when Consumer entity is deleted", function() + -- It should work + local _, status = http_client.get(STUB_GET_URL, {apikey="key123"}, {host="keyauth.com"}) + assert.equals(200, status) + + -- Check that cache is populated + local cache_key = cache.keyauth_credential_key("key123") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Delete Consumer (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/consumer1") + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local _, status = http_client.get(STUB_GET_URL, {apikey="key123"}, {host="keyauth.com"}) + assert.equals(403, status) + end) + end) +end) diff --git a/spec/plugins/oauth2/access_spec.lua b/spec/plugins/oauth2/access_spec.lua index 626df57f261..17cf6646645 100644 --- a/spec/plugins/oauth2/access_spec.lua +++ b/spec/plugins/oauth2/access_spec.lua @@ -34,8 +34,8 @@ end describe("Authentication Plugin", function() - local function prepare() - spec_helper.drop_db() + setup(function() + spec_helper.prepare_db() spec_helper.insert_fixtures { api = { { name = "tests-oauth2", request_host = "oauth2.com", upstream_url = "http://mockbin.com" }, @@ -60,21 +60,13 @@ describe("Authentication 
Plugin", function() { client_id = "clientid123", client_secret = "secret123", redirect_uri = "http://google.com/kong", name="testapp", __consumer = 1 } } } - end - - setup(function() - spec_helper.prepare_db() + spec_helper.start_kong() end) teardown(function() spec_helper.stop_kong() end) - before_each(function() - spec_helper.restart_kong() -- Required because the uuid function doesn't seed itself every millisecond, but every second - prepare() - end) - describe("OAuth2 Authorization", function() describe("Code Grant", function() @@ -831,4 +823,4 @@ describe("Authentication Plugin", function() end) -end) +end) \ No newline at end of file diff --git a/spec/plugins/oauth2/api_spec.lua b/spec/plugins/oauth2/api_spec.lua index b49f4e2e0a2..a0010804593 100644 --- a/spec/plugins/oauth2/api_spec.lua +++ b/spec/plugins/oauth2/api_spec.lua @@ -123,4 +123,56 @@ describe("OAuth 2 Credentials API", function() end) end) + + describe("/oauth2_tokens/", function() + + -- Create credential + local response, status = http_client.post(BASE_URL, {name = "Test APP", redirect_uri = "http://google.com/"}) + assert.equal(201, status) + credential = json.decode(response) + + local token + + BASE_URL = spec_helper.API_URL.."/oauth2_tokens/" + + describe("POST", function() + + it("[SUCCESS] should create a oauth2 token", function() + local response, status = http_client.post(BASE_URL, {credential_id = credential.id, expires_in = 10}) + assert.equal(201, status) + token = json.decode(response) + assert.equal(credential.id, token.credential_id) + assert.equal(10, token.expires_in) + assert.truthy(token.access_token) + assert.truthy(token.refresh_token) + assert.equal("bearer", token.token_type) + end) + + it("[FAILURE] should return proper errors", function() + local response, status = http_client.post(BASE_URL, {}) + assert.equal(400, status) + assert.equal('{"credential_id":"credential_id is required","expires_in":"expires_in is required"}\n', response) + end) + + end) + + describe("GET", 
function() + + it("should retrieve by id", function() + local response, status = http_client.get(BASE_URL..token.id) + assert.equal(200, status) + local body = json.decode(response) + assert.equals(credential.id, body.credential_id) + end) + + it("should retrieve all", function() + local response, status = http_client.get(BASE_URL) + assert.equal(200, status) + local body = json.decode(response) + assert.equals(1, body.total) + end) + + end) + + end) end) diff --git a/spec/plugins/oauth2/hooks_spec.lua b/spec/plugins/oauth2/hooks_spec.lua new file mode 100644 index 00000000000..72c7b6bab59 --- /dev/null +++ b/spec/plugins/oauth2/hooks_spec.lua @@ -0,0 +1,302 @@ +local json = require "cjson" +local http_client = require "kong.tools.http_client" +local spec_helper = require "spec.spec_helpers" +local cache = require "kong.tools.database_cache" +local rex = require "rex_pcre" + +local STUB_GET_URL = spec_helper.STUB_GET_URL +local PROXY_SSL_URL = spec_helper.PROXY_SSL_URL +local API_URL = spec_helper.API_URL + +local env = spec_helper.get_env() -- test environment +local dao_factory = env.dao_factory +local configuration = env.configuration +configuration.cassandra = configuration[configuration.database].properties + +describe("OAuth2 Authentication Hooks", function() + + setup(function() + spec_helper.prepare_db() + end) + + teardown(function() + spec_helper.stop_kong() + end) + + before_each(function() + spec_helper.restart_kong() + + spec_helper.drop_db() + spec_helper.insert_fixtures { + api = { + { request_host = "oauth2.com", upstream_url = "http://mockbin.com" } + }, + consumer = { + { username = "auth_tests_consumer" } + }, + plugin = { + { name = "oauth2", config = { scopes = { "email", "profile" }, mandatory_scope = true, provision_key = "provision123", token_expiration = 5, enable_implicit_grant = true }, __api = 1 } + }, + oauth2_credential = { + { client_id = "clientid123", client_secret = "secret123", redirect_uri = "http://google.com/kong", 
name="testapp", __consumer = 1 } + } + } + end) + + local function provision_code(client_id) + local response = http_client.post(PROXY_SSL_URL.."/oauth2/authorize", { provision_key = "provision123", authenticated_userid = "id123", client_id = client_id, scope = "email", response_type = "code", state = "hello", authenticated_userid = "userid123" }, {host = "oauth2.com"}) + local body = json.decode(response) + if body.redirect_uri then + local matches = rex.gmatch(body.redirect_uri, "^http://google\\.com/kong\\?code=([\\w]{32,32})&state=hello$") + local code + for line in matches do + code = line + end + local data = dao_factory.oauth2_authorization_codes:find_by_keys({code = code}) + return data[1].code + end + end + + describe("OAuth2 Credentials entity invalidation", function() + it("should invalidate when OAuth2 Credential entity is deleted", function() + -- It should work + local code = provision_code("clientid123") + local _, status = http_client.post(PROXY_SSL_URL.."/oauth2/token", { code = code, client_id = "clientid123", client_secret = "secret123", grant_type = "authorization_code" }, {host = "oauth2.com"}) + assert.are.equal(200, status) + + -- Check that cache is populated + local cache_key = cache.oauth2_credential_key("clientid123") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve credential ID + local response, status = http_client.get(API_URL.."/consumers/auth_tests_consumer/oauth2/") + assert.equals(200, status) + local credential_id = table.remove(json.decode(response).data, 1).id + assert.truthy(credential_id) + + -- Delete OAuth2 credential (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/auth_tests_consumer/oauth2/"..credential_id) + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = 
false + end + end + + -- It should not work + local code = provision_code("clientid123") + local _, status = http_client.post(PROXY_SSL_URL.."/oauth2/token", { code = code, client_id = "clientid123", client_secret = "secret123", grant_type = "authorization_code" }, {host = "oauth2.com"}) + assert.are.equal(400, status) + end) + it("should invalidate when OAuth2 Credential entity is updated", function() + -- It should work + local code = provision_code("clientid123") + local _, status = http_client.post(PROXY_SSL_URL.."/oauth2/token", { code = code, client_id = "clientid123", client_secret = "secret123", grant_type = "authorization_code" }, {host = "oauth2.com"}) + assert.are.equal(200, status) + + -- It should not work + local code = provision_code("updclientid123") + local _, status = http_client.post(PROXY_SSL_URL.."/oauth2/token", { code = code, client_id = "updclientid123", client_secret = "secret123", grant_type = "authorization_code" }, {host = "oauth2.com"}) + assert.are.equal(400, status) + + -- Check that cache is populated + local cache_key = cache.oauth2_credential_key("clientid123") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve credential ID + local response, status = http_client.get(API_URL.."/consumers/auth_tests_consumer/oauth2/") + assert.equals(200, status) + local credential_id = table.remove(json.decode(response).data, 1).id + assert.truthy(credential_id) + + -- Update OAuth2 credential (which triggers invalidation) + local _, status = http_client.patch(API_URL.."/consumers/auth_tests_consumer/oauth2/"..credential_id, {client_id="updclientid123"}) + assert.equals(200, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should work + local code = provision_code("updclientid123") + local _, status = 
http_client.post(PROXY_SSL_URL.."/oauth2/token", { code = code, client_id = "updclientid123", client_secret = "secret123", grant_type = "authorization_code" }, {host = "oauth2.com"}) + assert.are.equal(200, status) + + -- It should not work + local code = provision_code("clientid123") + local _, status = http_client.post(PROXY_SSL_URL.."/oauth2/token", { code = code, client_id = "clientid123", client_secret = "secret123", grant_type = "authorization_code" }, {host = "oauth2.com"}) + assert.are.equal(400, status) + end) + end) + + describe("Consumer entity invalidation", function() + it("should invalidate when Consumer entity is deleted", function() + -- It should work + local code = provision_code("clientid123") + local _, status = http_client.post(PROXY_SSL_URL.."/oauth2/token", { code = code, client_id = "clientid123", client_secret = "secret123", grant_type = "authorization_code" }, {host = "oauth2.com"}) + assert.are.equal(200, status) + + -- Check that cache is populated + local cache_key = cache.oauth2_credential_key("clientid123") + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Delete Consumer (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/auth_tests_consumer") + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local code = provision_code("clientid123") + local _, status = http_client.post(PROXY_SSL_URL.."/oauth2/token", { code = code, client_id = "clientid123", client_secret = "secret123", grant_type = "authorization_code" }, {host = "oauth2.com"}) + assert.are.equal(400, status) + end) + end) + + describe("OAuth2 access token entity invalidation", function() + it("should invalidate when OAuth2 token entity is deleted", function() + -- It should work + local 
code = provision_code("clientid123") + local response, status = http_client.post(PROXY_SSL_URL.."/oauth2/token", { code = code, client_id = "clientid123", client_secret = "secret123", grant_type = "authorization_code" }, {host = "oauth2.com"}) + assert.are.equal(200, status) + local token = json.decode(response) + assert.truthy(token) + + local _, status = http_client.post(STUB_GET_URL, { access_token = token.access_token }, {host = "oauth2.com"}) + assert.are.equal(200, status) + + -- Check that cache is populated + local cache_key = cache.oauth2_token_key(token.access_token) + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Delete token (which triggers invalidation) + local res = dao_factory.oauth2_tokens:find_by_keys({access_token=token.access_token}) + local token_id = res[1].id + assert.truthy(token_id) + + local _, status = http_client.delete(API_URL.."/oauth2_tokens/"..token_id) + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local _, status = http_client.post(STUB_GET_URL, { access_token = token.access_token }, {host = "oauth2.com"}) + assert.are.equal(401, status) + end) + it("should invalidate when Oauth2 token entity is updated", function() + -- It should work + local code = provision_code("clientid123") + local response, status = http_client.post(PROXY_SSL_URL.."/oauth2/token", { code = code, client_id = "clientid123", client_secret = "secret123", grant_type = "authorization_code" }, {host = "oauth2.com"}) + assert.are.equal(200, status) + local token = json.decode(response) + assert.truthy(token) + + local _, status = http_client.post(STUB_GET_URL, { access_token = token.access_token }, {host = "oauth2.com"}) + assert.are.equal(200, status) + + -- It should not work + local _, status = 
http_client.post(STUB_GET_URL, { access_token = "hello_token" }, {host = "oauth2.com"}) + assert.are.equal(401, status) + + -- Check that cache is populated + local cache_key = cache.oauth2_token_key(token.access_token) + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Update OAuth 2 token (which triggers invalidation) + local res = dao_factory.oauth2_tokens:find_by_keys({access_token=token.access_token}) + local token_id = res[1].id + assert.truthy(token_id) + + local _, status = http_client.patch(API_URL.."/oauth2_tokens/"..token_id, {access_token="hello_token"}) + assert.equals(200, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should work + local _, status = http_client.post(STUB_GET_URL, { access_token = "hello_token" }, {host = "oauth2.com"}) + assert.are.equal(200, status) + + -- It should not work + local _, status = http_client.post(STUB_GET_URL, { access_token = token.access_token }, {host = "oauth2.com"}) + assert.are.equal(401, status) + end) + end) + + describe("OAuth2 client entity invalidation", function() + it("should invalidate token when OAuth2 client entity is deleted", function() + -- It should work + local code = provision_code("clientid123") + local response, status = http_client.post(PROXY_SSL_URL.."/oauth2/token", { code = code, client_id = "clientid123", client_secret = "secret123", grant_type = "authorization_code" }, {host = "oauth2.com"}) + assert.are.equal(200, status) + local token = json.decode(response) + assert.truthy(token) + + local _, status = http_client.post(STUB_GET_URL, { access_token = token.access_token }, {host = "oauth2.com"}) + assert.are.equal(200, status) + + -- Check that cache is populated + local cache_key = cache.oauth2_token_key(token.access_token) + local _, status = 
http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve credential ID + local response, status = http_client.get(API_URL.."/consumers/auth_tests_consumer/oauth2/", {client_id="clientid123"}) + assert.equals(200, status) + local credential_id = table.remove(json.decode(response).data, 1).id + assert.truthy(credential_id) + + -- Delete OAuth2 client (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/consumers/auth_tests_consumer/oauth2/"..credential_id) + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local _, status = http_client.post(STUB_GET_URL, { access_token = token.access_token }, {host = "oauth2.com"}) + assert.are.equal(401, status) + end) + end) + +end) diff --git a/spec/plugins/ssl/hooks_spec.lua b/spec/plugins/ssl/hooks_spec.lua new file mode 100644 index 00000000000..558311123a6 --- /dev/null +++ b/spec/plugins/ssl/hooks_spec.lua @@ -0,0 +1,166 @@ +local json = require "cjson" +local http_client = require "kong.tools.http_client" +local spec_helper = require "spec.spec_helpers" +local cache = require "kong.tools.database_cache" +local ssl_fixtures = require "spec.plugins.ssl.fixtures" +local IO = require "kong.tools.io" +local url = require "socket.url" + +local STUB_GET_SSL_URL = spec_helper.STUB_GET_SSL_URL +local API_URL = spec_helper.API_URL + +describe("SSL Hooks", function() + + setup(function() + spec_helper.prepare_db() + end) + + teardown(function() + spec_helper.stop_kong() + end) + + before_each(function() + spec_helper.restart_kong() + + spec_helper.drop_db() + spec_helper.insert_fixtures { + api = { + { request_host = "ssl1.com", upstream_url = "http://mockbin.com" } + }, + plugin = { + { name = "ssl", config = { cert = ssl_fixtures.cert, key = ssl_fixtures.key }, __api = 
1 } + } + } + end) + + describe("SSL plugin entity invalidation", function() + it("should invalidate when SSL plugin is deleted", function() + -- It should work + local parsed_url = url.parse(STUB_GET_SSL_URL) + local res = IO.os_execute("(echo \"GET /\"; sleep 2) | openssl s_client -connect "..parsed_url.host..":"..tostring(parsed_url.port).." -servername ssl1.com") + + assert.truthy(res:match("US/ST=California/L=San Francisco/O=Kong/OU=IT/CN=ssl1.com")) + + -- Check that cache is populated + local response, status = http_client.get(API_URL.."/apis/", {request_host="ssl1.com"}) + assert.equals(200, status) + local api_id = table.remove(json.decode(response).data, 1).id + assert.truthy(api_id) + + local cache_key = cache.ssl_data(api_id) + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve SSL plugin + local response, status = http_client.get(API_URL.."/plugins/", {api_id=api_id, name="ssl"}) + assert.equals(200, status) + local plugin_id = table.remove(json.decode(response).data, 1).id + assert.truthy(plugin_id) + + -- Delete SSL plugin (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/plugins/"..plugin_id) + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local parsed_url = url.parse(STUB_GET_SSL_URL) + local res = IO.os_execute("(echo \"GET /\"; sleep 2) | openssl s_client -connect "..parsed_url.host..":"..tostring(parsed_url.port).." 
-servername ssl1.com") + + assert.falsy(res:match("US/ST=California/L=San Francisco/O=Kong/OU=IT/CN=ssl1.com")) + end) + it("should invalidate when Basic Auth Credential entity is updated", function() + -- It should work + local parsed_url = url.parse(STUB_GET_SSL_URL) + local res = IO.os_execute("(echo \"GET /\"; sleep 2) | openssl s_client -connect "..parsed_url.host..":"..tostring(parsed_url.port).." -servername ssl1.com") + + assert.truthy(res:match("US/ST=California/L=San Francisco/O=Kong/OU=IT/CN=ssl1.com")) + + -- Check that cache is populated + local response, status = http_client.get(API_URL.."/apis/", {request_host="ssl1.com"}) + assert.equals(200, status) + local api_id = table.remove(json.decode(response).data, 1).id + assert.truthy(api_id) + + local cache_key = cache.ssl_data(api_id) + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Retrieve SSL plugin + local response, status = http_client.get(API_URL.."/plugins/", {api_id=api_id, name="ssl"}) + assert.equals(200, status) + local plugin_id = table.remove(json.decode(response).data, 1).id + assert.truthy(plugin_id) + + -- Update SSL plugin (which triggers invalidation) + local kong_working_dir = spec_helper.get_env(spec_helper.TEST_CONF_FILE).configuration.nginx_working_dir + local ssl_cert_path = IO.path:join(kong_working_dir, "ssl", "kong-default.crt") + local ssl_key_path = IO.path:join(kong_working_dir, "ssl", "kong-default.key") + + local res = IO.os_execute("curl -X PATCH -s -o /dev/null -w \"%{http_code}\" "..API_URL.."/apis/"..api_id.."/plugins/"..plugin_id.." 
--form \"config.cert=@"..ssl_cert_path.."\" --form \"config.key=@"..ssl_key_path.."\"") + assert.are.equal(200, tonumber(res)) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local parsed_url = url.parse(STUB_GET_SSL_URL) + local res = IO.os_execute("(echo \"GET /\"; sleep 2) | openssl s_client -connect "..parsed_url.host..":"..tostring(parsed_url.port).." -servername ssl1.com") + + assert.falsy(res:match("US/ST=California/L=San Francisco/O=Kong/OU=IT/CN=ssl1.com")) + assert.truthy(res:match("US/ST=California/L=San Francisco/O=Kong/OU=IT Department/CN=localhost")) + end) + end) + + describe("API entity invalidation", function() + it("should invalidate when API entity is deleted", function() + -- It should work + local parsed_url = url.parse(STUB_GET_SSL_URL) + local res = IO.os_execute("(echo \"GET /\"; sleep 2) | openssl s_client -connect "..parsed_url.host..":"..tostring(parsed_url.port).." 
-servername ssl1.com") + + assert.truthy(res:match("US/ST=California/L=San Francisco/O=Kong/OU=IT/CN=ssl1.com")) + + -- Check that cache is populated + local response, status = http_client.get(API_URL.."/apis/", {request_host="ssl1.com"}) + assert.equals(200, status) + local api_id = table.remove(json.decode(response).data, 1).id + assert.truthy(api_id) + + local cache_key = cache.ssl_data(api_id) + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + assert.equals(200, status) + + -- Delete API (which triggers invalidation) + local _, status = http_client.delete(API_URL.."/apis/"..api_id) + assert.equals(204, status) + + -- Wait for cache to be invalidated + local exists = true + while(exists) do + local _, status = http_client.get(API_URL.."/cache/"..cache_key) + if status ~= 200 then + exists = false + end + end + + -- It should not work + local parsed_url = url.parse(STUB_GET_SSL_URL) + local res = IO.os_execute("(echo \"GET /\"; sleep 2) | openssl s_client -connect "..parsed_url.host..":"..tostring(parsed_url.port).." -servername ssl1.com") + + assert.falsy(res:match("US/ST=California/L=San Francisco/O=Kong/OU=IT/CN=ssl1.com")) + end) + end) +end) diff --git a/spec/spec_helpers.lua b/spec/spec_helpers.lua index fbcd4a71f1c..f7ceb3f0485 100644 --- a/spec/spec_helpers.lua +++ b/spec/spec_helpers.lua @@ -2,15 +2,16 @@ -- It is built so that it only needs to be required at the beginning of any spec file. -- It supports other environments by passing a configuration file. 
+require "kong.tools.ngx_stub" + local IO = require "kong.tools.io" local dao = require "kong.tools.dao_loader" local Faker = require "kong.tools.faker" local config = require "kong.tools.config_loader" local Threads = require "llthreads2.ex" +local Events = require "kong.core.events" local Migrations = require "kong.tools.migrations" -require "kong.tools.ngx_stub" - local _M = {} -- Constants @@ -33,10 +34,12 @@ _M.envs = {} -- a factory/migrations/faker that are environment-specific to this new config. function _M.add_env(conf_file) local env_configuration = config.load(conf_file) - local env_factory = dao.load(env_configuration) + local events = Events() + local env_factory = dao.load(env_configuration, false, events) _M.envs[conf_file] = { configuration = env_configuration, dao_factory = env_factory, + events = events, migrations = Migrations(env_factory, env_configuration), conf_file = conf_file, faker = Faker(env_factory) @@ -56,26 +59,19 @@ end -- -- OS and bin/kong helpers -- -local function kong_bin(signal, conf_file, skip_wait) +local function kong_bin(signal, conf_file) local env = _M.get_env(conf_file) local result, exit_code = IO.os_execute(_M.KONG_BIN.." "..signal.." -c "..env.conf_file) - if exit_code ~= 0 then error("spec_helper cannot "..signal.." kong: \n"..result) end - if signal == "start" and not skip_wait then - os.execute("while ! [ -f "..env.configuration.pid_file.." ]; do sleep 0; done") - elseif signal == "quit" or signal == "stop" then - os.execute("while [ -f "..env.configuration.pid_file.." 
]; do sleep 0; done") - end - return result, exit_code end -for _, signal in ipairs({ "start", "stop", "restart", "reload", "quit" }) do +for _, signal in ipairs({ "start", "stop", "restart", "reload", "quit", "status" }) do _M[signal.."_kong"] = function(conf_file, skip_wait) - return kong_bin(signal, conf_file, skip_wait) + return kong_bin(signal, conf_file) end end @@ -233,4 +229,4 @@ end -- Add the default env to our spec_helper _M.add_env(_M.TEST_CONF_FILE) -return _M +return _M \ No newline at end of file diff --git a/spec/unit/api/app_spec.lua b/spec/unit/api/app_spec.lua index c68fd3fa445..1fca26d6f2c 100644 --- a/spec/unit/api/app_spec.lua +++ b/spec/unit/api/app_spec.lua @@ -1,7 +1,7 @@ -local app = require "kong.api.app" - require "kong.tools.ngx_stub" +local app = require "kong.api.app" + local stub = { req = { headers = {} }, add_params = function() end, diff --git a/spec/unit/cli/utils_spec.lua b/spec/unit/cli/utils_spec.lua deleted file mode 100644 index 8806059f7d7..00000000000 --- a/spec/unit/cli/utils_spec.lua +++ /dev/null @@ -1,29 +0,0 @@ -local cutils = require "kong.cli.utils" -local socket = require "socket" - -describe("CLI Utils", function() - pending("should check if a port is open", function() - local PORT = 30000 - local server, success, err - - -- Check a currently closed port - assert.truthy(cutils.is_port_bindable(PORT)) - - -- Check an open port, with SO_REUSEADDR set - server = socket.tcp() - assert(server:setoption('reuseaddr', true)) - assert(server:bind("*", PORT)) - assert(server:listen()) - success, err = cutils.is_port_bindable(PORT) - server:close() - assert.truthy(success, err) - - -- Check an open port, without SO_REUSEADDR set - server = socket.tcp() - assert(server:bind("*", PORT)) - assert(server:listen()) - success, err = cutils.is_port_bindable(PORT) - server:close() - assert.falsy(success, err) - end) -end) diff --git a/spec/unit/core/resolver_spec.lua b/spec/unit/core/resolver_spec.lua index 7d326537cdb..d01f876b932 
100644 --- a/spec/unit/core/resolver_spec.lua +++ b/spec/unit/core/resolver_spec.lua @@ -1,8 +1,8 @@ -local resolver = require "kong.core.resolver" - -- Stubs require "kong.tools.ngx_stub" +local resolver = require "kong.core.resolver" + local APIS_FIXTURES = { -- request_host {name = "mockbin", request_host = "mockbin.com", upstream_url = "http://mockbin.com"}, diff --git a/spec/unit/dao/cassandra/migrations_spec.lua b/spec/unit/dao/cassandra/migrations_spec.lua index 5c0b0e660f8..b99dbcb151d 100644 --- a/spec/unit/dao/cassandra/migrations_spec.lua +++ b/spec/unit/dao/cassandra/migrations_spec.lua @@ -54,4 +54,4 @@ describe("Cassandra migrations", function() assert.equal("invalid replication_strategy class", err) end) end) -end) +end) \ No newline at end of file diff --git a/spec/unit/rockspec_spec.lua b/spec/unit/rockspec_spec.lua new file mode 100644 index 00000000000..be90d61440a --- /dev/null +++ b/spec/unit/rockspec_spec.lua @@ -0,0 +1,52 @@ +local stringy = require "stringy" +local IO = require "kong.tools.io" +local fs = require "luarocks.fs" + +describe("Rockspec file", function() + + it("should include all the Lua modules", function() + local rockspec_path + for _, filename in ipairs(fs.list_dir(".")) do + if stringy.endswith(filename, "rockspec") then + rockspec_path = filename + break + end + end + if not rockspec_path then + error("Can't find the rockspec file") + end + + loadfile(rockspec_path)() + + -- Function that checks if the path has been imported as a module + local is_in_rockspec = function(path) + if stringy.startswith(path, "./") then + path = string.sub(path, 3) + end + local found = false + for _, v in pairs(build.modules) do + if v == path then + found = true + break + end + end + return found + end + + local res = IO.os_execute("find . 
-type f -name *.lua", true) + if not res or stringy.strip(res) == "" then + error("Error executing the command") + end + + local files = stringy.split(res, "\n") + for _, v in ipairs(files) do + local path = stringy.strip(v) + if path ~= "" and stringy.startswith(path, "./kong") then + if not is_in_rockspec(path) then + error("Module "..path.." is not declared in rockspec") + end + end + end + end) + +end) \ No newline at end of file diff --git a/spec/unit/tools/config_loader_spec.lua b/spec/unit/tools/config_loader_spec.lua index 00582eec367..5b1163ac067 100644 --- a/spec/unit/tools/config_loader_spec.lua +++ b/spec/unit/tools/config_loader_spec.lua @@ -19,8 +19,11 @@ describe("Configuration validation", function() assert.falsy(errors) assert.truthy(conf.custom_plugins) - assert.truthy(conf.admin_api_port) - assert.truthy(conf.proxy_port) + assert.truthy(conf.admin_api_listen) + assert.truthy(conf.proxy_listen) + assert.truthy(conf.proxy_listen_ssl) + assert.truthy(conf.cluster_listen) + assert.truthy(conf.cluster_listen_rpc) assert.truthy(conf.database) assert.truthy(conf.cassandra) @@ -45,8 +48,8 @@ describe("Configuration validation", function() end) it("should validate various types", function() local ok, errors = config.validate({ - proxy_port = "string", - database = "cassandra", + proxy_listen = 123, + database = 777, cassandra = { contact_points = "127.0.0.1", ssl = { @@ -56,7 +59,9 @@ describe("Configuration validation", function() }) assert.False(ok) assert.truthy(errors) - assert.equal("must be a number", errors.proxy_port) + assert.equal("must be a string", errors.proxy_listen) + assert.equal("must be a string", errors.database[1]) + assert.equal("must be one of: 'cassandra'", errors.database[2]) assert.equal("must be a array", errors["cassandra.contact_points"]) assert.equal("must be a boolean", errors["cassandra.ssl.enabled"]) assert.falsy(errors.ssl_cert_path) @@ -81,5 +86,72 @@ describe("Configuration validation", function() assert.False(ok) 
assert.equal("must be one of: 'cassandra'", errors.database) end) + it("should validate the selected dns_resolver property", function() + local ok, errors = config.validate({dns_resolver = "foo"}) + assert.False(ok) + assert.equal("must be one of: 'server, dnsmasq'", errors.dns_resolver) + end) + it("should validate the host:port listen addresses", function() + -- Missing port + local ok, errors = config.validate({proxy_listen = "foo"}) + assert.False(ok) + assert.equal("foo is not a valid \"host:port\" value", errors.proxy_listen) + + -- Port invalid + ok, errors = config.validate({proxy_listen = "foo:asd"}) + assert.False(ok) + assert.equal("foo:asd is not a valid \"host:port\" value", errors.proxy_listen) + + -- Port too large + ok, errors = config.validate({proxy_listen = "foo:8000000"}) + assert.False(ok) + assert.equal("foo:8000000 is not a valid \"host:port\" value", errors.proxy_listen) + + -- Only port + ok, errors = config.validate({proxy_listen = "1231"}) + assert.False(ok) + assert.equal("1231 is not a valid \"host:port\" value", errors.proxy_listen) + + -- Only semicolon and port + ok, errors = config.validate({proxy_listen = ":1231"}) + assert.False(ok) + assert.equal(":1231 is not a valid \"host:port\" value", errors.proxy_listen) + + -- Valid with hostname + ok, errors = config.validate({proxy_listen = "hello:1231"}) + assert.True(ok) + assert.falsy(errors) + + -- Valid with IP + ok, errors = config.validate({proxy_listen = "1.1.1.1:1231"}) + assert.True(ok) + assert.falsy(errors) + end) + it("should validate the ip:port listen addresses", function() + -- Hostname instead of IP + local ok, errors = config.validate({cluster_listen = "hello.com:1231"}) + assert.False(ok) + assert.equal("hello.com:1231 is not a valid \"ip:port\" value", errors.cluster_listen) + + -- Invalid IP + ok, errors = config.validate({cluster_listen = "777.1.1.1:1231"}) + assert.False(ok) + assert.equal("777.1.1.1:1231 is not a valid \"ip:port\" value", errors.cluster_listen) + 
+ -- Valid + ok, errors = config.validate({cluster_listen = "1.1.1.1:1231"}) + assert.True(ok) + assert.falsy(errors) + + -- Invalid cluster.advertise + ok, errors = config.validate({cluster={advertise = "1"}}) + assert.False(ok) + assert.equal("1 is not a valid \"ip:port\" value", errors["cluster.advertise"]) + + -- Valid cluster.advertise + ok, errors = config.validate({cluster={advertise = "1.1.1.1:1231"}}) + assert.True(ok) + assert.falsy(errors) + end) end) diff --git a/spec/unit/tools/database_cache_spec.lua b/spec/unit/tools/database_cache_spec.lua index e24956443c7..b716abfb2ff 100644 --- a/spec/unit/tools/database_cache_spec.lua +++ b/spec/unit/tools/database_cache_spec.lua @@ -1,22 +1,31 @@ +require "kong.tools.ngx_stub" local cache = require "kong.tools.database_cache" describe("Database cache", function() it("should return a valid API cache key", function() - assert.are.equal("apis/httpbin.org", cache.api_key("httpbin.org")) + assert.are.equal("apis:httpbin.org", cache.api_key("httpbin.org")) end) it("should return a valid PLUGIN cache key", function() - assert.are.equal("plugins/authentication/api123/app123", cache.plugin_key("authentication", "api123", "app123")) - assert.are.equal("plugins/authentication/api123", cache.plugin_key("authentication", "api123")) + assert.are.equal("plugins:authentication:api123:app123", cache.plugin_key("authentication", "api123", "app123")) + assert.are.equal("plugins:authentication:api123", cache.plugin_key("authentication", "api123")) end) it("should return a valid KeyAuthCredential cache key", function() - assert.are.equal("keyauth_credentials/username", cache.keyauth_credential_key("username")) + assert.are.equal("keyauth_credentials:username", cache.keyauth_credential_key("username")) end) it("should return a valid BasicAuthCredential cache key", function() - assert.are.equal("basicauth_credentials/username", cache.basicauth_credential_key("username")) + assert.are.equal("basicauth_credentials:username", 
cache.basicauth_credential_key("username")) + end) + + it("should return a valid HmacAuthCredential cache key", function() + assert.are.equal("hmacauth_credentials:username", cache.hmacauth_credential_key("username")) + end) + + it("should return a valid JWTAuthCredential cache key", function() + assert.are.equal("jwtauth_credentials:hello", cache.jwtauth_credential_key("hello")) end) it("should return a valid requests cache key", function() diff --git a/spec/unit/tools/faker_spec.lua b/spec/unit/tools/faker_spec.lua index 47db1bcf168..6f1b3d9ccc7 100644 --- a/spec/unit/tools/faker_spec.lua +++ b/spec/unit/tools/faker_spec.lua @@ -4,7 +4,7 @@ local DaoError = require "kong.dao.error" describe("Faker", function() - local ENTITIES_TYPES = { "api", "consumer", "plugin" } + local ENTITIES_TYPES = { "api", "consumer", "plugin", "node" } local factory_mock = {} local insert_spy