From f7a21da9e17c2eed096839dbc80aae336f1b856a Mon Sep 17 00:00:00 2001 From: An Tran Date: Sat, 14 Oct 2023 17:46:49 +1000 Subject: [PATCH 1/5] Add lua-resty-redis-connector package --- gateway/Roverfile.lock | 39 +++++++++++++++++----------------- gateway/apicast-scm-1.rockspec | 1 + 2 files changed, 21 insertions(+), 19 deletions(-) diff --git a/gateway/Roverfile.lock b/gateway/Roverfile.lock index 81ca5e27a..e0aea1926 100644 --- a/gateway/Roverfile.lock +++ b/gateway/Roverfile.lock @@ -1,31 +1,32 @@ -argparse 0.7.1-1||production -busted 2.1.1-1||testing -date 2.2-2||production -dkjson 2.6-1||testing +argparse 0.6.0-1|412e6aca393e365f92c0315dfe50181b193f1ace|production +busted 2.1.1-1|e3ed48759b625f2e37bf02ccc057b2b98108f108|testing +date 2.2-2|8d74567cf979c1eab2c6b6ca2e3b978fa40569a2|production +dkjson 2.5-2||testing fifo 0.2-0||development -inspect 3.1.3-0||production +inspect 3.1.1-0||production jsonschema 0.8-0|c1d72d86bb3dc5b33da57d47febc47657d29ea74|testing ldoc 1.4.6-2||development liquid 0.2.0-2||production lua-resty-env 0.4.0-1||production lua-resty-execvp 0.1.1-1||production -lua-resty-http 0.15-0||production -lua-resty-iputils 0.3.0-2||production -lua-resty-jit-uuid 0.0.7-2||production -lua-resty-jwt 0.2.0-0||production +lua-resty-http 0.15-0|41b2e822ce5c19f64e293b7dc2d5d244e511615d|production +lua-resty-iputils 0.3.0-1|6110b41eaa52efd25e56f89e34412ab95f700d57|production +lua-resty-jit-uuid 0.0.7-2|64ae38de75c9d58f330d89e140ac872771c19223|production +lua-resty-jwt 0.2.0-0|2a62ff95eae91df6bd8655080a4b9b04c61bec6b|production +lua-resty-redis-connector 0.11.0-0||production lua-resty-repl 0.0.6-0|3878f41b7e8f97b1c96919db19dbee9496569dda|development lua-resty-url 0.3.5-1||production lua-term 0.7-1||testing -lua_cliargs 3.0-2||testing -luacov 0.15.0-1||testing -luafilesystem 1.8.0-1||production,development,testing -luassert 1.9.0-1||testing +lua_cliargs 3.0-1||testing +luacov 0.13.0-1|637c48d59f722050d718d0c398f655bc7fe1707a|testing +luafilesystem 
1.7.0-2|de87218e9798c4dd1a40d65403d99e9e82e1cfa0|testing,production,development +luassert 1.9.0-1|8d8dc8a54cc468048a128a867f6449a6c3fdd11a|testing luasystem 0.2.1-0||testing -lyaml 6.2.8-1||production -markdown 0.33-1||development +lyaml 6.2.4-1||production +markdown 0.33-1|8c09109924b218aaecbfd4d4b1de538269c4d765|development mediator_lua 1.1.2-0||testing -net-url 1.1-1||testing -nginx-lua-prometheus 0.20181120-3||production -penlight 1.13.1-1||production,development,testing +net-url 1.1-1|32acd84d06e16ddffc975adafce9cea26f3b2dd1|testing +nginx-lua-prometheus 0.20181120-3|379c0a4d4d6f3c5b0eb93691fc7e14fff498e1ca|production +penlight 1.7.0-1|1cce50d2cd3b3dd209d2aa1f5ad72398ab1e8d48|testing,production,development router 2.1-0||production -say 1.4.1-3||testing \ No newline at end of file +say 1.4.1-3|45a3057e68c52b34ab59ef167efeb2340e356661|testing \ No newline at end of file diff --git a/gateway/apicast-scm-1.rockspec b/gateway/apicast-scm-1.rockspec index 8d7f2edd9..ee37091d2 100644 --- a/gateway/apicast-scm-1.rockspec +++ b/gateway/apicast-scm-1.rockspec @@ -23,6 +23,7 @@ dependencies = { 'penlight', 'nginx-lua-prometheus == 0.20181120', 'lua-resty-jit-uuid', + 'lua-resty-redis-connector', } build = { type = "make", From a75fb2bfe9496df91dfc259db9fcfec32b5ecb7e Mon Sep 17 00:00:00 2001 From: An Tran Date: Sat, 14 Oct 2023 19:52:50 +1000 Subject: [PATCH 2/5] Replace resty.redis with lua-resty-redis-connector --- gateway/src/apicast/threescale_utils.lua | 35 +++++++++--------------- 1 file changed, 13 insertions(+), 22 deletions(-) diff --git a/gateway/src/apicast/threescale_utils.lua b/gateway/src/apicast/threescale_utils.lua index d634178f0..66d557bae 100644 --- a/gateway/src/apicast/threescale_utils.lua +++ b/gateway/src/apicast/threescale_utils.lua @@ -1,7 +1,7 @@ local sub = string.sub local tonumber = tonumber -local redis = require 'resty.redis' +local redis = require 'resty.redis.connector' local env = require 'resty.env' local resty_resolver = require 
'resty.resolver' @@ -120,6 +120,7 @@ function _M.resolve(host, port) return ip, port end + function _M.connect_redis(options) local opts = {} @@ -141,36 +142,26 @@ function _M.connect_redis(options) opts.password = options.password end - opts.timeout = options and options.timeout or redis_conf.timeout + opts.connect_timeout = options and options.timeout or redis_conf.timeout local host = opts.host or env.get('REDIS_HOST') or "127.0.0.1" local port = opts.port or env.get('REDIS_PORT') or 6379 + host, port = _M.resolve(host, port) - local red = redis:new() + local rc = redis.new({ + connect_timeout = opts.timeout, - red:set_timeout(opts.timeout) + host = host, + port = port, + db = opts.db, + password = opts.password, + }) - local ok, err = red:connect(_M.resolve(host, port)) - if not ok then + local red, err = rc:connect() + if not red then return nil, _M.error("failed to connect to redis on ", host, ":", port, ": ", err) end - if opts.password then - ok = red:auth(opts.password) - - if not ok then - return nil, _M.error("failed to auth on redis ", host, ":", port) - end - end - - if opts.db then - ok = red:select(opts.db) - - if not ok then - return nil, _M.error("failed to select db ", opts.db, " on redis ", host, ":", port) - end - end - return red end From 736811e7d92484fdb4bc7580a3c3e0ca4630f3d0 Mon Sep 17 00:00:00 2001 From: An Tran Date: Sat, 14 Oct 2023 23:30:46 +1000 Subject: [PATCH 3/5] Adding support for redis sentinel --- .../src/apicast/policy/rate_limit/README.md | 37 ++++++++++++++- .../policy/rate_limit/apicast-policy.json | 13 +++++ .../apicast/policy/rate_limit/rate_limit.lua | 9 ++-- gateway/src/apicast/threescale_utils.lua | 47 ++++++++++--------- 4 files changed, 80 insertions(+), 26 deletions(-) diff --git a/gateway/src/apicast/policy/rate_limit/README.md b/gateway/src/apicast/policy/rate_limit/README.md index 63e175549..ff4944698 100644 --- a/gateway/src/apicast/policy/rate_limit/README.md +++ b/gateway/src/apicast/policy/rate_limit/README.md 
@@ -139,8 +139,43 @@ regardless of the number of APIcasts deployed, the policy provides the option of using a shared storage. For now, it only supports Redis. To use Redis, we just need to provide the `redis_url` attribute in the config -of the policy: `"redis_url": "redis://a_host:6379"` +of the policy: +The format for connecting directly to Redis is: + +``` +redis://USERNAME:PASSWORD@HOST:PORT/DB +``` + +The USERNAME, PASSWORD and DB fields are optional, all other components are required. + +To connect using Sentinel, use: + +``` +sentinel://USERNAME:PASSWORD@MASTER_NAME:ROLE/DB +``` + +* USERNAME, PASSWORD and DB are optional. +* MASTER_NAME identifies a group of Redis instances composed of a master and one or more slaves +* ROLE must be either `m` or `s` for master / slave respectively. If ROLE is not specified, the +client will connect to the master + +A table of sentinels must also be supplied: + +``` +"redis_url": "sentinel://mymaster:m/1" +"redis_sentinels": [ + { + "url": "redis://10.7.0.1:16379" + }, + { + "url": "redis://10.7.0.2:16379" + }, + { + "url": "redis://10.7.0.3:16379" + } +] +``` ## Limits with conditions diff --git a/gateway/src/apicast/policy/rate_limit/apicast-policy.json b/gateway/src/apicast/policy/rate_limit/apicast-policy.json index 0868b4d37..f7845466a 100644 --- a/gateway/src/apicast/policy/rate_limit/apicast-policy.json +++ b/gateway/src/apicast/policy/rate_limit/apicast-policy.json @@ -207,6 +207,19 @@ "description": "URL of Redis", "type": "string" }, + "redis_sentinels": { + "type": "array", + "description": "Specify a list of sentinels to connect to", + "items": { + "type": "object", + "properties": { + "url": { + "type": "string", + "description": "The URL of the sentinel" + } + } + } + }, + "limits_exceeded_error": { "type": "object", "properties": { diff --git a/gateway/src/apicast/policy/rate_limit/rate_limit.lua b/gateway/src/apicast/policy/rate_limit/rate_limit.lua index 55932eccb..20f59e00e 100644 --- 
a/gateway/src/apicast/policy/rate_limit/rate_limit.lua +++ b/gateway/src/apicast/policy/rate_limit/rate_limit.lua @@ -179,6 +179,7 @@ function _M.new(config) self.leaky_bucket_limiters = config.leaky_bucket_limiters or {} self.fixed_window_limiters = config.fixed_window_limiters or {} self.redis_url = config.redis_url + self.redis_sentinels = config.redis_sentinels or {} self.error_settings = init_error_settings( config.limits_exceeded_error, config.configuration_error) @@ -193,7 +194,7 @@ function _M:access(context) local red if self.redis_url and self.redis_url ~= '' then local rederr - red, rederr = redis_shdict.new{ url = self.redis_url } + red, rederr = redis_shdict.new{ url = self.redis_url , sentinels = self.redis_sentinels } if not red then ngx.log(ngx.ERR, "failed to connect Redis: ", rederr) error(self.error_settings, "configuration_issue") @@ -252,14 +253,14 @@ function _M:access(context) return true, delay end -local function checkin(_, ctx, time, semaphore, redis_url, error_settings) +local function checkin(_, ctx, time, semaphore, redis_url, redis_sentinels, error_settings) local limiters = ctx.limiters local keys = ctx.keys local latency = tonumber(time) local red if redis_url and redis_url ~= '' then local rederr - red, rederr = redis_shdict.new{ url = redis_url } + red, rederr = redis_shdict.new{ url = redis_url , sentinels = redis_sentinels } if not red then ngx.log(ngx.ERR, "failed to connect Redis: ", rederr) error(error_settings, "configuration_issue") @@ -297,7 +298,7 @@ function _M:log() -- lua_max_running_timers -- Also, we need to check that the each timer is a new fake-request, and it -- also consumes memory - local ok, err = ngx.timer.at(0, checkin, ngx.ctx, ngx.var.request_time, semaphore, self.redis_url, self.error_settings) + local ok, err = ngx.timer.at(0, checkin, ngx.ctx, ngx.var.request_time, semaphore, self.redis_url, self.redis_sentinels, self.error_settings) if not ok then ngx.log(ngx.ERR, "Failed to create timer for checkin 
limits, err='", err, "'") end diff --git a/gateway/src/apicast/threescale_utils.lua b/gateway/src/apicast/threescale_utils.lua index 66d557bae..1e2937f2f 100644 --- a/gateway/src/apicast/threescale_utils.lua +++ b/gateway/src/apicast/threescale_utils.lua @@ -1,14 +1,9 @@ -local sub = string.sub -local tonumber = tonumber - local redis = require 'resty.redis.connector' local env = require 'resty.env' local resty_resolver = require 'resty.resolver' local resty_balancer = require 'resty.balancer' -local resty_url = require 'resty.url' - local _M = {} -- public interface local redis_conf = { @@ -126,36 +121,46 @@ function _M.connect_redis(options) local url = options and options.url or env.get('REDIS_URL') - if url then - url = resty_url.split(url, 'redis') - if url then - opts.host = url[4] - opts.port = url[5] - opts.db = url[6] and tonumber(sub(url[6], 2)) - opts.password = url[3] or url[2] + local params, err = redis.parse_dsn({url=url}) + if err then + return nil, _M.error("invalid redis url ", err) end + opts = params or {} elseif options then opts.host = options.host opts.port = options.port opts.db = options.db opts.password = options.password + opts.master_name= options.master_name + opts.role = options.role end opts.connect_timeout = options and options.timeout or redis_conf.timeout + opts.keepalive_timeout = options and options.keepalive_timeout or redis_conf.keepalive + opts.keepalive_poolsize = options and options.keepalive_poolsize or redis_conf.keepalive_poolsize local host = opts.host or env.get('REDIS_HOST') or "127.0.0.1" local port = opts.port or env.get('REDIS_PORT') or 6379 - host, port = _M.resolve(host, port) + opts.host, opts.port = _M.resolve(host, port) - local rc = redis.new({ - connect_timeout = opts.timeout, + if options.sentinels and #options.sentinels > 0 then + local sentinels = {} + + for i, sentinel in ipairs(options.sentinels) do + local params, err = redis.parse_dsn({url=sentinel.url}) + if err then + return nil, _M.error("invalid 
redis url ", err) + end + + params.host, params.port = _M.resolve(params.host, params.port) + sentinels[i] = params + end + + opts.sentinels = sentinels + end - host = host, - port = port, - db = opts.db, - password = opts.password, - }) + local rc = redis.new(opts) local red, err = rc:connect() if not red then @@ -167,7 +172,7 @@ end -- return ownership of this connection to the pool function _M.release_redis(red) - red:set_keepalive(redis_conf.keepalive, redis_conf.poolsize) + redis:set_keepalive(red) end local xml_header_len = string.len('') From 5cb6870460116ed30b6c8569df12e5e5996fee3d Mon Sep 17 00:00:00 2001 From: An Tran Date: Wed, 18 Oct 2023 23:16:00 +1000 Subject: [PATCH 4/5] Add docker support for redis sentinel --- docker-compose-devel.yml | 34 ++++++++++++++++++++++++++++++++++ redis-sentinel/Dockerfile | 15 +++++++++++++++ redis-sentinel/entrypoint.sh | 2 ++ redis-sentinel/sentinel.conf | 9 +++++++++ 4 files changed, 60 insertions(+) create mode 100644 redis-sentinel/Dockerfile create mode 100644 redis-sentinel/entrypoint.sh create mode 100644 redis-sentinel/sentinel.conf diff --git a/docker-compose-devel.yml b/docker-compose-devel.yml index 7a2efe08f..bdaccec9d 100644 --- a/docker-compose-devel.yml +++ b/docker-compose-devel.yml @@ -13,6 +13,11 @@ services: environment: EDITOR: vi TEST_NGINX_REDIS_HOST: redis + TEST_NGINX_REDIS_MASTER: redismaster + TEST_NGINX_REDIS_SENTINEL_1_HOST: sentinel-1 + TEST_NGINX_REDIS_SENTINEL_2_HOST: sentinel-2 + TEST_NGINX_REDIS_SENTINEL_3_HOST: sentinel-3 + TEST_NGINX_REDIS_SENTINEL_PORT: 5000 TEST_NGINX_BINARY: openresty PROJECT_PATH: /opt/app-root/src TEST_NGINX_APICAST_PATH: /opt/app-root/src/gateway @@ -23,3 +28,32 @@ services: GIT_COMMITTER_EMAIL: ${GIT_COMMITTER_EMAIL:-""} redis: image: redis + redis-master: + image: redis + redis-slave-1: + image: redis + command: redis-server --slaveof redis-master 6379 + redis-slave-2: + image: redis + command: redis-server --slaveof redis-master 6379 + sentinel-1: + build: + 
context: ./redis-sentinel + depends_on: + - redis-master + - redis-slave-1 + - redis-slave-2 + sentinel-2: + build: + context: ./redis-sentinel + depends_on: + - redis-master + - redis-slave-1 + - redis-slave-2 + sentinel-3: + build: + context: ./redis-sentinel + depends_on: + - redis-master + - redis-slave-1 + - redis-slave-2 diff --git a/redis-sentinel/Dockerfile b/redis-sentinel/Dockerfile new file mode 100644 index 000000000..534e2cb8b --- /dev/null +++ b/redis-sentinel/Dockerfile @@ -0,0 +1,15 @@ +FROM redis + +RUN mkdir -p /redis + +WORKDIR /redis + +COPY sentinel.conf . +COPY entrypoint.sh /usr/local/bin/ + +RUN chown redis:redis /redis/* && \ + chmod +x /usr/local/bin/entrypoint.sh + +EXPOSE 5000 + +ENTRYPOINT ["entrypoint.sh"] diff --git a/redis-sentinel/entrypoint.sh b/redis-sentinel/entrypoint.sh new file mode 100644 index 000000000..fcc4d7990 --- /dev/null +++ b/redis-sentinel/entrypoint.sh @@ -0,0 +1,2 @@ +#!/bin/sh +redis-server /redis/sentinel.conf --sentinel diff --git a/redis-sentinel/sentinel.conf b/redis-sentinel/sentinel.conf new file mode 100644 index 000000000..25f318ef5 --- /dev/null +++ b/redis-sentinel/sentinel.conf @@ -0,0 +1,9 @@ +port 5000 + +dir /tmp + +sentinel resolve-hostnames yes +sentinel monitor redismaster redis-master 6379 2 +sentinel down-after-milliseconds redismaster 5000 +sentinel parallel-syncs redismaster 1 +sentinel failover-timeout redismaster 60000 From 224d858764423c8f2ee3fb31f1e2734ba78112b5 Mon Sep 17 00:00:00 2001 From: An Tran Date: Thu, 19 Oct 2023 13:49:31 +1000 Subject: [PATCH 5/5] Add unittest --- spec/policy/rate_limit/rate_limit_spec.lua | 459 ++++++++++++++----- spec/policy/rate_limit/redis_shdict_spec.lua | 124 +++++ spec/threescale_utils_spec.lua | 99 ++++ 3 files changed, 573 insertions(+), 109 deletions(-) diff --git a/spec/policy/rate_limit/rate_limit_spec.lua b/spec/policy/rate_limit/rate_limit_spec.lua index f4dce6594..8bf5c0c90 100644 --- a/spec/policy/rate_limit/rate_limit_spec.lua +++ 
b/spec/policy/rate_limit/rate_limit_spec.lua @@ -24,10 +24,24 @@ local ts = require ('apicast.threescale_utils') local redis_host = env.get('TEST_NGINX_REDIS_HOST') or '127.0.0.1' local redis_port = env.get('TEST_NGINX_REDIS_PORT') or 6379 +local redis_master = env.get('TEST_NGINX_REDIS_MASTER') +local redis_sentinel1_host = env.get('TEST_NGINX_REDIS_SENTINEL_1_HOST') +local redis_sentinel2_host = env.get('TEST_NGINX_REDIS_SENTINEL_2_HOST') +local redis_sentinel3_host = env.get('TEST_NGINX_REDIS_SENTINEL_3_HOST') +local redis_sentinel_port = env.get('TEST_NGINX_REDIS_SENTINEL_PORT') local redis_url = 'redis://'..redis_host..':'..redis_port..'/1' local redis = ts.connect_redis{ url = redis_url } + +local redis_sentinel_url = "sentinel://"..redis_master..":a/2" +local sentinels = { + {url="redis://"..redis_sentinel1_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel2_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel3_host..":"..redis_sentinel_port}, +} +local redis_sentinel = ts.connect_redis({url=redis_sentinel_url, sentinels=sentinels}) + describe('Rate limit policy', function() local context @@ -113,6 +127,10 @@ describe('Rate limit policy', function() end) describe('using #redis', function() + before_each(function() + redis_sentinel:flushdb() + end) + it('works with multiple limiters', function() local rate_limit_policy = RateLimitPolicy.new({ connection_limiters = { @@ -154,174 +172,397 @@ describe('Rate limit policy', function() assert(rate_limit_policy:access(context)) end) + it('invalid redis sentinel', function() + local rate_limit_policy = RateLimitPolicy.new({ + connection_limiters = { + { key = { name = 'test1', scope = 'global' }, conn = 20, burst = 10, delay = 0.5 } + }, + redis_url = 'sentinel://invalidhost.master:a/2', + redis_sentinels={ + {url="redis://"..redis_sentinel1_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel2_host..":"..redis_sentinel_port}, + 
{url="redis://"..redis_sentinel3_host..":"..redis_sentinel_port}, + } + }) + + assert.returns_error('failed to connect to redis on 127.0.0.1:6379: invalid master name', rate_limit_policy:access(context)) + + assert.spy(ngx.exit).was_called_with(500) + end) + + it('redis sentinel with empty redis url', function() + local rate_limit_policy = RateLimitPolicy.new({ + connection_limiters = { + { key = { name = 'test1', scope = 'global' }, conn = 20, burst = 10, delay = 0.5 } + }, + redis_url = '', + redis_sentinels={ + {url="redis://"..redis_sentinel1_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel2_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel3_host..":"..redis_sentinel_port}, + } + }) + assert(rate_limit_policy:access(context)) + end) + + it('redis sentinel with no valid sentinel url', function() + local rate_limit_policy = RateLimitPolicy.new({ + connection_limiters = { + { key = { name = 'test1', scope = 'global' }, conn = 20, burst = 10, delay = 0.5 } + }, + redis_url="sentinel://"..redis_master..":a/2", + redis_sentinels={ + {url="redis://invalid.sentinel-1:5000"}, + {url="redis://invalid.sentinel-2:5000"}, + {url="redis://invalid.sentinel-3:5000"}, + } + }) + assert.returns_error('failed to connect to redis on 127.0.0.1:6379: no hosts available', rate_limit_policy:access(context)) + + assert.spy(ngx.exit).was_called_with(500) + end) + + it('mix of normal redis url and sentinel', function() + local rate_limit_policy = RateLimitPolicy.new({ + connection_limiters = { + { key = { name = 'test1', scope = 'global' }, conn = 20, burst = 10, delay = 0.5 } + }, + redis_url="sentinel://"..redis_master..":a/2", + redis_sentinels={ + {url=redis_url}, + {url=redis_url}, + {url=redis_url} + } + }) + assert.returns_error("failed to connect to redis on 127.0.0.1:6379: ERR unknown command 'sentinel', with args beginning with: 'get-master-addr-by-name' 'redismaster' ", rate_limit_policy:access(context)) + + 
assert.spy(ngx.exit).was_called_with(500) + end) + describe('rejection', function() it('rejected (conn)', function() - local rate_limit_policy = RateLimitPolicy.new({ - connection_limiters = { - { key = { name = 'test1', scope = 'global' }, conn = 1, burst = 0, delay = 0.5 } - }, - redis_url = redis_url - }) + local configs = { + redis = { + connection_limiters = { + { key = { name = 'test1', scope = 'global' }, conn = 1, burst = 0, delay = 0.5 } + }, + redis_url = redis_url + }, + redis_sentinel = { + connection_limiters = { + { key = { name = 'test1', scope = 'global' }, conn = 1, burst = 0, delay = 0.5 } + }, + redis_url = redis_sentinel_url, + redis_sentinels = sentinels + } + } + for _, conf in pairs(configs) do + local rate_limit_policy = RateLimitPolicy.new(conf) - assert(rate_limit_policy:access(context)) - assert.returns_error('limits exceeded', rate_limit_policy:access(context)) + assert(rate_limit_policy:access(context)) + assert.returns_error('limits exceeded', rate_limit_policy:access(context)) - assert.spy(ngx.exit).was_called_with(429) + assert.spy(ngx.exit).was_called_with(429) + end end) it('rejected (req)', function() - local rate_limit_policy = RateLimitPolicy.new({ - leaky_bucket_limiters = { - { key = { name = 'test2foofoo', scope = 'global' }, rate = 1, burst = 0 } + local configs = { + redis = { + leaky_bucket_limiters = { + { key = { name = 'test2foofoo', scope = 'global' }, rate = 1, burst = 0 } + }, + redis_url = redis_url }, - redis_url = redis_url - }) + redis_sentinel = { + leaky_bucket_limiters = { + { key = { name = 'test2foofoo', scope = 'global' }, rate = 1, burst = 0 } + }, + redis_url = redis_sentinel_url, + redis_sentinels = sentinels + } + } - assert(rate_limit_policy:access(context)) - assert.returns_error('limits exceeded', rate_limit_policy:access(context)) + for _, conf in pairs(configs) do + local rate_limit_policy = RateLimitPolicy.new(conf) - assert.spy(ngx.exit).was_called_with(429) + 
assert(rate_limit_policy:access(context)) + assert.returns_error('limits exceeded', rate_limit_policy:access(context)) + + assert.spy(ngx.exit).was_called_with(429) + end end) it('rejected (count), name_type is plain', function() - local rate_limit_policy = RateLimitPolicy.new({ - fixed_window_limiters = { - { key = { name = 'test3', name_type = 'plain', scope = 'global' }, count = 1, window = 10 } + local configs = { + redis = { + conf = { + fixed_window_limiters = { + { key = { name = 'test3', name_type = 'plain', scope = 'global' }, count = 1, window = 10 } + }, + redis_url = redis_url + }, + redis_client = redis }, - redis_url = redis_url - }) + redis_sentinel = { + conf = { + fixed_window_limiters = { + { key = { name = 'test3', name_type = 'plain', scope = 'global' }, count = 1, window = 10 } + }, + redis_url = redis_sentinel_url, + redis_sentinels = sentinels + }, + redis_client = redis_sentinel + } + } + for _, config in pairs(configs) do + local rate_limit_policy = RateLimitPolicy.new(config.conf) - assert(rate_limit_policy:access(context)) - assert.returns_error('limits exceeded', rate_limit_policy:access(context)) + assert(rate_limit_policy:access(context)) + assert.returns_error('limits exceeded', rate_limit_policy:access(context)) - assert.equal('1', redis:get('11110_fixed_window_test3')) - assert.spy(ngx.exit).was_called_with(429) + assert.equal('1', config.redis_client:get('11110_fixed_window_test3')) + assert.spy(ngx.exit).was_called_with(429) + end end) it('rejected (count), name_type is liquid', function() - local ctx = { service = { id = 5 }, var_in_context = 'test3' } - local rate_limit_policy = RateLimitPolicy.new({ - fixed_window_limiters = { - { key = { name = '{{ var_in_context }}', name_type = 'liquid', scope = 'global' }, - count = 1, window = 10 } + local configs = { + redis = { + conf = { + fixed_window_limiters = { + { key = { name = '{{ var_in_context }}', name_type = 'liquid', scope = 'global' }, + count = 1, window = 10 } + }, + 
redis_url = redis_url + }, + redis_client = redis }, - redis_url = redis_url - }) + redis_sentinel = { + conf = { + fixed_window_limiters = { + { key = { name = '{{ var_in_context }}', name_type = 'liquid', scope = 'global' }, + count = 1, window = 10 } + }, + redis_url = redis_sentinel_url, + redis_sentinels = sentinels + }, + redis_client = redis_sentinel + } + } + for _, config in pairs(configs) do + local ctx = { service = { id = 5 }, var_in_context = 'test3' } + local rate_limit_policy = RateLimitPolicy.new(config.conf) - assert(rate_limit_policy:access(ctx)) - assert.returns_error('limits exceeded', rate_limit_policy:access(ctx)) + assert(rate_limit_policy:access(ctx)) + assert.returns_error('limits exceeded', rate_limit_policy:access(ctx)) - assert.equal('1', redis:get('11110_fixed_window_test3')) - assert.spy(ngx.exit).was_called_with(429) + assert.equal('1', config.redis_client:get('11110_fixed_window_test3')) + assert.spy(ngx.exit).was_called_with(429) + end end) it('rejected (count), name_type is liquid and refers to var in the context', function() - local test_host = 'some_host' - ngx_variable.available_context:revert() - stub(ngx_variable, 'available_context', function() - return { host = test_host } - end) - - local rate_limit_policy = RateLimitPolicy.new({ - fixed_window_limiters = { - { key = { name = '{{ host }}', name_type = 'liquid', scope = 'global' }, count = 1, window = 10 } + local configs = { + redis = { + conf = { + fixed_window_limiters = { + { key = { name = '{{ host }}', name_type = 'liquid', scope = 'global' }, count = 1, window = 10 } + }, + redis_url = redis_url + }, + redis_client = redis }, - redis_url = redis_url - }) + redis_sentinel = { + conf = { + fixed_window_limiters = { + { key = { name = '{{ host }}', name_type = 'liquid', scope = 'global' }, count = 1, window = 10 } + }, + redis_url = redis_sentinel_url, + redis_sentinels = sentinels + }, + redis_client = redis_sentinel + } + } + for _, config in pairs(configs) do + local 
test_host = 'some_host' + ngx_variable.available_context:revert() + stub(ngx_variable, 'available_context', function() + return { host = test_host } + end) - assert(rate_limit_policy:access(context)) - assert.returns_error('limits exceeded', rate_limit_policy:access(context)) + local rate_limit_policy = RateLimitPolicy.new(config.conf) - assert.equal('1', redis:get('11110_fixed_window_' .. test_host)) - assert.spy(ngx.exit).was_called_with(429) + assert(rate_limit_policy:access(context)) + assert.returns_error('limits exceeded', rate_limit_policy:access(context)) + + assert.equal('1', config.redis_client:get('11110_fixed_window_' .. test_host)) + assert.spy(ngx.exit).was_called_with(429) + end end) it('rejected (count), multi limiters', function() - local ctx = { - service = { id = 5 }, var1 = 'test3_1', var2 = 'test3_2', var3 = 'test3_3' } - local rate_limit_policy = RateLimitPolicy.new({ - fixed_window_limiters = { - { key = { name = '{{ var1 }}', name_type = 'liquid' }, count = 1, window = 10 }, - { key = { name = '{{ var2 }}', name_type = 'liquid' }, count = 2, window = 10 }, - { key = { name = '{{ var3 }}', name_type = 'liquid' }, count = 3, window = 10 } + local configs = { + redis = { + conf = { + fixed_window_limiters = { + { key = { name = '{{ var1 }}', name_type = 'liquid' }, count = 1, window = 10 }, + { key = { name = '{{ var2 }}', name_type = 'liquid' }, count = 2, window = 10 }, + { key = { name = '{{ var3 }}', name_type = 'liquid' }, count = 3, window = 10 } + }, + redis_url = redis_url + }, + redis_client = redis }, - redis_url = redis_url - }) - - assert(rate_limit_policy:access(ctx)) - assert.returns_error('limits exceeded', rate_limit_policy:access(ctx)) - - assert.equal('1', redis:get('11110_5_fixed_window_test3_1')) - assert.equal('1', redis:get('11110_5_fixed_window_test3_2')) - assert.equal('1', redis:get('11110_5_fixed_window_test3_3')) - assert.spy(ngx.exit).was_called_with(429) + redis_sentinel = { + conf = { + fixed_window_limiters = { + 
{ key = { name = '{{ var1 }}', name_type = 'liquid' }, count = 1, window = 10 }, + { key = { name = '{{ var2 }}', name_type = 'liquid' }, count = 2, window = 10 }, + { key = { name = '{{ var3 }}', name_type = 'liquid' }, count = 3, window = 10 } + }, + redis_url = redis_sentinel_url, + redis_sentinels = sentinels + }, + redis_client = redis_sentinel + } + } + for _, config in pairs(configs) do + local ctx = { + service = { id = 5 }, var1 = 'test3_1', var2 = 'test3_2', var3 = 'test3_3' } + local rate_limit_policy = RateLimitPolicy.new(config.conf) + + assert(rate_limit_policy:access(ctx)) + assert.returns_error('limits exceeded', rate_limit_policy:access(ctx)) + + assert.equal('1', config.redis_client:get('11110_5_fixed_window_test3_1')) + assert.equal('1', config.redis_client:get('11110_5_fixed_window_test3_2')) + assert.equal('1', config.redis_client:get('11110_5_fixed_window_test3_3')) + assert.spy(ngx.exit).was_called_with(429) + end end) end) describe('delay', function() it('delay (conn)', function() - local rate_limit_policy = RateLimitPolicy.new({ - connection_limiters = { - { key = { name = 'test1', scope = 'global' }, conn = 1, burst = 1, delay = 2 } + local configs = { + redis = { + connection_limiters = { + { key = { name = 'test1', scope = 'global' }, conn = 1, burst = 1, delay = 2 } + }, + redis_url = redis_url }, - redis_url = redis_url - }) + redis_sentinel = { + connection_limiters = { + { key = { name = 'test1', scope = 'global' }, conn = 1, burst = 1, delay = 2 } + }, + redis_url = redis_sentinel_url, + redis_sentinels = sentinels + } + } + for _, config in pairs(configs) do + local rate_limit_policy = RateLimitPolicy.new(config) - assert(rate_limit_policy:access(context)) - assert(rate_limit_policy:access(context)) + assert(rate_limit_policy:access(context)) + assert(rate_limit_policy:access(context)) - assert.spy(ngx.sleep).was_called_with(match.is_gt(0.000)) + assert.spy(ngx.sleep).was_called_with(match.is_gt(0.000)) + end end) it('delay (req)', 
function() - local rate_limit_policy = RateLimitPolicy.new({ - leaky_bucket_limiters = { - { key = { name = 'test2', scope = 'global' }, rate = 1, burst = 1 } + local configs = { + redis = { + leaky_bucket_limiters = { + { key = { name = 'test2', scope = 'global' }, rate = 1, burst = 1 } + }, + redis_url = redis_url }, - redis_url = redis_url - }) + redis_sentinel = { + leaky_bucket_limiters = { + { key = { name = 'test2', scope = 'global' }, rate = 1, burst = 1 } + }, + redis_url = redis_sentinel_url, + redis_sentinels = sentinels + } + } + for _, config in pairs(configs) do + local rate_limit_policy = RateLimitPolicy.new(config) - assert(rate_limit_policy:access(context)) - assert(rate_limit_policy:access(context)) + assert(rate_limit_policy:access(context)) + assert(rate_limit_policy:access(context)) - assert.spy(ngx.sleep).was_called_with(match.is_gt(0.000)) + assert.spy(ngx.sleep).was_called_with(match.is_gt(0.000)) + end end) it('delay (req) service scope', function() - local rate_limit_policy = RateLimitPolicy.new({ - leaky_bucket_limiters = { - { - key = { name = 'test4', scope = 'service' }, - rate = 1, - burst = 1 - } + local configs = { + redis = { + leaky_bucket_limiters = { + { + key = { name = 'test4', scope = 'service' }, + rate = 1, + burst = 1 + } + }, + redis_url = redis_url }, - redis_url = redis_url - }) + redis_sentinel = { + leaky_bucket_limiters = { + { + key = { name = 'test4', scope = 'service' }, + rate = 1, + burst = 1 + } + }, + redis_url = redis_sentinel_url, + redis_sentinels = sentinels + } + } + for _, config in pairs(configs) do + local rate_limit_policy = RateLimitPolicy.new(config) - assert(rate_limit_policy:access(context)) - assert(rate_limit_policy:access(context)) + assert(rate_limit_policy:access(context)) + assert(rate_limit_policy:access(context)) - assert.spy(ngx.sleep).was_called_with(match.is_gt(0.001)) + assert.spy(ngx.sleep).was_called_with(match.is_gt(0.001)) + end end) it('delay (req) default service scope', 
function() - local rate_limit_policy = RateLimitPolicy.new({ - leaky_bucket_limiters = { - { - key = { name = 'test4' }, - rate = 1, - burst = 1 - } + local configs = { + redis = { + leaky_bucket_limiters = { + { + key = { name = 'test4' }, + rate = 1, + burst = 1 + } + }, + redis_url = redis_url }, - redis_url = redis_url - }) + redis_sentinel = { + leaky_bucket_limiters = { + { + key = { name = 'test4' }, + rate = 1, + burst = 1 + } + }, + redis_url = redis_sentinel_url, + redis_sentinels = sentinels + } + } + for _, config in pairs(configs) do + local rate_limit_policy = RateLimitPolicy.new(config) - assert(rate_limit_policy:access(context)) - assert(rate_limit_policy:access(context)) + assert(rate_limit_policy:access(context)) + assert(rate_limit_policy:access(context)) - assert.spy(ngx.sleep).was_called_with(match.is_gt(0.001)) + assert.spy(ngx.sleep).was_called_with(match.is_gt(0.001)) + end end) end) end) diff --git a/spec/policy/rate_limit/redis_shdict_spec.lua b/spec/policy/rate_limit/redis_shdict_spec.lua index 8df6505cb..61578e902 100644 --- a/spec/policy/rate_limit/redis_shdict_spec.lua +++ b/spec/policy/rate_limit/redis_shdict_spec.lua @@ -3,6 +3,11 @@ local ts = require ('apicast.threescale_utils') local redis_host = env.get('TEST_NGINX_REDIS_HOST') or '127.0.0.1' local redis_port = env.get('TEST_NGINX_REDIS_PORT') or 6379 +local redis_master = env.get('TEST_NGINX_REDIS_MASTER') +local redis_sentinel1_host = env.get('TEST_NGINX_REDIS_SENTINEL_1_HOST') +local redis_sentinel2_host = env.get('TEST_NGINX_REDIS_SENTINEL_2_HOST') +local redis_sentinel3_host = env.get('TEST_NGINX_REDIS_SENTINEL_3_HOST') +local redis_sentinel_port = env.get('TEST_NGINX_REDIS_SENTINEL_PORT') local redis_shdict = require('apicast.policy.rate_limit.redis_shdict') @@ -117,3 +122,122 @@ describe('Redis Shared Dictionary', function() end) end) end) + +describe('Redis Shared Dictionary with redis Sentinel', function() + local redis_sentinel + local shdict + + before_each(function() 
+ local options = {
+ url="sentinel://"..redis_master..":a/2",
+ sentinels={
+ {url="redis://"..redis_sentinel1_host..":"..redis_sentinel_port},
+ {url="redis://"..redis_sentinel2_host..":"..redis_sentinel_port},
+ {url="redis://"..redis_sentinel3_host..":"..redis_sentinel_port},
+ }
+ }
+ redis_sentinel = assert(ts.connect_redis(options))
+ shdict = assert(redis_shdict.new(options))
+
+ assert(redis_sentinel:flushdb())
+ end)
+
+ describe('flush_all', function()
+ it('removes all records', function()
+ assert(redis_sentinel:set('foo', 'bar'))
+ assert.equal('bar', redis_sentinel:get('foo'))
+
+ shdict:flush_all()
+
+ assert.equal(ngx.null, redis_sentinel:get('foo'))
+ end)
+ end)
+
+ describe('incr', function()
+ pending('without default', function()
+ local ret, err = shdict:incr('somekey', 2)
+
+ -- TODO: nginx shdict:incr returns: nil, 'not found' when the key does not exist
+ assert.is_nil(ret)
+ assert.equal('not found', err)
+ end)
+
+ it('has init value', function()
+ local ret = shdict:incr('somekey', 2, 3)
+
+ assert.equal(5, ret)
+ assert.equal('5', redis_sentinel:get('somekey'))
+ end)
+
+ it('has init_ttl value', function()
+ shdict:incr('somekey', 0, 0, 1)
+
+ assert.near(1, redis_sentinel:ttl('somekey'), 0.1)
+ end)
+
+ it('increments existing value', function()
+ redis_sentinel:set('somekey', 3)
+
+ local ret = assert(shdict:incr('somekey', 2))
+
+ assert.equal(5, ret)
+ end)
+ end)
+
+ describe('get', function()
+ it('returns an existing integer', function()
+ redis_sentinel:set('somekey', 1)
+
+ local ret = shdict:get('somekey')
+
+ assert.equal(1, ret)
+ end)
+
+ it('returns an existing string', function()
+ redis_sentinel:set('somekey', 'str')
+
+ local ret = shdict:get('somekey')
+
+ assert.equal('str', ret)
+ end)
+
+ it('returns nil on missing value', function()
+ local ret = { shdict:get('missing') }
+
+ assert.equal(0, #ret)
+ end)
+ end)
+
+ describe('set', function()
+ it('overrides existing value', function()
+ 
redis_sentinel:set('somekey', 'str') + + assert(shdict:set('somekey', 'somevalue')) + + assert.equal('somevalue', redis_sentinel:get('somekey')) + end) + + it('sets value when missing', function() + assert(shdict:set('somekey', 'foo')) + + assert.equal('foo', redis_sentinel:get('somekey')) + end) + end) + + describe('expire', function() + it('returns an error on missing key', function() + local ok, err = shdict:expire('somekey', 1) + + assert.is_nil(ok) + assert.equal('not found', err) + end) + + it('sets value when missing', function() + shdict:set('somekey', 'value') + + assert(shdict:expire('somekey', 1)) + + assert.near(1, redis_sentinel:ttl('somekey'), 0.1) + end) + end) +end) diff --git a/spec/threescale_utils_spec.lua b/spec/threescale_utils_spec.lua index 04ca1cf36..bb6ea9396 100644 --- a/spec/threescale_utils_spec.lua +++ b/spec/threescale_utils_spec.lua @@ -1,4 +1,14 @@ local _M = require('apicast.threescale_utils') +local env = require('resty.env') + +local redis_host = env.get('TEST_NGINX_REDIS_HOST') or '127.0.0.1' +local redis_port = env.get('TEST_NGINX_REDIS_PORT') or 6379 +local redis_url = 'redis://'..redis_host..':'..redis_port..'/1' +local redis_master = env.get('TEST_NGINX_REDIS_MASTER') +local redis_sentinel1_host = env.get('TEST_NGINX_REDIS_SENTINEL_1_HOST') +local redis_sentinel2_host = env.get('TEST_NGINX_REDIS_SENTINEL_2_HOST') +local redis_sentinel3_host = env.get('TEST_NGINX_REDIS_SENTINEL_3_HOST') +local redis_sentinel_port = env.get('TEST_NGINX_REDIS_SENTINEL_PORT') describe('3scale utils', function() describe('.error', function() @@ -11,4 +21,93 @@ describe('3scale utils', function() assert.equal('one two three', error) end) end) + + describe('using #redis', function() + it('redis url', function() + local redis, err = _M.connect_redis({url=redis_url}) + assert(redis) + end) + it('invalid redis url', function() + assert.returns_error("failed to connect to redis on invalid.domain:6379: invalid.domain could not be resolved (3: Host not 
found)", _M.connect_redis({url='redis://invalid.domain:6379/1'})) + end) + end) + + describe('using #redis-sentinel', function() + it('redis sentinel', function() + local redis, err = _M.connect_redis({ + url="sentinel://"..redis_master..":a/2", + sentinels={ + {url="redis://"..redis_sentinel1_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel2_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel3_host..":"..redis_sentinel_port}, + } + }) + assert(redis) + end) + it('redis sentinel with empty url', function() + local opts = { + url="", + sentinels={ + {url="redis://"..redis_sentinel1_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel2_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel3_host..":"..redis_sentinel_port}, + } + } + assert.returns_error("failed to connect to redis on 127.0.0.1:6379: invalid master name", _M.connect_redis(opts)) + end) + it('redis sentinel with invalid url', function() + local opts = { + url="redis://invalid.domain:6379/1", + sentinels={ + {url="redis://"..redis_sentinel1_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel2_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel3_host..":"..redis_sentinel_port}, + } + } + assert.returns_error("failed to connect to redis on invalid.domain:6379: invalid master name", _M.connect_redis(opts)) + end) + it('redis sentinel with invalid master name', function() + local opts = { + url="sentinel://invalid.master:a/2", + sentinels={ + {url="redis://"..redis_sentinel1_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel2_host..":"..redis_sentinel_port}, + {url="redis://"..redis_sentinel3_host..":"..redis_sentinel_port}, + } + } + assert.returns_error("failed to connect to redis on 127.0.0.1:6379: invalid master name", _M.connect_redis(opts)) + end) + it('redis sentinel with one valid sentinel', function() + local opts = { + url="sentinel://"..redis_master..":a/2", + sentinels={ + 
{url="redis://invalid.sentinel-2:5000"}, + {url="redis://"..redis_sentinel1_host..":"..redis_sentinel_port}, + {url="redis://invalid.sentinel-3:5000"}, + } + } + local redis, err = _M.connect_redis(opts) + assert(redis) + end) + it('redis sentinel with no valid sentinels', function() + local opts = { + url="sentinel://"..redis_master..":a/2", + sentinels={ + {url="redis://invalid.sentinel-1:5000"}, + {url="redis://invalid.sentinel-2:5000"}, + {url="redis://invalid.sentinel-3:5000"}, + } + } + assert.returns_error("failed to connect to redis on 127.0.0.1:6379: no hosts available", _M.connect_redis(opts)) + end) + it('redis sentinel with normal redis', function() + local opts = { + url="sentinel://"..redis_master..":a/2", + sentinels={ + {url=redis_url}, + } + } + assert.returns_error("failed to connect to redis on 127.0.0.1:6379: ERR unknown command 'sentinel', with args beginning with: 'get-master-addr-by-name' 'redismaster' ", _M.connect_redis(opts)) + end) + end) end)