
feat(waf): Optimize how configuration files are read (#4288)

zhengkunwang 2024-03-23 22:04:06 +08:00 committed by GitHub
parent 1b2208a4d0
commit 33309d2d93
5 changed files with 1728 additions and 14 deletions
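In short, the WAF previously read its configuration and IP groups straight out of the `ngx.shared.waf` dict as JSON strings; this commit routes those lookups through lua-resty-mlcache, which layers a per-worker LRU cache on top of the shared dict and uses a second shared dict to propagate invalidations between workers. A minimal sketch of the pattern being adopted, assuming nginx.conf declares the two shared dicts named in the diffs (sizes here are illustrative, and the copy of mlcache bundled by this commit may differ from upstream):

local mlcache = require "resty.mlcache"

-- Assumed nginx.conf prerequisites (names from the diffs below, sizes made up):
--   lua_shared_dict waf             10m;
--   lua_shared_dict ipc_shared_dict 1m;

-- "config" is the cache namespace and "waf" the L2 shared dict; each worker
-- also keeps up to 1000 decoded values in its own L1 LRU, and the
-- "ipc_shared_dict" carries cross-worker invalidation events.
local cache, err = mlcache.new("config", "waf", {
    lru_size = 1000,
    ipc_shm = "ipc_shared_dict",
})
if not cache then
    error("could not create mlcache: " .. err)
end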


@@ -3,6 +3,7 @@ local lfs = require "lfs"
 local utils = require "utils"
 local cjson = require "cjson"
 local read_rule = file_utils.read_rule
 local read_file2string = file_utils.read_file2string
 local read_file2table = file_utils.read_file2table
@@ -81,10 +82,11 @@ local function load_ip_group()
             ip_group_list[entry] = group_value
         end
     end
-    local waf_dict = ngx.shared.waf
-    local ok, err = waf_dict:set("ip_group_list", cjson.encode(ip_group_list))
-    if not ok then
-        ngx.log(ngx.ERR, "Failed to set ip_group_list", err)
+    local ok, err = cache:set("ip_group_list", {
+        ipc_shm = "ipc_shared_dict",
+    }, ip_group_list)
+    if not ok then
+        ngx.log(ngx.ERR, "Failed to set ip_group_list", err)
     end
 end
@@ -136,20 +138,22 @@ function _M.load_config_file()
     init_sites_config()
     load_ip_group()
-    local waf_dict = ngx.shared.waf
-    local ok, err = waf_dict:set("config", cjson.encode(config))
-    if not ok then
+    local ok, err = cache:set("config", {
+        ipc_shm = "ipc_shared_dict",
+    }, config)
+    if not ok then
         ngx.log(ngx.ERR, "Failed to set config", err)
     end
 end

 local function get_config()
-    local waf_dict = ngx.shared.waf
-    local cache_config = waf_dict:get("config")
+    local cache_config = cache:get("config", {
+        ipc_shm = "ipc_shared_dict",
+    })
     if not cache_config then
         return config
     end
-    return cjson.decode(cache_config)
+    return cache_config
 end

function _M.get_site_config(website_key)
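The point of the change is visible in get_config: ngx.shared dicts can only store strings, numbers, and booleans, so every previous read paid a cjson.decode; mlcache hands back the cached Lua table directly. A hedged before/after illustration (not code from the repo; it assumes the `cache` instance created in init, and shows upstream mlcache signatures, whereas the bundled copy passes an `ipc_shm` option per call):

local cjson = require "cjson"
local config = { waf = "on" }  -- stand-in for the real config table

-- Before: a JSON round-trip on every set and get.
local waf_dict = ngx.shared.waf
waf_dict:set("config", cjson.encode(config))
local conf = cjson.decode(waf_dict:get("config"))

-- After: the table is stored once and then served from the worker-local
-- LRU; repeated reads are plain table lookups with no decode.
cache:set("config", nil, config)
conf = cache:get("config")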


@@ -1,5 +1,16 @@
 local db = require "db"
 local config = require "config"
+local mlcache = require "resty.mlcache"
+
+local cache, err = mlcache.new("config", "waf", {
+    lru_size = 1000,
+    ipc_shm = "ipc_shared_dict",
+})
+if not cache then
+    error("could not create mlcache: " .. err)
+end
+_G.cache = cache
+
 config.load_config_file()
 db.init()
@@ -7,3 +18,4 @@ db.init()

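For context on the `mlcache.new` call above: upstream lua-resty-mlcache performs a three-level lookup per `get()`: the worker-local LRU (L1, bounded by `lru_size`), the `waf` shared dict (L2), and an optional loader callback (L3) that runs only on a full miss. With `ipc_shm` set, `set()` and `delete()` broadcast invalidation events so other workers drop their stale L1 copies; upstream, each worker consumes those events by calling `cache:update()` periodically, which is presumably handled by the bundled, modified mlcache (the suppressed large diff below) or elsewhere in the WAF. A sketch of the loader-based read path following upstream's API (the path and the reuse of `read_file2table` here are hypothetical):

-- Drain pending cross-worker invalidation events first (upstream API).
local ok, uerr = cache:update()
if not ok then
    ngx.log(ngx.ERR, "failed to poll ipc events: ", uerr)
end

-- L3 loader: runs only when both the LRU and the shared dict miss; its
-- return value is written back to both levels.
local conf, err = cache:get("config", nil, function()
    return read_file2table("config.json")  -- hypothetical path
end)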

@@ -122,13 +122,13 @@ local function match_ip(ip_rule, ip, ipn)
     if ip_rule.ipGroup == nil or ip_rule.ipGroup == "" then
         return false
     end
-    local waf_dict = ngx.shared.waf
-    local ip_group_list = waf_dict:get("ip_group_list")
+    local ip_group_list = cache:get("ip_group_list", {
+        ipc_shm = "ipc_shared_dict",
+    })
     if ip_group_list == nil then
         return false
     end
-    local ip_group_obj = cjson.decode(ip_group_list)
-    local ip_group = ip_group_obj[ip_rule.ipGroup]
+    local ip_group = ip_group_list[ip_rule.ipGroup]
     if ip_group == nil then
         return false
     end

File diff suppressed because it is too large


@@ -0,0 +1,257 @@
-- vim: ts=4 sts=4 sw=4 et:

local ERR = ngx.ERR
local WARN = ngx.WARN
local INFO = ngx.INFO
local sleep = ngx.sleep
local shared = ngx.shared
local worker_pid = ngx.worker.pid
local ngx_log = ngx.log
local fmt = string.format
local sub = string.sub
local find = string.find
local min = math.min
local type = type
local pcall = pcall
local error = error
local insert = table.insert
local tonumber = tonumber
local setmetatable = setmetatable

local INDEX_KEY = "lua-resty-ipc:index"
local FORCIBLE_KEY = "lua-resty-ipc:forcible"
local POLL_SLEEP_RATIO = 2

local function marshall(worker_pid, channel, data)
    return fmt("%d:%d:%s%s", worker_pid, #data, channel, data)
end

local function unmarshall(str)
    local sep_1 = find(str, ":", nil, true)
    local sep_2 = find(str, ":", sep_1 + 1, true)

    local pid = tonumber(sub(str, 1, sep_1 - 1))
    local data_len = tonumber(sub(str, sep_1 + 1, sep_2 - 1))

    local channel_last_pos = #str - data_len

    local channel = sub(str, sep_2 + 1, channel_last_pos)
    local data = sub(str, channel_last_pos + 1)

    return pid, channel, data
end
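The marshalled event is a single shm string: sender pid, payload length, then the channel name and payload concatenated. Carrying the payload length is what lets unmarshall split channel from data without a third delimiter that could collide with the channel name. A worked example with made-up values:

-- marshall(1234, "config", "reload") -> "1234:6:configreload"
--
-- unmarshall("1234:6:configreload"):
--   sep_1 = 5, sep_2 = 7  -> pid = 1234, data_len = 6
--   channel_last_pos = #str - data_len = 19 - 6 = 13
--   channel = sub(str, 8, 13)  -> "config"
--   data    = sub(str, 14)     -> "reload"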

local function log(lvl, ...)
    return ngx_log(lvl, "[ipc] ", ...)
end

local _M = {}
local mt = { __index = _M }

function _M.new(shm, debug)
    local dict = shared[shm]
    if not dict then
        return nil, "no such lua_shared_dict: " .. shm
    end

    local self = {
        dict = dict,
        pid = debug and 0 or worker_pid(),
        idx = 0,
        callbacks = {},
    }

    return setmetatable(self, mt)
end

function _M:subscribe(channel, cb)
    if type(channel) ~= "string" then
        error("channel must be a string", 2)
    end

    if type(cb) ~= "function" then
        error("callback must be a function", 2)
    end

    if not self.callbacks[channel] then
        self.callbacks[channel] = { cb }
    else
        insert(self.callbacks[channel], cb)
    end
end

function _M:broadcast(channel, data)
    if type(channel) ~= "string" then
        error("channel must be a string", 2)
    end

    if type(data) ~= "string" then
        error("data must be a string", 2)
    end

    local marshalled_event = marshall(worker_pid(), channel, data)

    local idx, err = self.dict:incr(INDEX_KEY, 1, 0)
    if not idx then
        return nil, "failed to increment index: " .. err
    end

    local ok, err, forcible = self.dict:set(idx, marshalled_event)
    if not ok then
        return nil, "failed to insert event in shm: " .. err
    end

    if forcible then
        -- take note that eviction has started
        -- we repeat this flagging to avoid this key from ever being
        -- evicted itself
        local ok, err = self.dict:set(FORCIBLE_KEY, true)
        if not ok then
            return nil, "failed to set forcible flag in shm: " .. err
        end
    end

    return true
end

-- Note: if this module were to be used by users (that is, users can implement
-- their own pub/sub events and thus, callbacks), this method would then need
-- to consider the time spent in callbacks to prevent long running callbacks
-- from penalizing the worker.
-- Since this module is currently only used by mlcache, whose callback is an
-- shm operation, we only worry about the time spent waiting for events
-- between the 'incr()' and 'set()' race condition.
function _M:poll(timeout)
    if timeout ~= nil and type(timeout) ~= "number" then
        error("timeout must be a number", 2)
    end

    local shm_idx, err = self.dict:get(INDEX_KEY)
    if err then
        return nil, "failed to get index: " .. err
    end

    if shm_idx == nil then
        -- no events to poll yet
        return true
    end

    if type(shm_idx) ~= "number" then
        return nil, "index is not a number, shm tampered with"
    end

    if not timeout then
        timeout = 0.3
    end

    if self.idx == 0 then
        local forcible, err = self.dict:get(FORCIBLE_KEY)
        if err then
            return nil, "failed to get forcible flag from shm: " .. err
        end

        if forcible then
            -- shm lru eviction occurred, we are likely a new worker
            -- skip indexes that may have been evicted and resume current
            -- polling idx
            self.idx = shm_idx - 1
        end
    else
        -- guard: self.idx <= shm_idx
        self.idx = min(self.idx, shm_idx)
    end

    local elapsed = 0

    for _ = self.idx, shm_idx - 1 do
        -- fetch event from shm with a retry policy in case
        -- we run our :get() in between another worker's
        -- :incr() and :set()
        local v
        local idx = self.idx + 1

        do
            local perr
            local pok = true
            local sleep_step = 0.001

            while elapsed < timeout do
                v, err = self.dict:get(idx)
                if v ~= nil or err then
                    break
                end

                if pok then
                    log(INFO, "no event data at index '", idx, "', ",
                              "retrying in: ", sleep_step, "s")

                    -- sleep is not available in all ngx_lua contexts
                    -- if we fail once, never retry to sleep
                    pok, perr = pcall(sleep, sleep_step)
                    if not pok then
                        log(WARN, "could not sleep before retry: ", perr,
                                  " (note: it is safer to call this function ",
                                  "in contexts that support the ngx.sleep() ",
                                  "API)")
                    end
                end

                elapsed = elapsed + sleep_step
                sleep_step = min(sleep_step * POLL_SLEEP_RATIO,
                                 timeout - elapsed)
            end
        end

        -- fetch next event on next iteration
        -- even if we timeout, we might miss 1 event (we return in timeout and
        -- we don't retry that event), but it's better than being stuck forever
        -- on an event that might have been evicted from the shm.
        self.idx = idx

        if elapsed >= timeout then
            return nil, "timeout"
        end

        if err then
            log(ERR, "could not get event at index '", self.idx, "': ", err)
        elseif type(v) ~= "string" then
            log(ERR, "event at index '", self.idx, "' is not a string, ",
                     "shm tampered with")
        else
            local pid, channel, data = unmarshall(v)

            if self.pid ~= pid then
                -- coming from another worker
                local cbs = self.callbacks[channel]
                if cbs then
                    for j = 1, #cbs do
                        local pok, perr = pcall(cbs[j], data)
                        if not pok then
                            log(ERR, "callback for channel '", channel,
                                     "' threw a Lua error: ", perr)
                        end
                    end
                end
            end
        end
    end

    return true
end

return _M
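Read on its own, this new file is a simple polling pub/sub bus over a shared dict: broadcast() appends a marshalled event at the next integer key of a global counter, and each worker's poll() walks from its last-seen index up to that counter, skipping events it published itself. A minimal usage sketch under assumed wiring (the require path, dict size, timer interval, and channel name are hypothetical; the new/subscribe/broadcast/poll API and poll()'s default 0.3s timeout come from the code above):

-- nginx.conf (assumed): lua_shared_dict ipc_shared_dict 1m;
-- e.g. in init_worker_by_lua_block:
local ipc = require "resty.ipc"  -- hypothetical module path

local bus = assert(ipc.new("ipc_shared_dict"))

-- react to events published by *other* workers
bus:subscribe("config_reload", function(data)
    ngx.log(ngx.INFO, "config_reload event received: ", data)
end)

-- events are only delivered when a worker polls, so poll on a timer
local ok, err = ngx.timer.every(1, function(premature)
    if premature then
        return
    end
    local pok, perr = bus:poll()
    if not pok then
        ngx.log(ngx.ERR, "ipc poll failed: ", perr)
    end
end)
if not ok then
    ngx.log(ngx.ERR, "failed to create ipc poll timer: ", err)
end

-- any worker can then publish:
-- bus:broadcast("config_reload", "payload")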