Problem background
The nginx ingress in k8s runs as a DaemonSet, yet the access entry point is aimed at a single node. That causes a problem: when the ingress on that node restarts, the service is briefly unavailable. Putting static load balancing in front would work, but then adding or removing nodes means editing the config every time. Having just looked into OpenResty, which uses nginx as a host for Lua programming, dynamic load balancing with OpenResty is a workable option. The architecture: an OpenResty stream proxy sits in front, discovers the ingress nodes from the k8s API, and balances TCP traffic on ports 80 and 443 across the healthy ones.
Practice
# Create the project layout
~]# mkdir -p /ddhome/kube-upstream
~]# mkdir /ddhome/kube-upstream/{config,logs,lua}
config/nginx.conf
worker_processes auto;

events {
    worker_connections 65535;
    use epoll;
}

include vhosts/*.conf;
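A relative include path such as `vhosts/*.conf` is resolved against the directory of the main configuration file, which is why the vhost below lives at config/vhosts/kube.conf. With this layout the proxy can be started with the project directory as its prefix, e.g. `openresty -p /ddhome/kube-upstream -c config/nginx.conf`.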
config/vhosts/kube.conf
stream {
    log_format proxy '$remote_addr [$time_local] '
                     '$protocol $status $bytes_sent $bytes_received '
                     '$session_time "$upstream_addr" '
                     '"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"';

    init_by_lua_file "/ddhome/kube-upstream/lua/init.lua";
    init_worker_by_lua_file "/ddhome/kube-upstream/lua/init_worker.lua";
    preread_by_lua_file "/ddhome/kube-upstream/lua/preread.lua";

    upstream kube80 {
        server 127.0.0.1:80;
        balancer_by_lua_file "/ddhome/kube-upstream/lua/balancer80.lua";
    }

    upstream kube443 {
        server 127.0.0.1:443;
        balancer_by_lua_file "/ddhome/kube-upstream/lua/balancer443.lua";
    }

    access_log /ddhome/kube-upstream/logs/tcp-access.log proxy;

    server {
        listen 80;
        proxy_connect_timeout 5s;
        proxy_timeout 600s;
        proxy_pass kube80;
    }

    server {
        listen 443;
        proxy_connect_timeout 5s;
        proxy_timeout 600s;
        proxy_pass kube443;
    }
}
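A note on the upstream blocks: the `server 127.0.0.1:80;` and `server 127.0.0.1:443;` entries are placeholders only. nginx insists on at least one server per upstream, but since balancer_by_lua_file picks the real peer at runtime, the placeholder address is never actually dialed.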
lua/init.lua
-- Initialize global variables shared with the other phase handlers
http = require "resty.http"
cjson = require "cjson"
redis = require "resty.redis"
balancer = require "ngx.balancer"
-- API address of the k8s production environment
uri = "http://192.168.66.128:8080"
redis_host = "192.168.66.82"
redis_port = 6379
delay = 5  -- health-probe interval in seconds
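These variables are intentionally not declared `local`: init_by_lua_file runs once while the configuration is loaded, and globals created there are visible to the preread, balancer, and timer code in the other files. Note also that the API address uses the kube-apiserver's insecure HTTP port (8080), so no authentication token is attached to the requests below.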
Dependencies
opm install openresty/lua-resty-redis
yum install -y redis
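The `resty.http` module required in init.lua is likewise not bundled with OpenResty; it comes from the lua-resty-http library and can be installed the same way, e.g. `opm install ledgetech/lua-resty-http`.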
lua/init_worker.lua
-- Layer-4 health check. The ingress serves HTTP and HTTPS, so probe
-- ports 80 and 443; a node is healthy only if both connect.
local health = function(s)
    local sock = ngx.socket.tcp()
    sock:settimeout(3000)
    local ok, err = sock:connect(s, 80)
    if not ok then
        ngx.log(ngx.ERR, "connect 80 err: ", s, " ", err)
        sock:close()
        return false
    end
    -- connect() on an already-connected cosocket closes the old connection first
    local ok, err = sock:connect(s, 443)
    if not ok then
        ngx.log(ngx.ERR, "connect 443 err: ", s, " ", err)
        sock:close()
        return false
    end
    sock:close()
    return true
end

-- Query the k8s API for the cluster nodes
local nodes = function()
    local httpc, err = http.new()
    httpc:set_timeout(5000)
    local res, err = httpc:request_uri(uri, {
        path = "/api/v1/nodes",
        method = "GET"
    })

    if not res then
        ngx.log(ngx.ERR, "failed to request: ", err)
        return
    end
    -- request_uri manages the underlying connection itself
    return res.body
end

-- Probe every node and store the healthy ones in redis as a JSON array
local store = function()
    local server = {}
    local rds, err = redis:new()
    rds:set_timeout(1000)

    local ok, err = rds:connect(redis_host, redis_port)
    if not ok then
        ngx.log(ngx.ERR, "fail to connect redis: ", err)
        return
    end

    local data = nodes()
    if data then
        local count = 0
        for _, v in pairs(cjson.decode(data)['items']) do
            local s = v['metadata']['name']
            if health(s) then
                count = count + 1
                server[count] = s
            end
        end
    end
    local ok, err = rds:set("servs", cjson.encode(server))
    if not ok then
        ngx.log(ngx.ERR, "fail to set redis key servs: ", err)
    end
    -- return the connection to the pool instead of leaking it
    rds:set_keepalive(10000, 100)
end

-- Run the timer task only in worker 0 so the probe fires once per interval
if ngx.worker.id() == 0 then
    local ok, err = ngx.timer.every(delay, store)
    if not ok then
        ngx.log(ngx.ERR, "timer failed to start: ", err)
    end
end
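For reference, a trimmed sketch of the JSON shape store() relies on: /api/v1/nodes returns an object with an items array, and each item's metadata.name is what gets probed and saved (the node names below are hypothetical):

local cjson = require "cjson"

-- Minimal /api/v1/nodes-shaped body (hypothetical node names)
local body = [[{"items":[
    {"metadata":{"name":"192.168.66.130"}},
    {"metadata":{"name":"192.168.66.131"}}
]}]]

-- Same traversal as store(): one name per node item
for _, v in pairs(cjson.decode(body)["items"]) do
    print(v["metadata"]["name"])  -- the value passed to health() and stored in redis
end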
lua/preread.lua
-- The balancer phase cannot do cosocket I/O, so the node list has to be
-- read out of redis here in the preread phase and passed on via ngx.ctx
local rds, err = redis:new()
rds:set_timeout(1000)

local ok, err = rds:connect(redis_host, redis_port)
if not ok then
    ngx.log(ngx.ERR, "fail to connect redis: ", err)
    return ngx.exit(ngx.ERROR)
end

local res, err = rds:get("servs")
if not res or res == ngx.null then
    ngx.log(ngx.ERR, "fail to get key servs: ", err)
    return
end
rds:set_keepalive(10000, 100)

ngx.ctx.servs = cjson.decode(res)
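Since every new connection pays a redis round-trip here, a variation worth considering (my sketch, not part of the original setup) is to cache the list in a shared memory dictionary instead; this assumes a `lua_shared_dict servs 1m;` declaration in the stream{} block and that the timer in init_worker.lua writes the encoded list into it:

-- Alternative preread sketch: read the node list from a shared dict, no redis hop.
-- Assumes init_worker.lua runs ngx.shared.servs:set("servs", cjson.encode(server))
local raw = ngx.shared.servs:get("servs")
if raw then
    ngx.ctx.servs = cjson.decode(raw)
else
    ngx.log(ngx.ERR, "no cached node list in shared dict")
end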
lua/balancer80.lua
-- Balance port 80 across the healthy ingress controller nodes
local upstreams = ngx.ctx.servs
if not upstreams or #upstreams == 0 then
    ngx.log(ngx.ERR, "80: no healthy upstream nodes")
    return ngx.exit(ngx.ERROR)
end

balancer.set_timeouts(1, 600, 600)
balancer.set_more_tries(2)

-- pick a random healthy node
local n = math.random(#upstreams)

local ok, err = balancer.set_current_peer(upstreams[n], 80)
if not ok then
    ngx.log(ngx.ERR, "80 failed to set peer: ", err)
    return ngx.exit(ngx.ERROR)
end
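Two details worth noting here (they apply to balancer443.lua below as well): balancer.set_timeouts(1, 600, 600) sets the connect/send/read timeouts in seconds, matching the 600s proxy_timeout in kube.conf, and with set_more_tries(2) a failed attempt causes this balancer code to run again for up to two retries, each picking a fresh random node.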
lua/balancer443.lua
-- Balance port 443 across the healthy ingress controller nodes
local upstreams = ngx.ctx.servs
if not upstreams or #upstreams == 0 then
    ngx.log(ngx.ERR, "443: no healthy upstream nodes")
    return ngx.exit(ngx.ERROR)
end

balancer.set_timeouts(1, 600, 600)
balancer.set_more_tries(2)

-- pick a random healthy node
local n = math.random(#upstreams)

local ok, err = balancer.set_current_peer(upstreams[n], 443)
if not ok then
    ngx.log(ngx.ERR, "443 failed to set peer: ", err)
    return ngx.exit(ngx.ERROR)
end
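Since balancer80.lua and balancer443.lua differ only in the port, a possible refactor (my sketch; the lua/balance.lua module name is hypothetical, and lua_package_path would need to include /ddhome/kube-upstream/lua/) is to share the logic and have each file shrink to a single require call:

-- lua/balance.lua (hypothetical): balancer logic parametrized by port,
-- using the balancer global set up in init.lua
return function(port)
    local upstreams = ngx.ctx.servs
    if not upstreams or #upstreams == 0 then
        ngx.log(ngx.ERR, port, ": no healthy upstream nodes")
        return ngx.exit(ngx.ERROR)
    end
    balancer.set_timeouts(1, 600, 600)
    balancer.set_more_tries(2)
    local ok, err = balancer.set_current_peer(upstreams[math.random(#upstreams)], port)
    if not ok then
        ngx.log(ngx.ERR, port, " failed to set peer: ", err)
        return ngx.exit(ngx.ERROR)
    end
end

-- balancer80.lua would then be just:  require("balance")(80)
-- balancer443.lua would then be just: require("balance")(443)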