
Commit 6603f99

Author: Frederic Spiers (committed)

test(metrics): using custom endpoint metrics without lua with merge

1 parent f9d3830 · commit 6603f99

9 files changed: 454 additions & 14 deletions


apko/prod.yaml

Lines changed: 15 additions & 0 deletions
@@ -74,6 +74,21 @@ paths:
     permissions: 0o777
     uid: 65532
     gid: 65532
+  - path: /var/run/openresty
+    type: directory
+    permissions: 0o777
+    uid: 65532
+    gid: 65532
+  - path: /var/cache/openresty
+    type: directory
+    permissions: 0o777
+    uid: 65532
+    gid: 65532
+  - path: /var/log/openresty
+    type: directory
+    permissions: 0o777
+    uid: 65532
+    gid: 65532

 archs:
   - aarch64
docker/nginx/nginx.conf

Lines changed: 111 additions & 7 deletions
@@ -1,9 +1,6 @@
-load_module "/usr/lib/nginx/modules/ngx_stream_module.so";
-
 worker_processes 1;
-
-error_log stderr notice;
-pid /var/run/nginx.pid;
+error_log stderr notice;
+pid /var/run/nginx.pid;

 events {
     worker_connections 1024;
@@ -13,6 +10,20 @@ http {
     map_hash_bucket_size 128;
     map_hash_max_size 4096;

+    client_body_temp_path /var/run/nginx-client-body;
+    proxy_temp_path /var/run/nginx-proxy;
+    fastcgi_temp_path /var/run/nginx-fastcgi;
+    uwsgi_temp_path /var/run/nginx-uwsgi;
+    scgi_temp_path /var/run/nginx-scgi;
+
+    lua_shared_dict metrics 10M;
+
+    init_by_lua_block {
+        ngx.shared.metrics:set("http_requests_total", 0)
+        ngx.shared.metrics:set("healthz_requests_total", 0)
+        ngx.shared.metrics:set("metrics_requests_total", 0)
+    }
+
     log_format main '$remote_addr - $remote_user [$time_local] '
                     '"$request" $status $body_bytes_sent '
                     '"$http_referer" "$http_user_agent"';
@@ -24,11 +35,81 @@ http {

         location / {
             return 404;
+
+            log_by_lua_block {
+                local metrics = ngx.shared.metrics
+                local total = metrics:get("http_requests_total") or 0
+                metrics:set("http_requests_total", total + 1)
+            }
         }

         location /healthz {
             default_type text/plain;
             return 200 "OK\n";
+
+            log_by_lua_block {
+                local metrics = ngx.shared.metrics
+
+                local healthz = metrics:get("healthz_requests_total") or 0
+                metrics:set("healthz_requests_total", healthz + 1)
+
+                local total = metrics:get("http_requests_total") or 0
+                metrics:set("http_requests_total", total + 1)
+            }
+        }
+
+        location /metrics {
+            default_type text/plain;
+            content_by_lua_block {
+                local metrics = ngx.shared.metrics
+                local stream_metrics = ngx.shared.stream_metrics
+                local output = {}
+
+                table.insert(output, "# HELP nginx_up Nginx is running")
+                table.insert(output, "# TYPE nginx_up gauge")
+                table.insert(output, "nginx_up 1")
+                table.insert(output, "")
+
+                table.insert(output, "# HELP nginx_http_requests_total Total HTTP requests")
+                table.insert(output, "# TYPE nginx_http_requests_total counter")
+                table.insert(output, "nginx_http_requests_total " .. (metrics:get("http_requests_total") or 0))
+                table.insert(output, "")
+
+                table.insert(output, "# HELP nginx_healthz_requests_total Total healthz requests")
+                table.insert(output, "# TYPE nginx_healthz_requests_total counter")
+                table.insert(output, "nginx_healthz_requests_total " .. (metrics:get("healthz_requests_total") or 0))
+                table.insert(output, "")
+
+                if stream_metrics then
+                    table.insert(output, "# HELP nginx_stream_connections_total Total stream connections")
+                    table.insert(output, "# TYPE nginx_stream_connections_total counter")
+                    table.insert(output, "nginx_stream_connections_total " .. (stream_metrics:get("stream_connections_total") or 0))
+                    table.insert(output, "")
+
+                    local sum = stream_metrics:get("upstream_connect_time_sum") or 0
+                    local count = stream_metrics:get("upstream_connect_time_count") or 0
+                    local avg = count > 0 and (sum / count) or 0
+
+                    table.insert(output, "# HELP nginx_stream_upstream_connect_time_seconds Average upstream connect time")
+                    table.insert(output, "# TYPE nginx_stream_upstream_connect_time_seconds gauge")
+                    table.insert(output, "nginx_stream_upstream_connect_time_seconds " .. string.format("%.6f", avg))
+                    table.insert(output, "")
+
+                    table.insert(output, "# HELP nginx_stream_upstream_connect_time_sum_seconds Total upstream connect time")
+                    table.insert(output, "# TYPE nginx_stream_upstream_connect_time_sum_seconds counter")
+                    table.insert(output, "nginx_stream_upstream_connect_time_sum_seconds " .. string.format("%.6f", sum))
+                    table.insert(output, "")
+
+                    table.insert(output, "# HELP nginx_stream_upstream_connect_time_count_total Total upstream connections")
+                    table.insert(output, "# TYPE nginx_stream_upstream_connect_time_count_total counter")
+                    table.insert(output, "nginx_stream_upstream_connect_time_count_total " .. count)
+                end
+
+                ngx.say(table.concat(output, "\n"))
+
+                local metrics_count = metrics:get("metrics_requests_total") or 0
+                metrics:set("metrics_requests_total", metrics_count + 1)
+            }
         }
     }
 }
@@ -37,13 +118,20 @@ stream {
     map_hash_bucket_size 128;
     map_hash_max_size 4096;

+    lua_shared_dict stream_metrics 10M;
+
+    init_by_lua_block {
+        ngx.shared.stream_metrics:set("stream_connections_total", 0)
+        ngx.shared.stream_metrics:set("upstream_connect_time_sum", 0)
+        ngx.shared.stream_metrics:set("upstream_connect_time_count", 0)
+    }
+
     log_format main '$proxy_protocol_addr - $remote_addr [$time_local] '
                     '$protocol $status $bytes_sent $bytes_received '
                     '$session_time "$upstream_addr" '
                     '"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"';

     access_log /dev/stdout main;
-
     resolver kube-dns.kube-system.svc.cluster.local valid=30s;
     resolver_timeout 5s;

@@ -52,5 +140,21 @@ stream {
         ssl_preread on;
         proxy_pass $ssl_preread_server_name:443;
         proxy_protocol off;
+
+        log_by_lua_block {
+            local metrics = ngx.shared.stream_metrics
+            local connect_time = tonumber(ngx.var.upstream_connect_time) or 0
+            local upstream = ngx.var.upstream_addr or "unknown"
+
+            local connections = metrics:get("stream_connections_total") or 0
+            metrics:set("stream_connections_total", connections + 1)
+
+            if connect_time > 0 then
+                local sum = metrics:get("upstream_connect_time_sum") or 0
+                local count = metrics:get("upstream_connect_time_count") or 0
+                metrics:set("upstream_connect_time_sum", sum + connect_time)
+                metrics:set("upstream_connect_time_count", count + 1)
+            end
+        }
     }
-}
+}
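
For reference, the content_by_lua_block in the new /metrics location builds Prometheus text exposition format by hand. With the counters still at their init_by_lua_block values, a scrape would return roughly the following; the nginx_stream_* series are appended only when ngx.shared.stream_metrics is visible to the HTTP handler, and the values grow as requests and stream sessions are logged:

    # HELP nginx_up Nginx is running
    # TYPE nginx_up gauge
    nginx_up 1

    # HELP nginx_http_requests_total Total HTTP requests
    # TYPE nginx_http_requests_total counter
    nginx_http_requests_total 0

    # HELP nginx_healthz_requests_total Total healthz requests
    # TYPE nginx_healthz_requests_total counter
    nginx_healthz_requests_total 0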

helm/ggbridge/README.md

Lines changed: 3 additions & 0 deletions
@@ -88,13 +88,16 @@ A Helm chart for installing ggbridge
 | proxy.config.upstream.maxFails | int | `2` | Maximum number of unsuccessful attempts to communicate with the server |
 | proxy.labels | object | `{}` | Set proxy labels |
 | proxy.logLevel | string | `"notice"` | Set nginx sidecar container and proxy pod log level (default: notice) |
+| proxy.metrics.enabled | bool | `true` | |
+| proxy.metrics.service.annotations | object | `{}` | |
 | proxy.networkPolicy.allowExternal | bool | `true` | When true, server will accept connections from any source |
 | proxy.networkPolicy.enabled | bool | `true` | Specifies whether a NetworkPolicy should be created |
 | proxy.networkPolicy.extraEgress | list | `[]` | Add extra egress rules to the NetworkPolicy |
 | proxy.networkPolicy.extraIngress | list | `[]` | Add extra ingress rules to the NetworkPolicy |
 | proxy.networkPolicy.ingressNSMatchLabels | object | `{}` | Labels to match to allow traffic to the proxy server from other namespaces |
 | proxy.networkPolicy.ingressNSPodMatchLabels | object | `{}` | Pod labels to match to allow traffic to the proxy server from other namespaces |
 | proxy.nodeSelector | object | `{}` | Node labels for pod assignment |
+| proxy.openresty | object | `{"enabled":true}` | OpenResty config |
 | proxy.readinessProbe.enabled | bool | `true` | Whether to enable readiness probe for proxy |
 | proxy.readinessProbe.exec.command[0] | string | `"ggbridge"` | |
 | proxy.readinessProbe.exec.command[1] | string | `"healthcheck"` | |
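
Taken together, the new README entries map onto a values.yaml fragment along these lines (a sketch derived only from the keys documented above; the scrape annotation is a common Prometheus convention shown for illustration, not something the chart defines):

    proxy:
      # "OpenResty config" per the README; defaults to {"enabled":true}
      openresty:
        enabled: true
      metrics:
        # defaults to true; the README rows above carry no description
        enabled: true
        service:
          # presumably attached to the metrics Service, e.g. for
          # annotation-based Prometheus discovery (illustrative value)
          annotations:
            prometheus.io/scrape: "true"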
