mirror of
https://github.com/fosrl/newt.git
synced 2026-03-12 18:04:28 -05:00
Compare commits
553 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5e60da37d1 | ||
|
|
53d79aea5a | ||
|
|
0f6852b681 | ||
|
|
2b8e280f2e | ||
|
|
3a377d43de | ||
|
|
792057cf6c | ||
|
|
57afe91e85 | ||
|
|
3389088c43 | ||
|
|
e73150c187 | ||
|
|
18556f34b2 | ||
|
|
66c235624a | ||
|
|
b7af49d759 | ||
|
|
00a5fa1f37 | ||
|
|
d256d6c746 | ||
|
|
2cc957d55f | ||
|
|
d98eaa88b3 | ||
|
|
5b884042cd | ||
|
|
2265b61381 | ||
|
|
50fbfdc262 | ||
|
|
2055b773fd | ||
|
|
1c9c98e2f6 | ||
|
|
9c57677493 | ||
|
|
ff825a51dd | ||
|
|
cdfc5733f0 | ||
|
|
cadbb50bdf | ||
|
|
4ac33c824b | ||
|
|
d91228f636 | ||
|
|
6c3b85bb9a | ||
|
|
77d99f1722 | ||
|
|
43e1341352 | ||
|
|
daa1a90e05 | ||
|
|
3739c237c7 | ||
|
|
ddde1758e5 | ||
|
|
dca29781f3 | ||
|
|
91bfd69179 | ||
|
|
060d876429 | ||
|
|
69952efe89 | ||
|
|
66949ca047 | ||
|
|
8c12db6dff | ||
|
|
b84d465763 | ||
|
|
a62567997d | ||
|
|
9bb4bbccb8 | ||
|
|
c3fad797e5 | ||
|
|
0168b4796e | ||
|
|
6c05d76c88 | ||
|
|
a701add824 | ||
|
|
d754cea397 | ||
|
|
31d52ad3ff | ||
|
|
e1ee4dc8f2 | ||
|
|
f9b6f36b4f | ||
|
|
0e961761b8 | ||
|
|
baf1b9b972 | ||
|
|
f078136b5a | ||
|
|
ca341a8bb0 | ||
|
|
80ae03997a | ||
|
|
5c94789d9a | ||
|
|
6c65cc8e5e | ||
|
|
a21a8e90fa | ||
|
|
3d5335f2cb | ||
|
|
94788edce3 | ||
|
|
2bbe037544 | ||
|
|
9b015e9f7c | ||
|
|
3305f711b9 | ||
|
|
ff7fe1275b | ||
|
|
1cbf41e094 | ||
|
|
9bc35433ef | ||
|
|
b8349aab4e | ||
|
|
3f29a553ae | ||
|
|
745045f619 | ||
|
|
3783a12055 | ||
|
|
a9b84c8c09 | ||
|
|
5c5ef4c7e6 | ||
|
|
6e9249e664 | ||
|
|
55be2a52a5 | ||
|
|
058330d41b | ||
|
|
5e7b970115 | ||
|
|
dc180abba9 | ||
|
|
004bb9b12d | ||
|
|
0637360b31 | ||
|
|
d5e0771094 | ||
|
|
1dcb68d694 | ||
|
|
865ac4b682 | ||
|
|
de5627b0b7 | ||
|
|
44470abd54 | ||
|
|
4bb0537c39 | ||
|
|
92fb96f9bd | ||
|
|
b68b7fe49d | ||
|
|
1da424bb20 | ||
|
|
22e5104a41 | ||
|
|
b96adeaa5b | ||
|
|
533e0b9ca7 | ||
|
|
bd86abe8d5 | ||
|
|
d978b27ebc | ||
|
|
cdfcf49d89 | ||
|
|
2fb4bf09ea | ||
|
|
dddae547f5 | ||
|
|
73a14f5fa1 | ||
|
|
67d5217379 | ||
|
|
9f1f1328f6 | ||
|
|
30da7eaa8b | ||
|
|
0fca3457c3 | ||
|
|
1271e8235e | ||
|
|
24c6edf3e0 | ||
|
|
1875c987fe | ||
|
|
7cb1f7e2c2 | ||
|
|
3f4f4fa15c | ||
|
|
bf33a3d81f | ||
|
|
21ffc0ff4b | ||
|
|
13de05eec6 | ||
|
|
0e76b77adc | ||
|
|
c604f46065 | ||
|
|
f02e29f4dd | ||
|
|
6d79856895 | ||
|
|
bbece243dd | ||
|
|
6948066ae4 | ||
|
|
3bcafbf07a | ||
|
|
87e2eb33db | ||
|
|
5ce3f4502d | ||
|
|
e5e733123b | ||
|
|
f417ee32fb | ||
|
|
37c96d0b3e | ||
|
|
78dc39e153 | ||
|
|
71485743ad | ||
|
|
458912e5be | ||
|
|
2bc91d6c68 | ||
|
|
95c3efc365 | ||
|
|
72a9e111dc | ||
|
|
3c86edf0d5 | ||
|
|
32b1b817ac | ||
|
|
02949be245 | ||
|
|
6d51cbf0c0 | ||
|
|
4dbf200cca | ||
|
|
d8b4fb4acb | ||
|
|
ac691517ae | ||
|
|
8a45f6fd63 | ||
|
|
7f650bbfdf | ||
|
|
15b40b0f24 | ||
|
|
e27e6fbce8 | ||
|
|
f9fb13a0d7 | ||
|
|
8db50d94c0 | ||
|
|
09568c1aaf | ||
|
|
c7d656214f | ||
|
|
d981a82b1c | ||
|
|
5dd5a56379 | ||
|
|
8c4d6e2e0a | ||
|
|
284f1ce627 | ||
|
|
cd466ac43f | ||
|
|
2256d1f041 | ||
|
|
40ca839771 | ||
|
|
01ec6a0ce0 | ||
|
|
d04f6cf702 | ||
|
|
cdaff27964 | ||
|
|
de96be810b | ||
|
|
ba43083f04 | ||
|
|
5196effdb8 | ||
|
|
d6edd6ca01 | ||
|
|
1b1323b553 | ||
|
|
bb95d10e86 | ||
|
|
da04746781 | ||
|
|
a38e0b3e98 | ||
|
|
6ced7b5af0 | ||
|
|
61b9615aea | ||
|
|
39f5782583 | ||
|
|
025c94e586 | ||
|
|
75e666c396 | ||
|
|
82a999eb87 | ||
|
|
921e72f628 | ||
|
|
46b33fdca6 | ||
|
|
9caa9fa31e | ||
|
|
dbbea6b34c | ||
|
|
491180c6a1 | ||
|
|
f49a276259 | ||
|
|
c71c6e0b1a | ||
|
|
972c9a9760 | ||
|
|
8f7ee2a8dc | ||
|
|
a737c3e8de | ||
|
|
1ba10c1b68 | ||
|
|
b1f2fe8283 | ||
|
|
a1fdb06add | ||
|
|
25d5fab02b | ||
|
|
2c8755f346 | ||
|
|
348cac66c8 | ||
|
|
6226a262d6 | ||
|
|
5b70feb6a5 | ||
|
|
0ec18d6655 | ||
|
|
7d60240572 | ||
|
|
ee3e7d1442 | ||
|
|
527321a415 | ||
|
|
ff07692248 | ||
|
|
8d3ae5afd7 | ||
|
|
ed99dce7e0 | ||
|
|
f1e07272bd | ||
|
|
a1a3d63fcf | ||
|
|
2a273dc435 | ||
|
|
ec05686523 | ||
|
|
915e7e44d1 | ||
|
|
a729b91ac3 | ||
|
|
ddc37658df | ||
|
|
7c780f7a4f | ||
|
|
6b1c1ed077 | ||
|
|
7a07437b22 | ||
|
|
d63d8d6f5e | ||
|
|
bda1d04f67 | ||
|
|
7f8ee37c7f | ||
|
|
6d2073a478 | ||
|
|
6048f244f1 | ||
|
|
9fec22a53b | ||
|
|
c086e69dd0 | ||
|
|
c729ab5fc6 | ||
|
|
552617cbb5 | ||
|
|
b383cec0b0 | ||
|
|
fb110ba2a1 | ||
|
|
f287888480 | ||
|
|
348b8f6b94 | ||
|
|
71c5bf7e65 | ||
|
|
dda0b414cc | ||
|
|
8f224e2a45 | ||
|
|
90243cd6c6 | ||
|
|
9b79af10ed | ||
|
|
31b1ffcbe9 | ||
|
|
f1c4e1db71 | ||
|
|
72a61d0933 | ||
|
|
e489a2cc66 | ||
|
|
4e648af8e9 | ||
|
|
5d891225de | ||
|
|
9864965381 | ||
|
|
75f6362a90 | ||
|
|
30907188fb | ||
|
|
5f11df8df2 | ||
|
|
7eea6dd335 | ||
|
|
9dc5a3d91c | ||
|
|
1881309148 | ||
|
|
aff928e60f | ||
|
|
f6e7bfe8ea | ||
|
|
60873f0a4f | ||
|
|
50bb81981b | ||
|
|
4ced99fa3f | ||
|
|
9bd96ac540 | ||
|
|
c673743692 | ||
|
|
a08a3b9665 | ||
|
|
0fc13be413 | ||
|
|
92cedd00b3 | ||
|
|
8b0cc36554 | ||
|
|
ba9ca9f097 | ||
|
|
8b4a88937c | ||
|
|
58412a7a61 | ||
|
|
2675b812aa | ||
|
|
217a9346c6 | ||
|
|
eda8073bce | ||
|
|
2969f9d2d6 | ||
|
|
07b7025a24 | ||
|
|
502ebfc362 | ||
|
|
288413fd15 | ||
|
|
0ba44206b1 | ||
|
|
3f8dcd8f22 | ||
|
|
c5c0143013 | ||
|
|
87ac5c97e3 | ||
|
|
e2238c3cc8 | ||
|
|
58a67328d3 | ||
|
|
002fdc4d3f | ||
|
|
9a1fa2c19f | ||
|
|
a6797172ef | ||
|
|
d373de7fa1 | ||
|
|
f876bad632 | ||
|
|
54b096e6a7 | ||
|
|
10720afd31 | ||
|
|
0b37f20d5d | ||
|
|
aa6e54f383 | ||
|
|
30f8eb9785 | ||
|
|
e765d9c774 | ||
|
|
3ae4ac23ef | ||
|
|
6a98b90b01 | ||
|
|
e0ce9d4e48 | ||
|
|
5914c9ed33 | ||
|
|
109bda961f | ||
|
|
c2a93134b1 | ||
|
|
100d8e6afe | ||
|
|
04f2048a0a | ||
|
|
04de5ef8ba | ||
|
|
e77601cccc | ||
|
|
e9752f868e | ||
|
|
866afaf749 | ||
|
|
a12ae17a66 | ||
|
|
e0cba2e5c6 | ||
|
|
79f3db6fb6 | ||
|
|
009b4cf425 | ||
|
|
9c28d75155 | ||
|
|
bad244d0ea | ||
|
|
d013dc0543 | ||
|
|
0047b54e94 | ||
|
|
f0c8d2c7c7 | ||
|
|
28b6865f73 | ||
|
|
d52f89f629 | ||
|
|
289cce3a22 | ||
|
|
e8612c7e6b | ||
|
|
6820f8d23e | ||
|
|
151d0e38e6 | ||
|
|
a9d8ec0b1e | ||
|
|
e9dbfb239b | ||
|
|
a79dccc0e4 | ||
|
|
42dfb6b3d8 | ||
|
|
3ccd755d55 | ||
|
|
a0f0b674e8 | ||
|
|
9e675121d3 | ||
|
|
45d17da570 | ||
|
|
dfba35f8bb | ||
|
|
9e73aab21d | ||
|
|
e1ddad006a | ||
|
|
29567d6e0b | ||
|
|
47321ea9ad | ||
|
|
abfc9d8efc | ||
|
|
c6929621e7 | ||
|
|
46993203a3 | ||
|
|
8306084354 | ||
|
|
02c1e2b7d0 | ||
|
|
ae7e2a1055 | ||
|
|
88f1335cff | ||
|
|
8bf9c9795b | ||
|
|
5d343cd420 | ||
|
|
d1473b7e22 | ||
|
|
2efbd7dd6a | ||
|
|
82a3a39a1f | ||
|
|
df09193834 | ||
|
|
b2fe4e3b03 | ||
|
|
e14d53087f | ||
|
|
3583270f73 | ||
|
|
f5be05c55a | ||
|
|
d09e3fbd60 | ||
|
|
493831b5f0 | ||
|
|
9fc692c090 | ||
|
|
ccb7008579 | ||
|
|
f17dbe1fef | ||
|
|
27561f52ca | ||
|
|
499ebcd928 | ||
|
|
40dfab31a5 | ||
|
|
56377ec87e | ||
|
|
008be54c55 | ||
|
|
64c22a94a4 | ||
|
|
468c93c581 | ||
|
|
c53b859cda | ||
|
|
6cd824baf2 | ||
|
|
d8c5182acd | ||
|
|
c8c4666d63 | ||
|
|
f1fcc13e66 | ||
|
|
52bbc2fe31 | ||
|
|
b5ee12f84a | ||
|
|
510e78437c | ||
|
|
e14cffce1c | ||
|
|
629a92ee81 | ||
|
|
56df75544d | ||
|
|
5b2e743470 | ||
|
|
b5025c142f | ||
|
|
cd86e6b6de | ||
|
|
230c34e4e0 | ||
|
|
a038ce1458 | ||
|
|
cd83efd365 | ||
|
|
702f39e870 | ||
|
|
02b7ea51af | ||
|
|
e8421364fc | ||
|
|
7264bb7001 | ||
|
|
86e262ac1e | ||
|
|
dcacc03e96 | ||
|
|
6f4469a5a4 | ||
|
|
663e28329b | ||
|
|
f513f97fc3 | ||
|
|
ce4f3e4cdf | ||
|
|
58a74fce6f | ||
|
|
fc965abbc4 | ||
|
|
b881808cae | ||
|
|
6160e4c8a6 | ||
|
|
4d343e3541 | ||
|
|
71d1bbaaf2 | ||
|
|
9eb8e5122a | ||
|
|
c593e2aa97 | ||
|
|
c3483ded8f | ||
|
|
5aa00a65c2 | ||
|
|
675797c23f | ||
|
|
b1cfd3ba02 | ||
|
|
2d9b761de9 | ||
|
|
99506a10f6 | ||
|
|
9410b92169 | ||
|
|
53397663ef | ||
|
|
221d5862fb | ||
|
|
e4bdbbec7c | ||
|
|
07bd283604 | ||
|
|
b7d4ea0c84 | ||
|
|
d10c5e0366 | ||
|
|
61a9097baf | ||
|
|
54416bbc92 | ||
|
|
a88d25f369 | ||
|
|
c2a326c70a | ||
|
|
be56550da4 | ||
|
|
2ecf3297cd | ||
|
|
a896291831 | ||
|
|
9348842e2c | ||
|
|
ec8fc20438 | ||
|
|
a39d056725 | ||
|
|
450fc6c20f | ||
|
|
b7aa5d1396 | ||
|
|
837f749cdb | ||
|
|
56b68ae3b0 | ||
|
|
295cadc0b7 | ||
|
|
0add2ec668 | ||
|
|
071a51afbc | ||
|
|
9db3b78373 | ||
|
|
700287163e | ||
|
|
e357e7befb | ||
|
|
a76e6c9637 | ||
|
|
678d82fa68 | ||
|
|
6dd9b44fd2 | ||
|
|
63aea704bd | ||
|
|
c1a2a3208c | ||
|
|
ac67df63fa | ||
|
|
e642983b88 | ||
|
|
e2dd965654 | ||
|
|
a6849059b9 | ||
|
|
5f14ff3a07 | ||
|
|
040f5375a6 | ||
|
|
227631665e | ||
|
|
ca3ffa00ed | ||
|
|
6d3938e14e | ||
|
|
37191924ee | ||
|
|
a14f70dbaa | ||
|
|
bb1318278a | ||
|
|
1c75eb3bee | ||
|
|
4b64b04603 | ||
|
|
b397016da8 | ||
|
|
850c230c4a | ||
|
|
c82b84194f | ||
|
|
50df49e556 | ||
|
|
5f9c041c6b | ||
|
|
7f9b700ec3 | ||
|
|
95d4cb2758 | ||
|
|
bbea9a91da | ||
|
|
7c971d278c | ||
|
|
f6bfa70c10 | ||
|
|
7f8e3d9ef9 | ||
|
|
6d7e7e7a93 | ||
|
|
48cb0bf5a7 | ||
|
|
58f7835072 | ||
|
|
6d9160ab5e | ||
|
|
8d4d8b91b9 | ||
|
|
d0e220511a | ||
|
|
5cb86f3e47 | ||
|
|
e26552a5d7 | ||
|
|
22f44c860a | ||
|
|
64aef4ff34 | ||
|
|
ef2cc34f02 | ||
|
|
5476a69963 | ||
|
|
2e6ab2ba41 | ||
|
|
126ced6d57 | ||
|
|
cbbd5b0c76 | ||
|
|
e335bb8a1f | ||
|
|
e053eff879 | ||
|
|
a2c22eff35 | ||
|
|
52a8aabdb8 | ||
|
|
38b7f17e58 | ||
|
|
8941604f71 | ||
|
|
1820c8c019 | ||
|
|
1ecffab79a | ||
|
|
4ce2a656b8 | ||
|
|
d4b88c3985 | ||
|
|
a5f4d5fdf6 | ||
|
|
ce6d340a8d | ||
|
|
eaf812a2a7 | ||
|
|
a52260b49d | ||
|
|
a4d4976103 | ||
|
|
acab633da6 | ||
|
|
036e255b47 | ||
|
|
faffaad926 | ||
|
|
157bb98fd3 | ||
|
|
50b621f17c | ||
|
|
35d82ea15c | ||
|
|
21f5aa906d | ||
|
|
4a70af44bb | ||
|
|
5280c7ccda | ||
|
|
ef2f25ef98 | ||
|
|
eb8a12f290 | ||
|
|
a937027838 | ||
|
|
f8653e245e | ||
|
|
c423f6692a | ||
|
|
a9b96637b9 | ||
|
|
f566f599d6 | ||
|
|
918a9bdb84 | ||
|
|
315b6f3721 | ||
|
|
37940444c1 | ||
|
|
4e9aa30686 | ||
|
|
c2d3f00a6e | ||
|
|
9f006b1cbd | ||
|
|
1ef61d7470 | ||
|
|
6935c3b8db | ||
|
|
994d11b40c | ||
|
|
f060306654 | ||
|
|
a3cfda9fc5 | ||
|
|
607d197b02 | ||
|
|
78f31a56b0 | ||
|
|
03988655b6 | ||
|
|
4cf83f4cfc | ||
|
|
494e30704b | ||
|
|
175718a48e | ||
|
|
6a146ed371 | ||
|
|
027d9a059f | ||
|
|
0ced66e157 | ||
|
|
641c7f27a2 | ||
|
|
67abd239b6 | ||
|
|
3b166c465d | ||
|
|
e0d2349efa | ||
|
|
7b7d7228a6 | ||
|
|
a1a439c75c | ||
|
|
6b0ca9cab5 | ||
|
|
e7c8dbc1c8 | ||
|
|
d28e3ca5e8 | ||
|
|
b41570eb2c | ||
|
|
e47ddaa916 | ||
|
|
65dc81ca8b | ||
|
|
09d6829f8b | ||
|
|
f677376fae | ||
|
|
72e0adc1bf | ||
|
|
435b638701 | ||
|
|
9b3c82648b | ||
|
|
f713c294b2 | ||
|
|
b3e8bf7d12 | ||
|
|
c5978d9c4e | ||
|
|
7f9a31ac3e | ||
|
|
f08378b67e | ||
|
|
7852f11e8d | ||
|
|
2ff8df9a8d | ||
|
|
9d80161ab7 | ||
|
|
1501de691a | ||
|
|
2a19856556 | ||
|
|
f4e17a4dd7 | ||
|
|
ab544fc9ed | ||
|
|
f9e52c4d91 | ||
|
|
14eff8e16c | ||
|
|
067e079293 | ||
|
|
5e673c829b | ||
|
|
cd3ec0b259 | ||
|
|
b68502de9e | ||
|
|
f6429b6eee | ||
|
|
8795c57b2e | ||
|
|
4aa718d55f | ||
|
|
afa93d8a3f | ||
|
|
270ee9cd19 | ||
|
|
0affef401c | ||
|
|
18d99de924 | ||
|
|
bff6707577 | ||
|
|
95eab504fa | ||
|
|
56e75902e3 | ||
|
|
45a1ab91d7 | ||
|
|
fb199cc94b | ||
|
|
66edae4288 | ||
|
|
f69a7f647d | ||
|
|
e8bd55bed9 |
5
.env.example
Normal file
5
.env.example
Normal file
@@ -0,0 +1,5 @@
|
||||
# Copy this file to .env and fill in your values
|
||||
# Required for connecting to Pangolin service
|
||||
PANGOLIN_ENDPOINT=https://app.pangolin.net
|
||||
NEWT_ID=changeme-id
|
||||
NEWT_SECRET=changeme-secret
|
||||
47
.github/DISCUSSION_TEMPLATE/feature-requests.yml
vendored
Normal file
47
.github/DISCUSSION_TEMPLATE/feature-requests.yml
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
body:
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Summary
|
||||
description: A clear and concise summary of the requested feature.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Motivation
|
||||
description: |
|
||||
Why is this feature important?
|
||||
Explain the problem this feature would solve or what use case it would enable.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Proposed Solution
|
||||
description: |
|
||||
How would you like to see this feature implemented?
|
||||
Provide as much detail as possible about the desired behavior, configuration, or changes.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Alternatives Considered
|
||||
description: Describe any alternative solutions or workarounds you've thought about.
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Additional Context
|
||||
description: Add any other context, mockups, or screenshots about the feature request here.
|
||||
validations:
|
||||
required: false
|
||||
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Before submitting, please:
|
||||
- Check if there is an existing issue for this feature.
|
||||
- Clearly explain the benefit and use case.
|
||||
- Be as specific as possible to help contributors evaluate and implement.
|
||||
51
.github/ISSUE_TEMPLATE/1.bug_report.yml
vendored
Normal file
51
.github/ISSUE_TEMPLATE/1.bug_report.yml
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
name: Bug Report
|
||||
description: Create a bug report
|
||||
labels: []
|
||||
body:
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Describe the Bug
|
||||
description: A clear and concise description of what the bug is.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Environment
|
||||
description: Please fill out the relevant details below for your environment.
|
||||
value: |
|
||||
- OS Type & Version: (e.g., Ubuntu 22.04)
|
||||
- Pangolin Version:
|
||||
- Gerbil Version:
|
||||
- Traefik Version:
|
||||
- Newt Version:
|
||||
- Olm Version: (if applicable)
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: To Reproduce
|
||||
description: |
|
||||
Steps to reproduce the behavior, please provide a clear description of how to reproduce the issue, based on the linked minimal reproduction. Screenshots can be provided in the issue body below.
|
||||
|
||||
If using code blocks, make sure syntax highlighting is correct and double-check that the rendered preview is not broken.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Expected Behavior
|
||||
description: A clear and concise description of what you expected to happen.
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Before posting the issue go through the steps you've written down to make sure the steps provided are detailed and clear.
|
||||
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Contributors should be able to follow the steps provided in order to reproduce the bug.
|
||||
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
8
.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Need help or have questions?
|
||||
url: https://github.com/orgs/fosrl/discussions
|
||||
about: Ask questions, get help, and discuss with other community members
|
||||
- name: Request a Feature
|
||||
url: https://github.com/orgs/fosrl/discussions/new?category=feature-requests
|
||||
about: Feature requests should be opened as discussions so others can upvote and comment
|
||||
40
.github/dependabot.yml
vendored
Normal file
40
.github/dependabot.yml
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "gomod"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
groups:
|
||||
dev-patch-updates:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "patch"
|
||||
dev-minor-updates:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
prod-patch-updates:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "patch"
|
||||
prod-minor-updates:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
|
||||
- package-ecosystem: "docker"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "daily"
|
||||
groups:
|
||||
patch-updates:
|
||||
update-types:
|
||||
- "patch"
|
||||
minor-updates:
|
||||
update-types:
|
||||
- "minor"
|
||||
|
||||
- package-ecosystem: "github-actions"
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
243
.github/workflows/cicd.yml
vendored
243
.github/workflows/cicd.yml
vendored
@@ -1,61 +1,212 @@
|
||||
name: CI/CD Pipeline
|
||||
|
||||
permissions:
|
||||
contents: write # gh-release
|
||||
packages: write # GHCR push
|
||||
id-token: write # Keyless-Signatures & Attestations
|
||||
attestations: write # actions/attest-build-provenance
|
||||
security-events: write # upload-sarif
|
||||
actions: read
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "*"
|
||||
push:
|
||||
tags:
|
||||
- "[0-9]+.[0-9]+.[0-9]+"
|
||||
- "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
|
||||
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: "SemVer version to release (e.g., 1.2.3, no leading 'v')"
|
||||
required: true
|
||||
type: string
|
||||
target_branch:
|
||||
description: "Branch to tag"
|
||||
required: false
|
||||
default: "main"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event_name == 'workflow_dispatch' && github.event.inputs.version || github.ref_name }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
release:
|
||||
name: Build and Release
|
||||
runs-on: ubuntu-latest
|
||||
prepare:
|
||||
if: github.event_name == 'workflow_dispatch'
|
||||
name: Prepare release (create tag)
|
||||
runs-on: ubuntu-24.04
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
- name: Validate version input
|
||||
shell: bash
|
||||
env:
|
||||
INPUT_VERSION: ${{ inputs.version }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if ! [[ "$INPUT_VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$ ]]; then
|
||||
echo "Invalid version: $INPUT_VERSION (expected X.Y.Z or X.Y.Z-rc.N)" >&2
|
||||
exit 1
|
||||
fi
|
||||
- name: Create and push tag
|
||||
shell: bash
|
||||
env:
|
||||
TARGET_BRANCH: ${{ inputs.target_branch }}
|
||||
VERSION: ${{ inputs.version }}
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
|
||||
git fetch --prune origin
|
||||
git checkout "$TARGET_BRANCH"
|
||||
git pull --ff-only origin "$TARGET_BRANCH"
|
||||
if git rev-parse -q --verify "refs/tags/$VERSION" >/dev/null; then
|
||||
echo "Tag $VERSION already exists" >&2
|
||||
exit 1
|
||||
fi
|
||||
git tag -a "$VERSION" -m "Release $VERSION"
|
||||
git push origin "refs/tags/$VERSION"
|
||||
release:
|
||||
if: ${{ github.event_name == 'workflow_dispatch' || (github.event_name == 'push' && github.actor != 'github-actions[bot]') }}
|
||||
name: Build and Release
|
||||
runs-on: ubuntu-24.04
|
||||
timeout-minutes: 120
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
- name: Extract tag name
|
||||
env:
|
||||
EVENT_NAME: ${{ github.event_name }}
|
||||
INPUT_VERSION: ${{ inputs.version }}
|
||||
run: |
|
||||
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
|
||||
echo "TAG=${INPUT_VERSION}" >> $GITHUB_ENV
|
||||
else
|
||||
echo "TAG=${{ github.ref_name }}" >> $GITHUB_ENV
|
||||
fi
|
||||
shell: bash
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
- name: Validate pushed tag format (no leading 'v')
|
||||
if: ${{ github.event_name == 'push' }}
|
||||
shell: bash
|
||||
env:
|
||||
TAG_GOT: ${{ env.TAG }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if [[ "$TAG_GOT" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$ ]]; then
|
||||
echo "Tag OK: $TAG_GOT"
|
||||
exit 0
|
||||
fi
|
||||
echo "ERROR: Tag '$TAG_GOT' is not allowed. Use 'X.Y.Z' or 'X.Y.Z-rc.N' (no leading 'v')." >&2
|
||||
exit 1
|
||||
- name: Wait for tag to be visible (dispatch only)
|
||||
if: ${{ github.event_name == 'workflow_dispatch' }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
for i in {1..90}; do
|
||||
if git ls-remote --tags origin "refs/tags/${TAG}" | grep -qE "refs/tags/${TAG}$"; then
|
||||
echo "Tag ${TAG} is visible on origin"; exit 0
|
||||
fi
|
||||
echo "Tag not yet visible, retrying... ($i/90)"
|
||||
sleep 2
|
||||
done
|
||||
echo "Tag ${TAG} not visible after waiting"; exit 1
|
||||
shell: bash
|
||||
|
||||
- name: Extract tag name
|
||||
id: get-tag
|
||||
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||
- name: Update version in main.go
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [ -f main.go ]; then
|
||||
sed -i 's/version_replaceme/'"$TAG"'/' main.go
|
||||
echo "Updated main.go with version $TAG"
|
||||
else
|
||||
echo "main.go not found"
|
||||
fi
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@v4
|
||||
with:
|
||||
go-version: 1.23.1
|
||||
- name: Ensure repository is at the tagged commit (dispatch only)
|
||||
if: ${{ github.event_name == 'workflow_dispatch' }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
git fetch --tags --force
|
||||
git checkout "refs/tags/${TAG}"
|
||||
echo "Checked out $(git rev-parse --short HEAD) for tag ${TAG}"
|
||||
shell: bash
|
||||
|
||||
- name: Update version in main.go
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [ -f main.go ]; then
|
||||
sed -i 's/Newt version replaceme/Newt version '"$TAG"'/' main.go
|
||||
echo "Updated main.go with version $TAG"
|
||||
else
|
||||
echo "main.go not found"
|
||||
fi
|
||||
- name: Detect release candidate (rc)
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if [[ "${TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
|
||||
echo "IS_RC=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "IS_RC=false" >> $GITHUB_ENV
|
||||
fi
|
||||
shell: bash
|
||||
|
||||
- name: Build and push Docker images
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
make docker-build-release tag=$TAG
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
|
||||
with:
|
||||
go-version-file: go.mod
|
||||
|
||||
- name: Build binaries
|
||||
run: |
|
||||
make go-build-release
|
||||
- name: Cache Go modules
|
||||
if: ${{ hashFiles('**/go.sum') != '' }}
|
||||
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
|
||||
with:
|
||||
path: |
|
||||
~/.cache/go-build
|
||||
~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
- name: Go vet & test
|
||||
if: ${{ hashFiles('**/go.mod') != '' }}
|
||||
run: |
|
||||
go version
|
||||
go vet ./...
|
||||
go test ./... -race -covermode=atomic
|
||||
shell: bash
|
||||
|
||||
- name: Upload artifacts from /bin
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: binaries
|
||||
path: bin/
|
||||
# - name: Trivy scan (GHCR image)
|
||||
# id: trivy
|
||||
# uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
|
||||
# with:
|
||||
# image-ref: ${{ env.GHCR_IMAGE }}@${{ steps.build.outputs.digest }}
|
||||
# format: sarif
|
||||
# output: trivy-ghcr.sarif
|
||||
# ignore-unfixed: true
|
||||
# vuln-type: os,library
|
||||
# severity: CRITICAL,HIGH
|
||||
# exit-code: ${{ (vars.TRIVY_FAIL || '0') }}
|
||||
|
||||
# - name: Upload SARIF,trivy
|
||||
# if: ${{ always() && hashFiles('trivy-ghcr.sarif') != '' }}
|
||||
# uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5
|
||||
# with:
|
||||
# sarif_file: trivy-ghcr.sarif
|
||||
# category: Image Vulnerability Scan
|
||||
|
||||
#- name: Build binaries
|
||||
# env:
|
||||
# CGO_ENABLED: "0"
|
||||
# GOFLAGS: "-trimpath"
|
||||
# run: |
|
||||
# set -euo pipefail
|
||||
# TAG_VAR="${TAG}"
|
||||
# make -j 10 go-build-release tag=$TAG_VAR
|
||||
# shell: bash
|
||||
|
||||
- name: Run GoReleaser (binaries + deb/rpm/apk)
|
||||
uses: goreleaser/goreleaser-action@v6
|
||||
with:
|
||||
version: latest
|
||||
args: release --clean
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
140
.github/workflows/mirror.yaml
vendored
Normal file
140
.github/workflows/mirror.yaml
vendored
Normal file
@@ -0,0 +1,140 @@
|
||||
name: Mirror & Sign (Docker Hub to GHCR)
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
source_image:
|
||||
description: "Source image (e.g., docker.io/owner/newt)"
|
||||
required: true
|
||||
type: string
|
||||
dest_image:
|
||||
description: "Destination image (e.g., ghcr.io/owner/newt)"
|
||||
required: true
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write # for keyless OIDC
|
||||
|
||||
jobs:
|
||||
mirror-and-dual-sign:
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
SOURCE_IMAGE: ${{ inputs.source_image }}
|
||||
DEST_IMAGE: ${{ inputs.dest_image }}
|
||||
steps:
|
||||
- name: Install skopeo + jq
|
||||
run: |
|
||||
sudo apt-get update -y
|
||||
sudo apt-get install -y skopeo jq
|
||||
skopeo --version
|
||||
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
|
||||
|
||||
- name: Input check
|
||||
run: |
|
||||
test -n "${SOURCE_IMAGE}" || (echo "SOURCE_IMAGE is empty" && exit 1)
|
||||
echo "Source : ${SOURCE_IMAGE}"
|
||||
echo "Target : ${DEST_IMAGE}"
|
||||
|
||||
# Auth for skopeo (containers-auth)
|
||||
- name: Skopeo login to GHCR
|
||||
run: |
|
||||
skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
|
||||
|
||||
# Auth for cosign (docker-config)
|
||||
- name: Docker login to GHCR (for cosign)
|
||||
run: |
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin
|
||||
|
||||
- name: List source tags
|
||||
run: |
|
||||
set -euo pipefail
|
||||
skopeo list-tags --retry-times 3 docker://"${SOURCE_IMAGE}" \
|
||||
| jq -r '.Tags[]' | sort -u > src-tags.txt
|
||||
echo "Found source tags: $(wc -l < src-tags.txt)"
|
||||
head -n 20 src-tags.txt || true
|
||||
|
||||
- name: List destination tags (skip existing)
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if skopeo list-tags --retry-times 3 docker://"${DEST_IMAGE}" >/tmp/dst.json 2>/dev/null; then
|
||||
jq -r '.Tags[]' /tmp/dst.json | sort -u > dst-tags.txt
|
||||
else
|
||||
: > dst-tags.txt
|
||||
fi
|
||||
echo "Existing destination tags: $(wc -l < dst-tags.txt)"
|
||||
|
||||
- name: Mirror, dual-sign, and verify
|
||||
env:
|
||||
# keyless
|
||||
COSIGN_YES: "true"
|
||||
# key-based
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
# verify
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
copied=0; skipped=0; v_ok=0; errs=0
|
||||
|
||||
issuer="https://token.actions.githubusercontent.com"
|
||||
id_regex="^https://github.com/${{ github.repository }}/.+"
|
||||
|
||||
while read -r tag; do
|
||||
[ -z "$tag" ] && continue
|
||||
|
||||
if grep -Fxq "$tag" dst-tags.txt; then
|
||||
echo "::notice ::Skip (exists) ${DEST_IMAGE}:${tag}"
|
||||
skipped=$((skipped+1))
|
||||
continue
|
||||
fi
|
||||
|
||||
echo "==> Copy ${SOURCE_IMAGE}:${tag} → ${DEST_IMAGE}:${tag}"
|
||||
if ! skopeo copy --all --retry-times 3 \
|
||||
docker://"${SOURCE_IMAGE}:${tag}" docker://"${DEST_IMAGE}:${tag}"; then
|
||||
echo "::warning title=Copy failed::${SOURCE_IMAGE}:${tag}"
|
||||
errs=$((errs+1)); continue
|
||||
fi
|
||||
copied=$((copied+1))
|
||||
|
||||
digest="$(skopeo inspect --retry-times 3 docker://"${DEST_IMAGE}:${tag}" | jq -r '.Digest')"
|
||||
ref="${DEST_IMAGE}@${digest}"
|
||||
|
||||
echo "==> cosign sign (keyless) --recursive ${ref}"
|
||||
if ! cosign sign --recursive "${ref}"; then
|
||||
echo "::warning title=Keyless sign failed::${ref}"
|
||||
errs=$((errs+1))
|
||||
fi
|
||||
|
||||
echo "==> cosign sign (key) --recursive ${ref}"
|
||||
if ! cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${ref}"; then
|
||||
echo "::warning title=Key sign failed::${ref}"
|
||||
errs=$((errs+1))
|
||||
fi
|
||||
|
||||
echo "==> cosign verify (public key) ${ref}"
|
||||
if ! cosign verify --key env://COSIGN_PUBLIC_KEY "${ref}" -o text; then
|
||||
echo "::warning title=Verify(pubkey) failed::${ref}"
|
||||
errs=$((errs+1))
|
||||
fi
|
||||
|
||||
echo "==> cosign verify (keyless policy) ${ref}"
|
||||
if ! cosign verify \
|
||||
--certificate-oidc-issuer "${issuer}" \
|
||||
--certificate-identity-regexp "${id_regex}" \
|
||||
"${ref}" -o text; then
|
||||
echo "::warning title=Verify(keyless) failed::${ref}"
|
||||
errs=$((errs+1))
|
||||
else
|
||||
v_ok=$((v_ok+1))
|
||||
fi
|
||||
done < src-tags.txt
|
||||
|
||||
echo "---- Summary ----"
|
||||
echo "Copied : $copied"
|
||||
echo "Skipped : $skipped"
|
||||
echo "Verified OK : $v_ok"
|
||||
echo "Errors : $errs"
|
||||
23
.github/workflows/nix-build.yml
vendored
Normal file
23
.github/workflows/nix-build.yml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
name: Build Nix package
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
pull_request:
|
||||
paths:
|
||||
- go.mod
|
||||
- go.sum
|
||||
|
||||
jobs:
|
||||
nix-build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Install Nix
|
||||
uses: DeterminateSystems/nix-installer-action@main
|
||||
|
||||
- name: Build flake package
|
||||
run: |
|
||||
nix build .#pangolin-newt -L
|
||||
48
.github/workflows/nix-dependabot-update-hash.yml
vendored
Normal file
48
.github/workflows/nix-dependabot-update-hash.yml
vendored
Normal file
@@ -0,0 +1,48 @@
|
||||
name: Update Nix Package Hash On Dependabot PRs
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, synchronize]
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
nix-update:
|
||||
if: github.actor == 'dependabot[bot]'
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: ${{ github.head_ref }}
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Install Nix
|
||||
uses: DeterminateSystems/nix-installer-action@main
|
||||
|
||||
- name: Run nix-update
|
||||
run: |
|
||||
nix run nixpkgs#nix-update -- --flake pangolin-newt --no-src --version skip
|
||||
|
||||
- name: Check for changes
|
||||
id: changes
|
||||
run: |
|
||||
if git diff --quiet; then
|
||||
echo "changed=false" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
echo "changed=true" >> "$GITHUB_OUTPUT"
|
||||
fi
|
||||
|
||||
- name: Commit and push changes
|
||||
if: steps.changes.outputs.changed == 'true'
|
||||
run: |
|
||||
git config user.name "dependabot[bot]"
|
||||
git config user.email "dependabot[bot]@users.noreply.github.com"
|
||||
|
||||
git add .
|
||||
git commit -m "chore(nix): fix hash for updated go dependencies"
|
||||
git push
|
||||
64
.github/workflows/publish-apt.yml
vendored
Normal file
64
.github/workflows/publish-apt.yml
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
name: Publish APT repo to S3/CloudFront
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
push:
|
||||
tags:
|
||||
- "[0-9]+.[0-9]+.[0-9]+"
|
||||
- "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: "Tag to publish (e.g. 1.9.0). Leave empty to use latest release."
|
||||
required: false
|
||||
type: string
|
||||
backfill_all:
|
||||
description: "Build/publish repo for ALL releases."
|
||||
required: false
|
||||
default: false
|
||||
type: boolean
|
||||
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
publish:
|
||||
runs-on: ubuntu-24.04
|
||||
env:
|
||||
PKG_NAME: newt
|
||||
SUITE: stable
|
||||
COMPONENT: main
|
||||
REPO_BASE_URL: https://repo.dev.fosrl.io/apt
|
||||
|
||||
AWS_REGION: ${{ vars.AWS_REGION }}
|
||||
S3_BUCKET: ${{ vars.S3_BUCKET }}
|
||||
S3_PREFIX: ${{ vars.S3_PREFIX }}
|
||||
CLOUDFRONT_DISTRIBUTION_ID: ${{ vars.CLOUDFRONT_DISTRIBUTION_ID }}
|
||||
|
||||
INPUT_TAG: ${{ inputs.tag }}
|
||||
BACKFILL_ALL: ${{ inputs.backfill_all }}
|
||||
EVENT_TAG: ${{ github.event.release.tag_name }}
|
||||
PUSH_TAG: ${{ github.ref_name }}
|
||||
GH_REPO: ${{ github.repository }}
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Configure AWS credentials (OIDC)
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
|
||||
aws-region: ${{ vars.AWS_REGION }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: sudo apt-get update && sudo apt-get install -y dpkg-dev apt-utils gnupg curl jq gh
|
||||
|
||||
- name: Publish APT repo
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
APT_GPG_PRIVATE_KEY: ${{ secrets.APT_GPG_PRIVATE_KEY }}
|
||||
APT_GPG_PASSPHRASE: ${{ secrets.APT_GPG_PASSPHRASE }}
|
||||
run: ./scripts/publish-apt.sh
|
||||
37
.github/workflows/stale-bot.yml
vendored
Normal file
37
.github/workflows/stale-bot.yml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
name: Mark and Close Stale Issues
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 0 * * *'
|
||||
workflow_dispatch: # Allow manual trigger
|
||||
|
||||
permissions:
|
||||
contents: write # only for delete-branch option
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
|
||||
with:
|
||||
days-before-stale: 14
|
||||
days-before-close: 14
|
||||
stale-issue-message: 'This issue has been automatically marked as stale due to 14 days of inactivity. It will be closed in 14 days if no further activity occurs.'
|
||||
close-issue-message: 'This issue has been automatically closed due to inactivity. If you believe this is still relevant, please open a new issue with up-to-date information.'
|
||||
stale-issue-label: 'stale'
|
||||
|
||||
exempt-issue-labels: 'needs investigating, networking, new feature, reverse proxy, bug, api, authentication, documentation, enhancement, help wanted, good first issue, question'
|
||||
|
||||
exempt-all-issue-assignees: true
|
||||
|
||||
only-labels: ''
|
||||
exempt-pr-labels: ''
|
||||
days-before-pr-stale: -1
|
||||
days-before-pr-close: -1
|
||||
|
||||
operations-per-run: 100
|
||||
remove-stale-when-updated: true
|
||||
delete-branch: false
|
||||
enable-statistics: true
|
||||
39
.github/workflows/test.yml
vendored
Normal file
39
.github/workflows/test.yml
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
name: Run Tests
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
- dev
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
target:
|
||||
- local
|
||||
#- docker-build
|
||||
- go-build-release-darwin-amd64
|
||||
- go-build-release-darwin-arm64
|
||||
- go-build-release-freebsd-amd64
|
||||
- go-build-release-freebsd-arm64
|
||||
- go-build-release-linux-amd64
|
||||
- go-build-release-linux-arm32-v6
|
||||
- go-build-release-linux-arm32-v7
|
||||
- go-build-release-linux-riscv64
|
||||
- go-build-release-windows-amd64
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
|
||||
with:
|
||||
go-version: 1.25
|
||||
|
||||
- name: Build targets via `make`
|
||||
run: make ${{ matrix.target }}
|
||||
11
.gitignore
vendored
11
.gitignore
vendored
@@ -1,3 +1,10 @@
|
||||
newt
|
||||
.DS_Store
|
||||
bin/
|
||||
bin/
|
||||
nohup.out
|
||||
.idea
|
||||
*.iml
|
||||
certs/
|
||||
newt_arm64
|
||||
key
|
||||
/.direnv/
|
||||
/result*
|
||||
|
||||
@@ -1 +1 @@
|
||||
1.23.2
|
||||
1.25
|
||||
|
||||
52
.goreleaser.yaml
Normal file
52
.goreleaser.yaml
Normal file
@@ -0,0 +1,52 @@
|
||||
version: 2
|
||||
project_name: newt
|
||||
|
||||
release:
|
||||
draft: true
|
||||
prerelease: "{{ contains .Tag \"-rc.\" }}"
|
||||
name_template: "{{ .Tag }}"
|
||||
|
||||
builds:
|
||||
- id: newt
|
||||
main: ./main.go
|
||||
binary: newt
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- amd64
|
||||
- arm64
|
||||
flags:
|
||||
- -trimpath
|
||||
ldflags:
|
||||
- -s -w -X main.newtVersion={{ .Tag }}
|
||||
|
||||
archives:
|
||||
- id: binaries
|
||||
builds:
|
||||
- newt
|
||||
format: binary
|
||||
name_template: "{{ .ProjectName }}_{{ .Os }}_{{ .Arch }}"
|
||||
|
||||
checksum:
|
||||
name_template: "checksums.txt"
|
||||
|
||||
nfpms:
|
||||
- id: packages
|
||||
package_name: newt
|
||||
builds:
|
||||
- newt
|
||||
vendor: fosrl
|
||||
maintainer: fosrl <repo@fosrl.io>
|
||||
description: Newt - userspace tunnel client and TCP/UDP proxy
|
||||
license: AGPL-3.0-or-later
|
||||
formats:
|
||||
- deb
|
||||
- rpm
|
||||
- apk
|
||||
bindir: /usr/bin
|
||||
file_name_template: "newt_{{ .Version }}_{{ .Arch }}"
|
||||
contents:
|
||||
- src: LICENSE
|
||||
dst: /usr/share/doc/newt/LICENSE
|
||||
@@ -4,11 +4,7 @@ Contributions are welcome!
|
||||
|
||||
Please see the contribution and local development guide on the docs page before getting started:
|
||||
|
||||
https://docs.fossorial.io/development
|
||||
|
||||
For ideas about what features to work on and our future plans, please see the roadmap:
|
||||
|
||||
https://docs.fossorial.io/roadmap
|
||||
https://docs.pangolin.net/development/contributing
|
||||
|
||||
### Licensing Considerations
|
||||
|
||||
@@ -21,4 +17,4 @@ By creating this pull request, I grant the project maintainers an unlimited,
|
||||
perpetual license to use, modify, and redistribute these contributions under any terms they
|
||||
choose, including both the AGPLv3 and the Fossorial Commercial license terms. I
|
||||
represent that I have the right to grant this license for all contributed content.
|
||||
```
|
||||
```
|
||||
16
Dockerfile
16
Dockerfile
@@ -1,4 +1,7 @@
|
||||
FROM golang:1.23.1-alpine AS builder
|
||||
FROM golang:1.25-alpine AS builder
|
||||
|
||||
# Install git and ca-certificates
|
||||
RUN apk --no-cache add ca-certificates git tzdata
|
||||
|
||||
# Set the working directory inside the container
|
||||
WORKDIR /app
|
||||
@@ -13,15 +16,18 @@ RUN go mod download
|
||||
COPY . .
|
||||
|
||||
# Build the application
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -o /newt
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /newt
|
||||
|
||||
FROM alpine:3.19 AS runner
|
||||
FROM alpine:3.23 AS runner
|
||||
|
||||
RUN apk --no-cache add ca-certificates
|
||||
RUN apk --no-cache add ca-certificates tzdata iputils
|
||||
|
||||
COPY --from=builder /newt /usr/local/bin/
|
||||
COPY entrypoint.sh /
|
||||
|
||||
# Admin/metrics endpoint (Prometheus scrape)
|
||||
EXPOSE 2112
|
||||
|
||||
RUN chmod +x /entrypoint.sh
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
CMD ["newt"]
|
||||
CMD ["newt"]
|
||||
|
||||
85
Makefile
85
Makefile
@@ -1,37 +1,70 @@
|
||||
.PHONY: all local docker-build docker-build-release
|
||||
|
||||
all: build push
|
||||
all: local
|
||||
|
||||
local:
|
||||
CGO_ENABLED=0 go build -o ./bin/newt
|
||||
|
||||
docker-build:
|
||||
docker build -t fosrl/newt:latest .
|
||||
|
||||
docker-build-release:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-all tag=<tag>"; \
|
||||
echo "Error: tag is required. Usage: make docker-build-release tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
docker buildx build --platform linux/arm/v7,linux/arm64,linux/amd64 -t fosrl/newt:latest -f Dockerfile --push .
|
||||
docker buildx build --platform linux/arm/v7,linux/arm64,linux/amd64 -t fosrl/newt:$(tag) -f Dockerfile --push .
|
||||
docker buildx build . \
|
||||
--platform linux/arm/v7,linux/arm64,linux/amd64 \
|
||||
-t fosrl/newt:latest \
|
||||
-t fosrl/newt:$(tag) \
|
||||
-f Dockerfile \
|
||||
--push
|
||||
|
||||
build:
|
||||
docker build -t fosrl/newt:latest .
|
||||
.PHONY: go-build-release \
|
||||
go-build-release-linux-arm64 go-build-release-linux-arm32-v7 \
|
||||
go-build-release-linux-arm32-v6 go-build-release-linux-amd64 \
|
||||
go-build-release-linux-riscv64 go-build-release-darwin-arm64 \
|
||||
go-build-release-darwin-amd64 go-build-release-windows-amd64 \
|
||||
go-build-release-freebsd-amd64 go-build-release-freebsd-arm64
|
||||
|
||||
push:
|
||||
docker push fosrl/newt:latest
|
||||
go-build-release: \
|
||||
go-build-release-linux-arm64 \
|
||||
go-build-release-linux-arm32-v7 \
|
||||
go-build-release-linux-arm32-v6 \
|
||||
go-build-release-linux-amd64 \
|
||||
go-build-release-linux-riscv64 \
|
||||
go-build-release-darwin-arm64 \
|
||||
go-build-release-darwin-amd64 \
|
||||
go-build-release-windows-amd64 \
|
||||
go-build-release-freebsd-amd64 \
|
||||
go-build-release-freebsd-arm64
|
||||
|
||||
test:
|
||||
docker run fosrl/newt:latest
|
||||
|
||||
local:
|
||||
CGO_ENABLED=0 go build -o newt
|
||||
|
||||
go-build-release:
|
||||
go-build-release-linux-arm64:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o bin/newt_linux_arm64
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -o bin/newt_linux_arm32
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=6 go build -o bin/newt_linux_arm32v6
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bin/newt_linux_amd64
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=riscv64 go build -o bin/newt_linux_riscv64
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -o bin/newt_darwin_arm64
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -o bin/newt_darwin_amd64
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -o bin/newt_windows_amd64.exe
|
||||
CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build -o bin/newt_freebsd_amd64
|
||||
CGO_ENABLED=0 GOOS=freebsd GOARCH=arm64 go build -o bin/newt_freebsd_arm64
|
||||
|
||||
clean:
|
||||
rm newt
|
||||
go-build-release-linux-arm32-v7:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -o bin/newt_linux_arm32
|
||||
|
||||
go-build-release-linux-arm32-v6:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=6 go build -o bin/newt_linux_arm32v6
|
||||
|
||||
go-build-release-linux-amd64:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bin/newt_linux_amd64
|
||||
|
||||
go-build-release-linux-riscv64:
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=riscv64 go build -o bin/newt_linux_riscv64
|
||||
|
||||
go-build-release-darwin-arm64:
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -o bin/newt_darwin_arm64
|
||||
|
||||
go-build-release-darwin-amd64:
|
||||
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -o bin/newt_darwin_amd64
|
||||
|
||||
go-build-release-windows-amd64:
|
||||
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -o bin/newt_windows_amd64.exe
|
||||
|
||||
go-build-release-freebsd-amd64:
|
||||
CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build -o bin/newt_freebsd_amd64
|
||||
|
||||
go-build-release-freebsd-arm64:
|
||||
CGO_ENABLED=0 GOOS=freebsd GOARCH=arm64 go build -o bin/newt_freebsd_arm64
|
||||
|
||||
113
README.md
113
README.md
@@ -1,19 +1,24 @@
|
||||
# Newt
|
||||
|
||||
[](https://pkg.go.dev/github.com/fosrl/newt)
|
||||
[](https://github.com/fosrl/newt/blob/main/LICENSE)
|
||||
[](https://goreportcard.com/report/github.com/fosrl/newt)
|
||||
|
||||
Newt is a fully user space [WireGuard](https://www.wireguard.com/) tunnel client and TCP/UDP proxy, designed to securely expose private resources controlled by Pangolin. By using Newt, you don't need to manage complex WireGuard tunnels and NATing.
|
||||
|
||||
### Installation and Documentation
|
||||
## Installation and Documentation
|
||||
|
||||
Newt is used with Pangolin and Gerbil as part of the larger system. See documentation below:
|
||||
|
||||
- [Installation Instructions](https://docs.fossorial.io)
|
||||
- [Full Documentation](https://docs.fossorial.io)
|
||||
- [Full Documentation](https://docs.pangolin.net/manage/sites/understanding-sites)
|
||||
|
||||
## Preview
|
||||
### Install via APT (Debian/Ubuntu)
|
||||
|
||||
<img src="public/screenshots/preview.png" alt="Preview"/>
|
||||
|
||||
_Sample output of a Newt container connected to Pangolin and hosting various resource target proxies._
|
||||
```bash
|
||||
curl -fsSL https://repo.dev.fosrl.io/apt/public.key | sudo gpg --dearmor -o /usr/share/keyrings/newt-archive-keyring.gpg
|
||||
echo "deb [signed-by=/usr/share/keyrings/newt-archive-keyring.gpg] https://repo.dev.fosrl.io/apt stable main" | sudo tee /etc/apt/sources.list.d/newt.list
|
||||
sudo apt update && sudo apt install newt
|
||||
```
|
||||
|
||||
## Key Functions
|
||||
|
||||
@@ -23,108 +28,32 @@ Using the Newt ID and a secret, the client will make HTTP requests to Pangolin t
|
||||
|
||||
### Receives WireGuard Control Messages
|
||||
|
||||
When Newt receives WireGuard control messages, it will use the information encoded (endpoint, public key) to bring up a WireGuard tunnel using [netstack](https://github.com/WireGuard/wireguard-go/blob/master/tun/netstack/examples/http_server.go) fully in user space. It will ping over the tunnel to ensure the peer on the Gerbil side is brought up.
|
||||
When Newt receives WireGuard control messages, it will use the information encoded (endpoint, public key) to bring up a WireGuard tunnel using [netstack](https://github.com/WireGuard/wireguard-go/blob/master/tun/netstack/examples/http_server.go) fully in user space. It will ping over the tunnel to ensure the peer on the Gerbil side is brought up.
|
||||
|
||||
### Receives Proxy Control Messages
|
||||
|
||||
When Newt receives WireGuard control messages, it will use the information encoded to create a local low level TCP and UDP proxies attached to the virtual tunnel in order to relay traffic to programmed targets.
|
||||
|
||||
## CLI Args
|
||||
|
||||
- `endpoint`: The endpoint where both Gerbil and Pangolin reside in order to connect to the websocket.
|
||||
- `id`: Newt ID generated by Pangolin to identify the client.
|
||||
- `secret`: A unique secret (not shared and kept private) used to authenticate the client ID with the websocket in order to receive commands.
|
||||
- `dns`: DNS server to use to resolve the endpoint
|
||||
- `log-level` (optional): The log level to use. Default: INFO
|
||||
- `updown` (optional): A script to be called when targets are added or removed.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
./newt \
|
||||
--id 31frd0uzbjvp721 \
|
||||
--secret h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6 \
|
||||
--endpoint https://example.com
|
||||
```
|
||||
|
||||
You can also run it with Docker compose. For example, a service in your `docker-compose.yml` might look like this using environment vars (recommended):
|
||||
|
||||
```yaml
|
||||
services:
|
||||
newt:
|
||||
image: fosrl/newt
|
||||
container_name: newt
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- PANGOLIN_ENDPOINT=https://example.com
|
||||
- NEWT_ID=2ix2t8xk22ubpfy
|
||||
- NEWT_SECRET=nnisrfsdfc7prqsp9ewo1dvtvci50j5uiqotez00dgap0ii2
|
||||
```
|
||||
|
||||
You can also pass the CLI args to the container:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
newt:
|
||||
image: fosrl/newt
|
||||
container_name: newt
|
||||
restart: unless-stopped
|
||||
command:
|
||||
- --id 31frd0uzbjvp721
|
||||
- --secret h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6
|
||||
- --endpoint https://example.com
|
||||
```
|
||||
|
||||
Finally a basic systemd service:
|
||||
|
||||
```
|
||||
[Unit]
|
||||
Description=Newt VPN Client
|
||||
After=network.target
|
||||
|
||||
[Service]
|
||||
ExecStart=/usr/local/bin/newt --id 31frd0uzbjvp721 --secret h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6 --endpoint https://example.com
|
||||
Restart=always
|
||||
User=root
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
Make sure to `mv ./newt /usr/local/bin/newt`!
|
||||
|
||||
### Updown
|
||||
|
||||
You can pass in a updown script for Newt to call when it is adding or removing a target:
|
||||
|
||||
`--updown "python3 test.py"`
|
||||
|
||||
It will get called with args when a target is added:
|
||||
`python3 test.py add tcp localhost:8556`
|
||||
`python3 test.py remove tcp localhost:8556`
|
||||
|
||||
Returning a string from the script in the format of a target (`ip:dst` so `10.0.0.1:8080`) it will override the target and use this value instead to proxy.
|
||||
|
||||
You can look at updown.py as a reference script to get started!
|
||||
|
||||
## Build
|
||||
|
||||
### Container
|
||||
### Binary
|
||||
|
||||
Ensure Docker is installed.
|
||||
Make sure to have Go 1.25 installed.
|
||||
|
||||
```bash
|
||||
make
|
||||
```
|
||||
|
||||
### Binary
|
||||
|
||||
Make sure to have Go 1.23.1 installed.
|
||||
### Nix Flake
|
||||
|
||||
```bash
|
||||
make local
|
||||
nix build
|
||||
```
|
||||
|
||||
Binary will be at `./result/bin/newt`
|
||||
|
||||
Development shell available with `nix develop`
|
||||
|
||||
## Licensing
|
||||
|
||||
Newt is dual licensed under the AGPLv3 and the Fossorial Commercial license. For inquiries about commercial licensing, please contact us.
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
If you discover a security vulnerability, please follow the steps below to responsibly disclose it to us:
|
||||
|
||||
1. **Do not create a public GitHub issue or discussion post.** This could put the security of other users at risk.
|
||||
2. Send a detailed report to [security@fossorial.io](mailto:security@fossorial.io) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
|
||||
2. Send a detailed report to [security@pangolin.net](mailto:security@pangolin.net) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
|
||||
|
||||
- Description and location of the vulnerability.
|
||||
- Potential impact of the vulnerability.
|
||||
|
||||
151
authdaemon.go
Normal file
151
authdaemon.go
Normal file
@@ -0,0 +1,151 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/fosrl/newt/authdaemon"
|
||||
"github.com/fosrl/newt/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultPrincipalsPath = "/var/run/auth-daemon/principals"
|
||||
defaultCACertPath = "/etc/ssh/ca.pem"
|
||||
)
|
||||
|
||||
var (
|
||||
errPresharedKeyRequired = errors.New("auth-daemon-key is required when --auth-daemon is enabled")
|
||||
errRootRequired = errors.New("auth-daemon must be run as root (use sudo)")
|
||||
authDaemonServer *authdaemon.Server // Global auth daemon server instance
|
||||
)
|
||||
|
||||
// startAuthDaemon initializes and starts the auth daemon in the background.
|
||||
// It validates requirements (Linux, root, preshared key) and starts the server
|
||||
// in a goroutine so it runs alongside normal newt operation.
|
||||
func startAuthDaemon(ctx context.Context) error {
|
||||
// Validation
|
||||
if runtime.GOOS != "linux" {
|
||||
return fmt.Errorf("auth-daemon is only supported on Linux, not %s", runtime.GOOS)
|
||||
}
|
||||
if os.Geteuid() != 0 {
|
||||
return errRootRequired
|
||||
}
|
||||
|
||||
// Use defaults if not set
|
||||
principalsFile := authDaemonPrincipalsFile
|
||||
if principalsFile == "" {
|
||||
principalsFile = defaultPrincipalsPath
|
||||
}
|
||||
caCertPath := authDaemonCACertPath
|
||||
if caCertPath == "" {
|
||||
caCertPath = defaultCACertPath
|
||||
}
|
||||
|
||||
// Create auth daemon server
|
||||
cfg := authdaemon.Config{
|
||||
DisableHTTPS: true, // We run without HTTP server in newt
|
||||
PresharedKey: "this-key-is-not-used", // Not used in embedded mode, but set to non-empty to satisfy validation
|
||||
PrincipalsFilePath: principalsFile,
|
||||
CACertPath: caCertPath,
|
||||
Force: true,
|
||||
}
|
||||
|
||||
srv, err := authdaemon.NewServer(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create auth daemon server: %w", err)
|
||||
}
|
||||
|
||||
authDaemonServer = srv
|
||||
|
||||
// Start the auth daemon in a goroutine so it runs alongside newt
|
||||
go func() {
|
||||
logger.Info("Auth daemon starting (native mode, no HTTP server)")
|
||||
if err := srv.Run(ctx); err != nil {
|
||||
logger.Error("Auth daemon error: %v", err)
|
||||
}
|
||||
logger.Info("Auth daemon stopped")
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
|
||||
// runPrincipalsCmd executes the principals subcommand logic
|
||||
func runPrincipalsCmd(args []string) {
|
||||
opts := struct {
|
||||
PrincipalsFile string
|
||||
Username string
|
||||
}{
|
||||
PrincipalsFile: defaultPrincipalsPath,
|
||||
}
|
||||
|
||||
// Parse flags manually
|
||||
for i := 0; i < len(args); i++ {
|
||||
switch args[i] {
|
||||
case "--principals-file":
|
||||
if i+1 >= len(args) {
|
||||
fmt.Fprintf(os.Stderr, "Error: --principals-file requires a value\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
opts.PrincipalsFile = args[i+1]
|
||||
i++
|
||||
case "--username":
|
||||
if i+1 >= len(args) {
|
||||
fmt.Fprintf(os.Stderr, "Error: --username requires a value\n")
|
||||
os.Exit(1)
|
||||
}
|
||||
opts.Username = args[i+1]
|
||||
i++
|
||||
case "--help", "-h":
|
||||
printPrincipalsHelp()
|
||||
os.Exit(0)
|
||||
default:
|
||||
fmt.Fprintf(os.Stderr, "Error: unknown flag: %s\n", args[i])
|
||||
printPrincipalsHelp()
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Validation
|
||||
if opts.Username == "" {
|
||||
fmt.Fprintf(os.Stderr, "Error: username is required\n")
|
||||
printPrincipalsHelp()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Get principals
|
||||
list, err := authdaemon.GetPrincipals(opts.PrincipalsFile, opts.Username)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if len(list) == 0 {
|
||||
fmt.Println("")
|
||||
return
|
||||
}
|
||||
for _, principal := range list {
|
||||
fmt.Println(principal)
|
||||
}
|
||||
}
|
||||
|
||||
func printPrincipalsHelp() {
|
||||
fmt.Fprintf(os.Stderr, `Usage: newt principals [flags]
|
||||
|
||||
Output principals for a username (for AuthorizedPrincipalsCommand in sshd_config).
|
||||
Read the principals file and print principals that match the given username, one per line.
|
||||
Configure in sshd_config with AuthorizedPrincipalsCommand and %%u for the username.
|
||||
|
||||
Flags:
|
||||
--principals-file string Path to the principals file (default "%s")
|
||||
--username string Username to look up (required)
|
||||
--help, -h Show this help message
|
||||
|
||||
Example:
|
||||
newt principals --username alice
|
||||
|
||||
`, defaultPrincipalsPath)
|
||||
}
|
||||
27
authdaemon/connection.go
Normal file
27
authdaemon/connection.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package authdaemon
|
||||
|
||||
import (
|
||||
"github.com/fosrl/newt/logger"
|
||||
)
|
||||
|
||||
// ProcessConnection runs the same logic as POST /connection: CA cert, user create/reconcile, principals.
|
||||
// Use this when DisableHTTPS is true (e.g. embedded in Newt) instead of calling the API.
|
||||
func (s *Server) ProcessConnection(req ConnectionRequest) {
|
||||
logger.Info("connection: niceId=%q username=%q metadata.sudo=%v metadata.homedir=%v",
|
||||
req.NiceId, req.Username, req.Metadata.Sudo, req.Metadata.Homedir)
|
||||
|
||||
cfg := &s.cfg
|
||||
if cfg.CACertPath != "" {
|
||||
if err := writeCACertIfNotExists(cfg.CACertPath, req.CaCert, cfg.Force); err != nil {
|
||||
logger.Warn("auth-daemon: write CA cert: %v", err)
|
||||
}
|
||||
}
|
||||
if err := ensureUser(req.Username, req.Metadata); err != nil {
|
||||
logger.Warn("auth-daemon: ensure user: %v", err)
|
||||
}
|
||||
if cfg.PrincipalsFilePath != "" {
|
||||
if err := writePrincipals(cfg.PrincipalsFilePath, req.Username, req.NiceId); err != nil {
|
||||
logger.Warn("auth-daemon: write principals: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
224
authdaemon/host_linux.go
Normal file
224
authdaemon/host_linux.go
Normal file
@@ -0,0 +1,224 @@
|
||||
//go:build linux
|
||||
|
||||
package authdaemon
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
)
|
||||
|
||||
// writeCACertIfNotExists writes contents to path. If the file already exists: when force is false, skip; when force is true, overwrite only if content differs.
|
||||
func writeCACertIfNotExists(path, contents string, force bool) error {
|
||||
contents = strings.TrimSpace(contents)
|
||||
if contents != "" && !strings.HasSuffix(contents, "\n") {
|
||||
contents += "\n"
|
||||
}
|
||||
existing, err := os.ReadFile(path)
|
||||
if err == nil {
|
||||
existingStr := strings.TrimSpace(string(existing))
|
||||
if existingStr != "" && !strings.HasSuffix(existingStr, "\n") {
|
||||
existingStr += "\n"
|
||||
}
|
||||
if existingStr == contents {
|
||||
logger.Debug("auth-daemon: CA cert unchanged at %s, skipping write", path)
|
||||
return nil
|
||||
}
|
||||
if !force {
|
||||
logger.Debug("auth-daemon: CA cert already exists at %s, skipping write (Force disabled)", path)
|
||||
return nil
|
||||
}
|
||||
} else if !os.IsNotExist(err) {
|
||||
return fmt.Errorf("read %s: %w", path, err)
|
||||
}
|
||||
dir := filepath.Dir(path)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("mkdir %s: %w", dir, err)
|
||||
}
|
||||
if err := os.WriteFile(path, []byte(contents), 0644); err != nil {
|
||||
return fmt.Errorf("write CA cert: %w", err)
|
||||
}
|
||||
logger.Info("auth-daemon: wrote CA cert to %s", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
// writePrincipals updates the principals file at path: JSON object keyed by username, value is array of principals. Adds username and niceId to that user's list (deduped).
|
||||
func writePrincipals(path, username, niceId string) error {
|
||||
if path == "" {
|
||||
return nil
|
||||
}
|
||||
username = strings.TrimSpace(username)
|
||||
niceId = strings.TrimSpace(niceId)
|
||||
if username == "" {
|
||||
return nil
|
||||
}
|
||||
data := make(map[string][]string)
|
||||
if raw, err := os.ReadFile(path); err == nil {
|
||||
_ = json.Unmarshal(raw, &data)
|
||||
}
|
||||
list := data[username]
|
||||
seen := make(map[string]struct{}, len(list)+2)
|
||||
for _, p := range list {
|
||||
seen[p] = struct{}{}
|
||||
}
|
||||
for _, p := range []string{username, niceId} {
|
||||
if p == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[p]; !ok {
|
||||
seen[p] = struct{}{}
|
||||
list = append(list, p)
|
||||
}
|
||||
}
|
||||
data[username] = list
|
||||
body, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal principals: %w", err)
|
||||
}
|
||||
dir := filepath.Dir(path)
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("mkdir %s: %w", dir, err)
|
||||
}
|
||||
if err := os.WriteFile(path, body, 0644); err != nil {
|
||||
return fmt.Errorf("write principals: %w", err)
|
||||
}
|
||||
logger.Debug("auth-daemon: wrote principals to %s", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
// sudoGroup returns the name of the sudo group (wheel or sudo) that exists on the system. Prefers wheel.
|
||||
func sudoGroup() string {
|
||||
f, err := os.Open("/etc/group")
|
||||
if err != nil {
|
||||
return "sudo"
|
||||
}
|
||||
defer f.Close()
|
||||
sc := bufio.NewScanner(f)
|
||||
hasWheel := false
|
||||
hasSudo := false
|
||||
for sc.Scan() {
|
||||
line := sc.Text()
|
||||
if strings.HasPrefix(line, "wheel:") {
|
||||
hasWheel = true
|
||||
}
|
||||
if strings.HasPrefix(line, "sudo:") {
|
||||
hasSudo = true
|
||||
}
|
||||
}
|
||||
if hasWheel {
|
||||
return "wheel"
|
||||
}
|
||||
if hasSudo {
|
||||
return "sudo"
|
||||
}
|
||||
return "sudo"
|
||||
}
|
||||
|
||||
// ensureUser creates the system user if missing, or reconciles sudo and homedir to match meta.
|
||||
func ensureUser(username string, meta ConnectionMetadata) error {
|
||||
if username == "" {
|
||||
return nil
|
||||
}
|
||||
u, err := user.Lookup(username)
|
||||
if err != nil {
|
||||
if _, ok := err.(user.UnknownUserError); !ok {
|
||||
return fmt.Errorf("lookup user %s: %w", username, err)
|
||||
}
|
||||
return createUser(username, meta)
|
||||
}
|
||||
return reconcileUser(u, meta)
|
||||
}
|
||||
|
||||
func createUser(username string, meta ConnectionMetadata) error {
|
||||
args := []string{"-s", "/bin/bash"}
|
||||
if meta.Homedir {
|
||||
args = append(args, "-m")
|
||||
} else {
|
||||
args = append(args, "-M")
|
||||
}
|
||||
args = append(args, username)
|
||||
cmd := exec.Command("useradd", args...)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("useradd %s: %w (output: %s)", username, err, string(out))
|
||||
}
|
||||
logger.Info("auth-daemon: created user %s (homedir=%v)", username, meta.Homedir)
|
||||
if meta.Sudo {
|
||||
group := sudoGroup()
|
||||
cmd := exec.Command("usermod", "-aG", group, username)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
logger.Warn("auth-daemon: usermod -aG %s %s: %v (output: %s)", group, username, err, string(out))
|
||||
} else {
|
||||
logger.Info("auth-daemon: added %s to %s", username, group)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func mustAtoi(s string) int {
|
||||
n, _ := strconv.Atoi(s)
|
||||
return n
|
||||
}
|
||||
|
||||
func reconcileUser(u *user.User, meta ConnectionMetadata) error {
|
||||
group := sudoGroup()
|
||||
inGroup, err := userInGroup(u.Username, group)
|
||||
if err != nil {
|
||||
logger.Warn("auth-daemon: check group %s: %v", group, err)
|
||||
inGroup = false
|
||||
}
|
||||
if meta.Sudo && !inGroup {
|
||||
cmd := exec.Command("usermod", "-aG", group, u.Username)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
logger.Warn("auth-daemon: usermod -aG %s %s: %v (output: %s)", group, u.Username, err, string(out))
|
||||
} else {
|
||||
logger.Info("auth-daemon: added %s to %s", u.Username, group)
|
||||
}
|
||||
} else if !meta.Sudo && inGroup {
|
||||
cmd := exec.Command("gpasswd", "-d", u.Username, group)
|
||||
if out, err := cmd.CombinedOutput(); err != nil {
|
||||
logger.Warn("auth-daemon: gpasswd -d %s %s: %v (output: %s)", u.Username, group, err, string(out))
|
||||
} else {
|
||||
logger.Info("auth-daemon: removed %s from %s", u.Username, group)
|
||||
}
|
||||
}
|
||||
if meta.Homedir && u.HomeDir != "" {
|
||||
if st, err := os.Stat(u.HomeDir); err != nil || !st.IsDir() {
|
||||
if err := os.MkdirAll(u.HomeDir, 0755); err != nil {
|
||||
logger.Warn("auth-daemon: mkdir %s: %v", u.HomeDir, err)
|
||||
} else {
|
||||
uid, gid := mustAtoi(u.Uid), mustAtoi(u.Gid)
|
||||
_ = os.Chown(u.HomeDir, uid, gid)
|
||||
logger.Info("auth-daemon: created home %s for %s", u.HomeDir, u.Username)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// userInGroup reports whether username appears in the supplementary member
// list of groupName, as read via "getent group <name>".
// NOTE(review): this only sees explicit members of the group entry; a user
// whose primary GID is the group will not appear in the list — confirm
// callers are fine with that.
func userInGroup(username, groupName string) (bool, error) {
	out, err := exec.Command("getent", "group", groupName).Output()
	if err != nil {
		return false, err
	}
	// Entry format: "wheel:x:10:user1,user2"
	fields := strings.SplitN(strings.TrimSpace(string(out)), ":", 4)
	if len(fields) < 4 {
		// No member field at all: treat as not a member.
		return false, nil
	}
	for _, member := range strings.Split(fields[3], ",") {
		if strings.TrimSpace(member) == username {
			return true, nil
		}
	}
	return false, nil
}
|
||||
22
authdaemon/host_stub.go
Normal file
22
authdaemon/host_stub.go
Normal file
@@ -0,0 +1,22 @@
|
||||
//go:build !linux

package authdaemon

import "fmt"

// errLinuxOnly is returned by every stub in this file: the PAM agent depends
// on Linux-specific tooling and system paths, so all host operations are
// unavailable on other platforms.
var errLinuxOnly = fmt.Errorf("auth-daemon PAM agent is only supported on Linux")

// writeCACertIfNotExists returns an error on non-Linux.
func writeCACertIfNotExists(path, contents string, force bool) error {
	return errLinuxOnly
}

// ensureUser returns an error on non-Linux.
func ensureUser(username string, meta ConnectionMetadata) error {
	return errLinuxOnly
}

// writePrincipals returns an error on non-Linux.
func writePrincipals(path, username, niceId string) error {
	return errLinuxOnly
}
|
||||
28
authdaemon/principals.go
Normal file
28
authdaemon/principals.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package authdaemon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// GetPrincipals reads the principals data file at path, looks up the given
// user, and returns that user's principals.
// The file is JSON mapping usernames to arrays of principals, e.g.
// {"alice":["alice","usr-123"],"bob":["bob","usr-456"]}.
// A missing file or unknown user yields (nil, nil); an empty path is an error.
func GetPrincipals(path, user string) ([]string, error) {
	if path == "" {
		return nil, fmt.Errorf("principals file path is required")
	}
	raw, err := os.ReadFile(path)
	switch {
	case os.IsNotExist(err):
		// Absent file is not an error: the user simply has no principals.
		return nil, nil
	case err != nil:
		return nil, fmt.Errorf("read principals file: %w", err)
	}
	principals := map[string][]string{}
	if err := json.Unmarshal(raw, &principals); err != nil {
		return nil, fmt.Errorf("parse principals file: %w", err)
	}
	// Lookup on a missing key returns nil, matching the "not found" contract.
	return principals[user], nil
}
|
||||
56
authdaemon/routes.go
Normal file
56
authdaemon/routes.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package authdaemon
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// registerRoutes registers all API routes. Add new endpoints here.
|
||||
func (s *Server) registerRoutes() {
|
||||
s.mux.HandleFunc("/health", s.handleHealth)
|
||||
s.mux.HandleFunc("/connection", s.handleConnection)
|
||||
}
|
||||
|
||||
// ConnectionMetadata is the metadata object in POST /connection.
type ConnectionMetadata struct {
	// Sudo requests membership in the system sudo group for the user.
	Sudo bool `json:"sudo"`
	// Homedir requests that the user's home directory be created if absent.
	Homedir bool `json:"homedir"`
}

// ConnectionRequest is the JSON body for POST /connection.
type ConnectionRequest struct {
	// CaCert holds CA certificate contents; presumably written to
	// Config.CACertPath by ProcessConnection — confirm there.
	CaCert string `json:"caCert"`
	// NiceId is an identifier for the connection; presumably recorded as an
	// SSH principal alongside the username — confirm in writePrincipals.
	NiceId string `json:"niceId"`
	// Username is the local account name to provision/reconcile.
	Username string             `json:"username"`
	Metadata ConnectionMetadata `json:"metadata"`
}

// healthResponse is the JSON body for GET /health.
type healthResponse struct {
	Status string `json:"status"` // "ok" when the daemon is serving
}
|
||||
|
||||
// handleHealth responds with 200 and {"status":"ok"}.
|
||||
func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodGet {
|
||||
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_ = json.NewEncoder(w).Encode(healthResponse{Status: "ok"})
|
||||
}
|
||||
|
||||
// handleConnection accepts POST with connection payload and delegates to ProcessConnection.
|
||||
func (s *Server) handleConnection(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
var req ConnectionRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, "Bad Request", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
s.ProcessConnection(req)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
179
authdaemon/server.go
Normal file
179
authdaemon/server.go
Normal file
@@ -0,0 +1,179 @@
|
||||
package authdaemon
|
||||
|
||||
import (
	"context"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/subtle"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"net"
	"net/http"
	"os"
	"runtime"
	"strings"
	"time"

	"github.com/fosrl/newt/logger"
)
|
||||
|
||||
// Config controls how the auth-daemon server is constructed and run.
type Config struct {
	// DisableHTTPS: when true, Run() does not start the HTTPS server (for embedded use inside Newt). Call ProcessConnection directly for connection events.
	DisableHTTPS       bool
	Port               int    // Required when DisableHTTPS is false. Listen port for the HTTPS server. No default.
	PresharedKey       string // Required when DisableHTTPS is false. HTTP auth (Authorization: Bearer <key> or X-Preshared-Key: <key>). No default.
	CACertPath         string // Required. Where to write the CA cert (e.g. /etc/ssh/ca.pem). No default.
	Force              bool   // If true, overwrite existing CA cert (and other items) when content differs. Default false.
	PrincipalsFilePath string // Required. Path to the principals data file (JSON: username -> array of principals). No default.
}

// Server is the auth-daemon HTTPS server and connection processor.
type Server struct {
	cfg          Config          // construction-time configuration
	addr         string          // listen address (":<port>"); empty when HTTPS is disabled
	presharedKey string          // key checked by authMiddleware on every request
	mux          *http.ServeMux  // route table; populated by registerRoutes
	tlsCert      tls.Certificate // in-memory self-signed cert from generateTLSCert
}
|
||||
|
||||
// generateTLSCert creates a self-signed ECDSA P-256 certificate and key in
// memory (no disk), valid for "localhost" and 127.0.0.1 for one year.
func generateTLSCert() (tls.Certificate, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return tls.Certificate{}, fmt.Errorf("generate key: %w", err)
	}
	// Random 128-bit serial number.
	serial, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128))
	if err != nil {
		return tls.Certificate{}, fmt.Errorf("serial: %w", err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: serial,
		Subject: pkix.Name{
			CommonName: "localhost",
		},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(365 * 24 * time.Hour),
		KeyUsage:              x509.KeyUsageDigitalSignature,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
		BasicConstraintsValid: true,
		DNSNames:              []string{"localhost"},
		// 127.0.0.1 is an IP address and must go in the IP SAN, not
		// DNSNames: TLS clients verify IPs against IPAddresses only.
		IPAddresses: []net.IP{net.ParseIP("127.0.0.1")},
	}
	certDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, key.Public(), key)
	if err != nil {
		return tls.Certificate{}, fmt.Errorf("create certificate: %w", err)
	}
	keyDER, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return tls.Certificate{}, fmt.Errorf("marshal key: %w", err)
	}
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyDER})
	cert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		return tls.Certificate{}, fmt.Errorf("x509 key pair: %w", err)
	}
	return cert, nil
}
|
||||
|
||||
// authMiddleware wraps next and requires a valid preshared key on every request.
|
||||
// Accepts Authorization: Bearer <key> or X-Preshared-Key: <key>.
|
||||
func (s *Server) authMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
key := ""
|
||||
if v := r.Header.Get("Authorization"); strings.HasPrefix(v, "Bearer ") {
|
||||
key = strings.TrimSpace(strings.TrimPrefix(v, "Bearer "))
|
||||
}
|
||||
if key == "" {
|
||||
key = strings.TrimSpace(r.Header.Get("X-Preshared-Key"))
|
||||
}
|
||||
if key == "" || subtle.ConstantTimeCompare([]byte(key), []byte(s.presharedKey)) != 1 {
|
||||
http.Error(w, "Unauthorized", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
// NewServer builds a new auth-daemon server from cfg. Port, PresharedKey, CACertPath, and PrincipalsFilePath are required (no defaults).
|
||||
func NewServer(cfg Config) (*Server, error) {
|
||||
if runtime.GOOS != "linux" {
|
||||
return nil, fmt.Errorf("auth-daemon is only supported on Linux, not %s", runtime.GOOS)
|
||||
}
|
||||
if !cfg.DisableHTTPS {
|
||||
if cfg.Port <= 0 {
|
||||
return nil, fmt.Errorf("port is required and must be positive")
|
||||
}
|
||||
if cfg.PresharedKey == "" {
|
||||
return nil, fmt.Errorf("preshared key is required")
|
||||
}
|
||||
}
|
||||
if cfg.CACertPath == "" {
|
||||
return nil, fmt.Errorf("CACertPath is required")
|
||||
}
|
||||
if cfg.PrincipalsFilePath == "" {
|
||||
return nil, fmt.Errorf("PrincipalsFilePath is required")
|
||||
}
|
||||
s := &Server{cfg: cfg}
|
||||
if !cfg.DisableHTTPS {
|
||||
cert, err := generateTLSCert()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.addr = fmt.Sprintf(":%d", cfg.Port)
|
||||
s.presharedKey = cfg.PresharedKey
|
||||
s.mux = http.NewServeMux()
|
||||
s.tlsCert = cert
|
||||
s.registerRoutes()
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Run starts the HTTPS server (unless DisableHTTPS) and blocks until ctx is cancelled or the server errors.
// When DisableHTTPS is true, Run() blocks on ctx only and does not listen; use ProcessConnection for connection events.
// In both modes the principals file is removed on shutdown.
func (s *Server) Run(ctx context.Context) error {
	if s.cfg.DisableHTTPS {
		logger.Info("auth-daemon running (HTTPS disabled)")
		<-ctx.Done()
		s.cleanupPrincipalsFile()
		return nil
	}
	// TLS 1.2 minimum, serving the in-memory self-signed certificate.
	tcfg := &tls.Config{
		Certificates: []tls.Certificate{s.tlsCert},
		MinVersion:   tls.VersionTLS12,
	}
	// Every route goes through the preshared-key check.
	handler := s.authMiddleware(s.mux)
	srv := &http.Server{
		Addr:              s.addr,
		Handler:           handler,
		TLSConfig:         tcfg,
		ReadTimeout:       10 * time.Second,
		WriteTimeout:      10 * time.Second,
		ReadHeaderTimeout: 5 * time.Second,
		IdleTimeout:       60 * time.Second,
	}
	// Graceful shutdown (10s budget) once the context is cancelled.
	go func() {
		<-ctx.Done()
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		if err := srv.Shutdown(shutdownCtx); err != nil {
			logger.Warn("auth-daemon shutdown: %v", err)
		}
	}()
	logger.Info("auth-daemon listening on https://127.0.0.1%s", s.addr)
	// Empty cert/key paths: the certificate is supplied via TLSConfig.
	if err := srv.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed {
		return err
	}
	s.cleanupPrincipalsFile()
	return nil
}
|
||||
|
||||
func (s *Server) cleanupPrincipalsFile() {
|
||||
if s.cfg.PrincipalsFilePath != "" {
|
||||
if err := os.Remove(s.cfg.PrincipalsFilePath); err != nil && !os.IsNotExist(err) {
|
||||
logger.Warn("auth-daemon: remove principals file: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
840
bind/shared_bind.go
Normal file
840
bind/shared_bind.go
Normal file
@@ -0,0 +1,840 @@
|
||||
//go:build !js
|
||||
|
||||
package bind
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
wgConn "golang.zx2c4.com/wireguard/conn"
|
||||
)
|
||||
|
||||
// Magic packet constants for connection testing
// These packets are intercepted by SharedBind and responded to directly,
// without being passed to the WireGuard device.
var (
	// MagicTestRequest is the prefix for a test request packet
	// Format: PANGOLIN_TEST_REQ + 8 bytes of random data (for echo)
	MagicTestRequest = []byte("PANGOLIN_TEST_REQ")

	// MagicTestResponse is the prefix for a test response packet
	// Format: PANGOLIN_TEST_RSP + 8 bytes echoed from request
	MagicTestResponse = []byte("PANGOLIN_TEST_RSP")
)

const (
	// MagicPacketDataLen is the length of random data included in test packets
	MagicPacketDataLen = 8

	// MagicTestRequestLen is the total length of a test request packet
	MagicTestRequestLen = 17 + MagicPacketDataLen // len("PANGOLIN_TEST_REQ") + 8

	// MagicTestResponseLen is the total length of a test response packet
	MagicTestResponseLen = 17 + MagicPacketDataLen // len("PANGOLIN_TEST_RSP") + 8
)

// PacketSource identifies where a packet came from
type PacketSource uint8

const (
	SourceSocket   PacketSource = iota // From physical UDP socket (hole-punched clients)
	SourceNetstack                     // From netstack (relay through main tunnel)
)
|
||||
|
||||
// SourceAwareEndpoint wraps an endpoint with source information so callers
// can tell whether the packet arrived via the physical socket or via the
// netstack relay path.
type SourceAwareEndpoint struct {
	wgConn.Endpoint
	source PacketSource // where the packet came from
}

// GetSource returns the source of this endpoint.
func (e *SourceAwareEndpoint) GetSource() PacketSource {
	return e.source
}

// injectedPacket represents a packet injected into the SharedBind from an
// internal source (the netstack relay).
type injectedPacket struct {
	data     []byte          // owned copy of the payload
	endpoint wgConn.Endpoint // apparent sender, handed to WireGuard as-is
}
|
||||
|
||||
// Endpoint is a minimal wgConn.Endpoint for the SharedBind, backed by a
// single destination address. The source side is intentionally empty.
type Endpoint struct {
	AddrPort netip.AddrPort
}

// ClearSrc implements wgConn.Endpoint; there is no source state to clear.
func (e *Endpoint) ClearSrc() {}

// DstIP returns the destination address.
func (e *Endpoint) DstIP() netip.Addr { return e.AddrPort.Addr() }

// SrcIP always returns the zero Addr: this endpoint carries no source.
func (e *Endpoint) SrcIP() netip.Addr { return netip.Addr{} }

// DstToBytes returns the destination in its binary wire form.
func (e *Endpoint) DstToBytes() []byte {
	raw, _ := e.AddrPort.MarshalBinary() // MarshalBinary on AddrPort cannot fail
	return raw
}

// DstToString renders the destination as "host:port".
func (e *Endpoint) DstToString() string { return e.AddrPort.String() }

// SrcToString always returns "": this endpoint carries no source.
func (e *Endpoint) SrcToString() string { return "" }
|
||||
|
||||
// SharedBind is a thread-safe UDP bind that can be shared between WireGuard
// and hole punch senders. It wraps a single UDP connection and implements
// reference counting to prevent premature closure.
// It also supports receiving packets from a netstack and routing responses
// back through the appropriate source.
type SharedBind struct {
	mu sync.RWMutex // guards udpConn, ipv4PC/ipv6PC, port, rebinding

	// The underlying UDP connection (for hole-punched clients)
	udpConn *net.UDPConn

	// IPv4 and IPv6 packet connections for advanced features
	ipv4PC *ipv4.PacketConn
	ipv6PC *ipv6.PacketConn

	// Reference counting to prevent closing while in use
	refCount atomic.Int32
	closed   atomic.Bool

	// Channels for receiving data
	recvFuncs []wgConn.ReceiveFunc

	// Port binding information
	port uint16

	// Channel for packets from netstack (from direct relay) - larger buffer for throughput
	netstackPackets chan injectedPacket

	// Netstack connection for sending responses back through the tunnel
	// Using atomic.Pointer for lock-free access in hot path
	netstackConn atomic.Pointer[net.PacketConn]

	// Track which endpoints came from netstack (key: netip.AddrPort, value: struct{})
	// Using netip.AddrPort directly as key is more efficient than string
	netstackEndpoints sync.Map

	// Pre-allocated message buffers for batch operations (Linux only)
	ipv4Msgs []ipv4.Message

	// Shutdown signal for receive goroutines
	closeChan chan struct{}

	// Callback for magic test responses (used for holepunch testing)
	magicResponseCallback atomic.Pointer[func(addr netip.AddrPort, echoData []byte)]

	// Rebinding state - used to keep receive goroutines alive during socket transition
	rebinding     bool       // true when socket is being replaced
	rebindingCond *sync.Cond // signaled when rebind completes; shares mu
}

// MagicResponseCallback is the function signature for magic packet response callbacks.
type MagicResponseCallback func(addr netip.AddrPort, echoData []byte)
|
||||
|
||||
// New creates a new SharedBind from an existing UDP connection.
|
||||
// The SharedBind takes ownership of the connection and will close it
|
||||
// when all references are released.
|
||||
func New(udpConn *net.UDPConn) (*SharedBind, error) {
|
||||
if udpConn == nil {
|
||||
return nil, fmt.Errorf("udpConn cannot be nil")
|
||||
}
|
||||
|
||||
bind := &SharedBind{
|
||||
udpConn: udpConn,
|
||||
netstackPackets: make(chan injectedPacket, 1024), // Larger buffer for better throughput
|
||||
closeChan: make(chan struct{}),
|
||||
}
|
||||
|
||||
// Initialize the rebinding condition variable
|
||||
bind.rebindingCond = sync.NewCond(&bind.mu)
|
||||
|
||||
// Initialize reference count to 1 (the creator holds the first reference)
|
||||
bind.refCount.Store(1)
|
||||
|
||||
// Get the local port
|
||||
if addr, ok := udpConn.LocalAddr().(*net.UDPAddr); ok {
|
||||
bind.port = uint16(addr.Port)
|
||||
}
|
||||
|
||||
return bind, nil
|
||||
}
|
||||
|
||||
// SetNetstackConn sets the netstack connection for receiving/sending packets through the tunnel.
|
||||
// This connection is used for relay traffic that should go back through the main tunnel.
|
||||
func (b *SharedBind) SetNetstackConn(conn net.PacketConn) {
|
||||
b.netstackConn.Store(&conn)
|
||||
}
|
||||
|
||||
// GetNetstackConn returns the netstack connection if set
|
||||
func (b *SharedBind) GetNetstackConn() net.PacketConn {
|
||||
ptr := b.netstackConn.Load()
|
||||
if ptr == nil {
|
||||
return nil
|
||||
}
|
||||
return *ptr
|
||||
}
|
||||
|
||||
// InjectPacket allows injecting a packet directly into the SharedBind's receive path.
|
||||
// This is used for direct relay from netstack without going through the host network.
|
||||
// The fromAddr should be the address the packet appears to come from.
|
||||
func (b *SharedBind) InjectPacket(data []byte, fromAddr netip.AddrPort) error {
|
||||
if b.closed.Load() {
|
||||
return net.ErrClosed
|
||||
}
|
||||
|
||||
// Unmap IPv4-in-IPv6 addresses to ensure consistency with parsed endpoints
|
||||
if fromAddr.Addr().Is4In6() {
|
||||
fromAddr = netip.AddrPortFrom(fromAddr.Addr().Unmap(), fromAddr.Port())
|
||||
}
|
||||
|
||||
// Track this endpoint as coming from netstack so responses go back the same way
|
||||
// Use AddrPort directly as key (more efficient than string)
|
||||
b.netstackEndpoints.Store(fromAddr, struct{}{})
|
||||
|
||||
// Make a copy of the data to avoid issues with buffer reuse
|
||||
dataCopy := make([]byte, len(data))
|
||||
copy(dataCopy, data)
|
||||
|
||||
select {
|
||||
case b.netstackPackets <- injectedPacket{
|
||||
data: dataCopy,
|
||||
endpoint: &wgConn.StdNetEndpoint{AddrPort: fromAddr},
|
||||
}:
|
||||
return nil
|
||||
case <-b.closeChan:
|
||||
return net.ErrClosed
|
||||
default:
|
||||
// Channel full, drop the packet
|
||||
return fmt.Errorf("netstack packet buffer full")
|
||||
}
|
||||
}
|
||||
|
||||
// AddRef increments the reference count. Call this when sharing
|
||||
// the bind with another component.
|
||||
func (b *SharedBind) AddRef() {
|
||||
newCount := b.refCount.Add(1)
|
||||
// Optional: Add logging for debugging
|
||||
_ = newCount // Placeholder for potential logging
|
||||
}
|
||||
|
||||
// Release decrements the reference count. When it reaches zero,
|
||||
// the underlying UDP connection is closed.
|
||||
func (b *SharedBind) Release() error {
|
||||
newCount := b.refCount.Add(-1)
|
||||
// Optional: Add logging for debugging
|
||||
_ = newCount // Placeholder for potential logging
|
||||
|
||||
if newCount < 0 {
|
||||
// This should never happen with proper usage
|
||||
b.refCount.Store(0)
|
||||
return fmt.Errorf("SharedBind reference count went negative")
|
||||
}
|
||||
|
||||
if newCount == 0 {
|
||||
return b.closeConnection()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// closeConnection actually closes the UDP connection and tears down all
// per-socket state. Idempotent: only the first caller past the CAS does work.
func (b *SharedBind) closeConnection() error {
	if !b.closed.CompareAndSwap(false, true) {
		// Already closed
		return nil
	}

	// Signal all goroutines to stop
	close(b.closeChan)

	b.mu.Lock()
	defer b.mu.Unlock()

	var err error
	if b.udpConn != nil {
		err = b.udpConn.Close()
		b.udpConn = nil
	}

	b.ipv4PC = nil
	b.ipv6PC = nil

	// Clear netstack connection (but don't close it - it's managed externally)
	b.netstackConn.Store(nil)

	// Clear tracked netstack endpoints
	// NOTE(review): assigning a fresh sync.Map copies its internal lock
	// (go vet copylocks) — consider Range+Delete; confirm no goroutine can
	// still touch the map after closed/closeChan above.
	b.netstackEndpoints = sync.Map{}

	return err
}
|
||||
|
||||
// ClearNetstackConn clears the netstack connection and tracked endpoints.
|
||||
// Call this when stopping the relay.
|
||||
func (b *SharedBind) ClearNetstackConn() {
|
||||
b.netstackConn.Store(nil)
|
||||
|
||||
// Clear tracked netstack endpoints
|
||||
b.netstackEndpoints = sync.Map{}
|
||||
}
|
||||
|
||||
// GetUDPConn returns the underlying UDP connection.
// The caller must not close this connection directly.
// May be nil while a rebind is in progress (see CloseSocket/Rebind).
func (b *SharedBind) GetUDPConn() *net.UDPConn {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.udpConn
}

// GetRefCount returns the current reference count (for debugging).
func (b *SharedBind) GetRefCount() int32 {
	return b.refCount.Load()
}

// IsClosed returns whether the bind has been fully closed.
func (b *SharedBind) IsClosed() bool {
	return b.closed.Load()
}

// GetPort returns the current UDP port the bind is using.
// This is useful when rebinding to try to reuse the same port.
func (b *SharedBind) GetPort() uint16 {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.port
}
|
||||
|
||||
// CloseSocket closes the underlying UDP connection to release the port,
|
||||
// but keeps the SharedBind in a state where it can accept a new connection via Rebind.
|
||||
// This allows the caller to close the old socket first, then bind a new socket
|
||||
// to the same port before calling Rebind.
|
||||
//
|
||||
// Returns the port that was being used, so the caller can attempt to rebind to it.
|
||||
// Sets the rebinding flag so receive goroutines will wait for the new socket
|
||||
// instead of exiting.
|
||||
func (b *SharedBind) CloseSocket() (uint16, error) {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if b.closed.Load() {
|
||||
return 0, fmt.Errorf("bind is closed")
|
||||
}
|
||||
|
||||
port := b.port
|
||||
|
||||
// Set rebinding flag BEFORE closing the socket so receive goroutines
|
||||
// know to wait instead of exit
|
||||
b.rebinding = true
|
||||
|
||||
// Close the old connection to release the port
|
||||
if b.udpConn != nil {
|
||||
logger.Debug("Closing UDP connection to release port %d (rebinding)", port)
|
||||
b.udpConn.Close()
|
||||
b.udpConn = nil
|
||||
}
|
||||
|
||||
return port, nil
|
||||
}
|
||||
|
||||
// Rebind replaces the underlying UDP connection with a new one.
// This is necessary when network connectivity changes (e.g., WiFi to cellular
// transition on macOS/iOS) and the old socket becomes stale.
//
// The caller is responsible for creating the new UDP connection and passing it here.
// After rebind, the caller should trigger a hole punch to re-establish NAT mappings.
//
// Note: Call CloseSocket() first if you need to rebind to the same port, as the
// old socket must be closed before a new socket can bind to the same port.
func (b *SharedBind) Rebind(newConn *net.UDPConn) error {
	if newConn == nil {
		return fmt.Errorf("newConn cannot be nil")
	}

	b.mu.Lock()
	defer b.mu.Unlock()

	if b.closed.Load() {
		return fmt.Errorf("bind is closed")
	}

	// Close the old connection if it's still open
	// (it may have already been closed via CloseSocket)
	if b.udpConn != nil {
		logger.Debug("Closing old UDP connection during rebind")
		b.udpConn.Close()
	}

	// Set up the new connection
	b.udpConn = newConn

	// Update packet connections for the new socket
	if runtime.GOOS == "linux" || runtime.GOOS == "android" {
		b.ipv4PC = ipv4.NewPacketConn(newConn)
		b.ipv6PC = ipv6.NewPacketConn(newConn)

		// Re-initialize message buffers for batch operations
		batchSize := wgConn.IdealBatchSize
		b.ipv4Msgs = make([]ipv4.Message, batchSize)
		for i := range b.ipv4Msgs {
			b.ipv4Msgs[i].OOB = make([]byte, 0)
		}
	} else {
		// For non-Linux platforms, still set up ipv4PC for consistency
		b.ipv4PC = ipv4.NewPacketConn(newConn)
		b.ipv6PC = ipv6.NewPacketConn(newConn)
	}

	// Update the port (the new socket may have landed on a different one)
	if addr, ok := newConn.LocalAddr().(*net.UDPAddr); ok {
		b.port = uint16(addr.Port)
		logger.Info("Rebound UDP socket to port %d", b.port)
	}

	// Clear the rebinding flag and wake up any waiting receive goroutines
	b.rebinding = false
	b.rebindingCond.Broadcast()

	logger.Debug("Rebind complete, signaled waiting receive goroutines")

	return nil
}
|
||||
|
||||
// SetMagicResponseCallback sets a callback function that will be called when
|
||||
// a magic test response packet is received. This is used for holepunch testing.
|
||||
// Pass nil to clear the callback.
|
||||
func (b *SharedBind) SetMagicResponseCallback(callback MagicResponseCallback) {
|
||||
if callback == nil {
|
||||
b.magicResponseCallback.Store(nil)
|
||||
} else {
|
||||
// Convert to the function type the atomic.Pointer expects
|
||||
fn := func(addr netip.AddrPort, echoData []byte) {
|
||||
callback(addr, echoData)
|
||||
}
|
||||
b.magicResponseCallback.Store(&fn)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteToUDP writes data to a specific UDP address.
|
||||
// This is thread-safe and can be used by hole punch senders.
|
||||
func (b *SharedBind) WriteToUDP(data []byte, addr *net.UDPAddr) (int, error) {
|
||||
if b.closed.Load() {
|
||||
return 0, net.ErrClosed
|
||||
}
|
||||
|
||||
b.mu.RLock()
|
||||
conn := b.udpConn
|
||||
b.mu.RUnlock()
|
||||
|
||||
if conn == nil {
|
||||
return 0, net.ErrClosed
|
||||
}
|
||||
|
||||
return conn.WriteToUDP(data, addr)
|
||||
}
|
||||
|
||||
// Close implements the WireGuard Bind interface.
// It decrements the reference count and closes the connection if no
// references remain (see Release for the counting rules).
func (b *SharedBind) Close() error {
	return b.Release()
}
|
||||
|
||||
// Open implements the WireGuard Bind interface.
// Since the connection is already open, this just sets up the receive functions.
// Note the uport argument is ignored: the pre-bound socket's port is returned.
func (b *SharedBind) Open(uport uint16) ([]wgConn.ReceiveFunc, uint16, error) {
	if b.closed.Load() {
		return nil, 0, net.ErrClosed
	}

	b.mu.Lock()
	defer b.mu.Unlock()

	if b.udpConn == nil {
		return nil, 0, net.ErrClosed
	}

	// Set up IPv4 and IPv6 packet connections for advanced features
	if runtime.GOOS == "linux" || runtime.GOOS == "android" {
		b.ipv4PC = ipv4.NewPacketConn(b.udpConn)
		b.ipv6PC = ipv6.NewPacketConn(b.udpConn)

		// Pre-allocate message buffers for batch operations
		batchSize := wgConn.IdealBatchSize
		b.ipv4Msgs = make([]ipv4.Message, batchSize)
		for i := range b.ipv4Msgs {
			b.ipv4Msgs[i].OOB = make([]byte, 0)
		}
	}

	// Create receive functions - one for socket, one for netstack
	recvFuncs := make([]wgConn.ReceiveFunc, 0, 2)

	// Add socket receive function (reads from physical UDP socket)
	recvFuncs = append(recvFuncs, b.makeReceiveSocket())

	// Add netstack receive function (reads from injected packets channel)
	recvFuncs = append(recvFuncs, b.makeReceiveNetstack())

	b.recvFuncs = recvFuncs
	return recvFuncs, b.port, nil
}
|
||||
|
||||
// makeReceiveSocket creates a receive function for physical UDP socket packets.
// The returned function loops: it survives a socket rebind by waiting on
// rebindingCond and retrying, and only returns net.ErrClosed once the bind
// is fully closed.
func (b *SharedBind) makeReceiveSocket() wgConn.ReceiveFunc {
	return func(bufs [][]byte, sizes []int, eps []wgConn.Endpoint) (n int, err error) {
		for {
			if b.closed.Load() {
				return 0, net.ErrClosed
			}

			b.mu.RLock()
			conn := b.udpConn
			pc := b.ipv4PC
			b.mu.RUnlock()

			if conn == nil {
				// Socket is nil - check if we're rebinding or truly closed
				if b.closed.Load() {
					return 0, net.ErrClosed
				}

				// Wait for rebind to complete
				b.mu.Lock()
				for b.rebinding && !b.closed.Load() {
					logger.Debug("Receive goroutine waiting for socket rebind to complete")
					b.rebindingCond.Wait()
				}
				b.mu.Unlock()

				// Check again after waking up
				if b.closed.Load() {
					return 0, net.ErrClosed
				}

				// Loop back to retry with new socket
				continue
			}

			// Use batch reading on Linux for performance
			// (these locals deliberately shadow the named returns; every
			// exit path below returns explicitly)
			var n int
			var err error
			if pc != nil && (runtime.GOOS == "linux" || runtime.GOOS == "android") {
				n, err = b.receiveIPv4Batch(pc, bufs, sizes, eps)
			} else {
				n, err = b.receiveIPv4Simple(conn, bufs, sizes, eps)
			}

			if err != nil {
				// Check if this error is due to rebinding
				b.mu.RLock()
				rebinding := b.rebinding
				b.mu.RUnlock()

				if rebinding {
					logger.Debug("Receive got error during rebind, waiting for new socket: %v", err)
					// Wait for rebind to complete and retry
					b.mu.Lock()
					for b.rebinding && !b.closed.Load() {
						b.rebindingCond.Wait()
					}
					b.mu.Unlock()

					if b.closed.Load() {
						return 0, net.ErrClosed
					}

					// Retry with new socket
					continue
				}

				// Not rebinding, return the error
				return 0, err
			}

			return n, nil
		}
	}
}
|
||||
|
||||
// makeReceiveNetstack creates a receive function for netstack-injected packets
|
||||
func (b *SharedBind) makeReceiveNetstack() wgConn.ReceiveFunc {
|
||||
return func(bufs [][]byte, sizes []int, eps []wgConn.Endpoint) (n int, err error) {
|
||||
select {
|
||||
case <-b.closeChan:
|
||||
return 0, net.ErrClosed
|
||||
case pkt := <-b.netstackPackets:
|
||||
if len(pkt.data) <= len(bufs[0]) {
|
||||
copy(bufs[0], pkt.data)
|
||||
sizes[0] = len(pkt.data)
|
||||
eps[0] = pkt.endpoint
|
||||
return 1, nil
|
||||
}
|
||||
// Packet too large for buffer, skip it
|
||||
return 0, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// receiveIPv4Batch uses batch reading for better performance on Linux.
// It fills bufs/sizes/eps in place, intercepting magic test packets (which
// are handled and dropped) and compacting the remaining messages to the
// front of the output arrays. Returns the number of delivered packets.
func (b *SharedBind) receiveIPv4Batch(pc *ipv4.PacketConn, bufs [][]byte, sizes []int, eps []wgConn.Endpoint) (int, error) {
	// Use pre-allocated messages, just update buffer pointers
	numBufs := len(bufs)
	if numBufs > len(b.ipv4Msgs) {
		numBufs = len(b.ipv4Msgs)
	}

	for i := 0; i < numBufs; i++ {
		b.ipv4Msgs[i].Buffers = [][]byte{bufs[i]}
	}

	numMsgs, err := pc.ReadBatch(b.ipv4Msgs[:numBufs], 0)
	if err != nil {
		return 0, err
	}

	// Process messages and filter out magic packets
	writeIdx := 0
	for i := 0; i < numMsgs; i++ {
		if b.ipv4Msgs[i].N == 0 {
			continue
		}

		// Check for magic packet
		if b.ipv4Msgs[i].Addr != nil {
			if udpAddr, ok := b.ipv4Msgs[i].Addr.(*net.UDPAddr); ok {
				data := bufs[i][:b.ipv4Msgs[i].N]
				if b.handleMagicPacket(data, udpAddr) {
					// Magic packet handled, skip this message
					continue
				}
			}
		}

		// Not a magic packet, include in output
		if writeIdx != i {
			// Need to copy data to the correct position (compaction)
			copy(bufs[writeIdx], bufs[i][:b.ipv4Msgs[i].N])
		}
		sizes[writeIdx] = b.ipv4Msgs[i].N

		if b.ipv4Msgs[i].Addr != nil {
			if udpAddr, ok := b.ipv4Msgs[i].Addr.(*net.UDPAddr); ok {
				addrPort := udpAddr.AddrPort()
				// Unmap IPv4-in-IPv6 addresses to ensure consistency with parsed endpoints
				if addrPort.Addr().Is4In6() {
					addrPort = netip.AddrPortFrom(addrPort.Addr().Unmap(), addrPort.Port())
				}
				eps[writeIdx] = &wgConn.StdNetEndpoint{AddrPort: addrPort}
			}
		}
		// NOTE(review): if Addr is nil or not a *net.UDPAddr, eps[writeIdx]
		// keeps its previous (possibly stale) value — confirm the caller
		// tolerates this.
		writeIdx++
	}

	return writeIdx, nil
}
|
||||
|
||||
// receiveIPv4Simple uses simple ReadFromUDP for non-Linux platforms
|
||||
func (b *SharedBind) receiveIPv4Simple(conn *net.UDPConn, bufs [][]byte, sizes []int, eps []wgConn.Endpoint) (int, error) {
|
||||
// No read deadline - we rely on socket close to unblock during rebind.
|
||||
// The caller (makeReceiveSocket) handles rebind state when errors occur.
|
||||
for {
|
||||
n, addr, err := conn.ReadFromUDP(bufs[0])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Check for magic test packet and handle it directly
|
||||
if b.handleMagicPacket(bufs[0][:n], addr) {
|
||||
// Magic packet was handled, read another packet
|
||||
continue
|
||||
}
|
||||
|
||||
sizes[0] = n
|
||||
if addr != nil {
|
||||
addrPort := addr.AddrPort()
|
||||
// Unmap IPv4-in-IPv6 addresses to ensure consistency with parsed endpoints
|
||||
if addrPort.Addr().Is4In6() {
|
||||
addrPort = netip.AddrPortFrom(addrPort.Addr().Unmap(), addrPort.Port())
|
||||
}
|
||||
eps[0] = &wgConn.StdNetEndpoint{AddrPort: addrPort}
|
||||
}
|
||||
|
||||
return 1, nil
|
||||
}
|
||||
}
|
||||
|
||||
// handleMagicPacket checks if the packet is a magic test packet and responds if so.
// Returns true if the packet was a magic packet and was handled (should not be passed to WireGuard).
//
// Wire format: request = MagicTestRequest prefix + MagicPacketDataLen random
// bytes; response = MagicTestResponse prefix + the same random bytes echoed
// back, so the requester can correlate replies.
func (b *SharedBind) handleMagicPacket(data []byte, addr *net.UDPAddr) bool {
	// Check if this is a test request packet
	if len(data) >= MagicTestRequestLen && bytes.HasPrefix(data, MagicTestRequest) {
		// logger.Debug("Received magic test REQUEST from %s, sending response", addr.String())
		// Extract the random data portion to echo back
		echoData := data[len(MagicTestRequest) : len(MagicTestRequest)+MagicPacketDataLen]

		// Build response packet
		response := make([]byte, MagicTestResponseLen)
		copy(response, MagicTestResponse)
		copy(response[len(MagicTestResponse):], echoData)

		// Send response back to sender. Snapshot the conn under the read
		// lock so a concurrent rebind cannot swap it mid-use.
		b.mu.RLock()
		conn := b.udpConn
		b.mu.RUnlock()

		if conn != nil {
			// Best-effort reply; a lost test packet is harmless.
			_, _ = conn.WriteToUDP(response, addr)
		}

		return true
	}

	// Check if this is a test response packet
	if len(data) >= MagicTestResponseLen && bytes.HasPrefix(data, MagicTestResponse) {
		// logger.Debug("Received magic test RESPONSE from %s", addr.String())
		// Extract the echoed data
		echoData := data[len(MagicTestResponse) : len(MagicTestResponse)+MagicPacketDataLen]

		// Call the callback if set (stored atomically so readers never race
		// with registration).
		callbackPtr := b.magicResponseCallback.Load()
		if callbackPtr != nil {
			callback := *callbackPtr
			addrPort := addr.AddrPort()
			// Unmap IPv4-in-IPv6 addresses to ensure consistency
			if addrPort.Addr().Is4In6() {
				addrPort = netip.AddrPortFrom(addrPort.Addr().Unmap(), addrPort.Port())
			}
			callback(addrPort, echoData)
		} else {
			logger.Debug("Magic response received but no callback registered")
		}

		return true
	}

	// Neither prefix matched: ordinary traffic, pass through to WireGuard.
	return false
}
|
||||
|
||||
// Send implements the WireGuard Bind interface.
// It sends packets to the specified endpoint, routing through the appropriate
// source (netstack or physical socket) based on where the endpoint's packets came from.
func (b *SharedBind) Send(bufs [][]byte, ep wgConn.Endpoint) error {
	if b.closed.Load() {
		return net.ErrClosed
	}

	// Extract the destination address from the endpoint
	var destAddrPort netip.AddrPort

	// Try to cast to StdNetEndpoint first (most common case, avoid allocations)
	if stdEp, ok := ep.(*wgConn.StdNetEndpoint); ok {
		destAddrPort = stdEp.AddrPort
	} else {
		// Fallback: construct from DstIP and DstToBytes
		dstBytes := ep.DstToBytes()
		if len(dstBytes) >= 6 { // Minimum for IPv4 (4 bytes) + port (2 bytes)
			var addr netip.Addr
			var port uint16

			// NOTE(review): port is decoded little-endian here — assumed to
			// match the endpoint implementation's DstToBytes layout; confirm
			// against the concrete Endpoint types used with this bind.
			if len(dstBytes) >= 18 { // IPv6 (16 bytes) + port (2 bytes)
				addr, _ = netip.AddrFromSlice(dstBytes[:16])
				port = uint16(dstBytes[16]) | uint16(dstBytes[17])<<8
			} else { // IPv4
				addr, _ = netip.AddrFromSlice(dstBytes[:4])
				port = uint16(dstBytes[4]) | uint16(dstBytes[5])<<8
			}

			if addr.IsValid() {
				destAddrPort = netip.AddrPortFrom(addr, port)
			}
		}
	}

	if !destAddrPort.IsValid() {
		return fmt.Errorf("could not extract destination address from endpoint")
	}

	// Check if this endpoint came from netstack - if so, send through netstack
	// Use AddrPort directly as key (more efficient than string conversion)
	if _, isNetstackEndpoint := b.netstackEndpoints.Load(destAddrPort); isNetstackEndpoint {
		connPtr := b.netstackConn.Load()
		if connPtr != nil && *connPtr != nil {
			netstackConn := *connPtr
			destAddr := net.UDPAddrFromAddrPort(destAddrPort)
			// Send all buffers through netstack
			for _, buf := range bufs {
				_, err := netstackConn.WriteTo(buf, destAddr)
				if err != nil {
					return err
				}
			}
			return nil
		}
		// Fall through to socket if netstack conn not available
	}

	// Send through the physical UDP socket (for hole-punched clients).
	// Snapshot under the read lock so a concurrent rebind can't swap the
	// conn out from under us.
	b.mu.RLock()
	conn := b.udpConn
	b.mu.RUnlock()

	if conn == nil {
		return net.ErrClosed
	}

	destAddr := net.UDPAddrFromAddrPort(destAddrPort)

	// Send all buffers to the destination; stop at the first write error.
	for _, buf := range bufs {
		_, err := conn.WriteToUDP(buf, destAddr)
		if err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// SetMark implements the WireGuard Bind interface.
// It's a no-op for this implementation: fwmark-based routing is not needed
// because this bind manages its own socket routing.
func (b *SharedBind) SetMark(mark uint32) error {
	// Not implemented for this use case
	return nil
}
|
||||
|
||||
// BatchSize returns the preferred batch size for sending packets.
|
||||
func (b *SharedBind) BatchSize() int {
|
||||
if runtime.GOOS == "linux" || runtime.GOOS == "android" {
|
||||
return wgConn.IdealBatchSize
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
// ParseEndpoint creates a new endpoint from a string address.
|
||||
func (b *SharedBind) ParseEndpoint(s string) (wgConn.Endpoint, error) {
|
||||
addrPort, err := netip.ParseAddrPort(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &wgConn.StdNetEndpoint{AddrPort: addrPort}, nil
|
||||
}
|
||||
555
bind/shared_bind_test.go
Normal file
555
bind/shared_bind_test.go
Normal file
@@ -0,0 +1,555 @@
|
||||
//go:build !js
|
||||
|
||||
package bind
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
wgConn "golang.zx2c4.com/wireguard/conn"
|
||||
)
|
||||
|
||||
// TestSharedBindCreation tests basic creation and initialization: a new
// SharedBind must start with a reference count of exactly 1 and close cleanly.
func TestSharedBindCreation(t *testing.T) {
	// Create a UDP connection on an ephemeral port (Port: 0).
	udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create UDP connection: %v", err)
	}
	defer udpConn.Close()

	// Create SharedBind
	bind, err := New(udpConn)
	if err != nil {
		t.Fatalf("Failed to create SharedBind: %v", err)
	}

	if bind == nil {
		t.Fatal("SharedBind is nil")
	}

	// Verify initial reference count: the creator holds the first reference.
	if bind.refCount.Load() != 1 {
		t.Errorf("Expected initial refCount to be 1, got %d", bind.refCount.Load())
	}

	// Clean up
	if err := bind.Close(); err != nil {
		t.Errorf("Failed to close SharedBind: %v", err)
	}
}
|
||||
|
||||
// TestSharedBindReferenceCount tests reference counting: AddRef increments,
// Release decrements, and the underlying connection is closed only when the
// count reaches zero.
func TestSharedBindReferenceCount(t *testing.T) {
	udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create UDP connection: %v", err)
	}

	bind, err := New(udpConn)
	if err != nil {
		t.Fatalf("Failed to create SharedBind: %v", err)
	}

	// Add references (count goes 1 -> 2 -> 3).
	bind.AddRef()
	if bind.refCount.Load() != 2 {
		t.Errorf("Expected refCount to be 2, got %d", bind.refCount.Load())
	}

	bind.AddRef()
	if bind.refCount.Load() != 3 {
		t.Errorf("Expected refCount to be 3, got %d", bind.refCount.Load())
	}

	// Release references (3 -> 2 -> 1 -> 0).
	bind.Release()
	if bind.refCount.Load() != 2 {
		t.Errorf("Expected refCount to be 2 after release, got %d", bind.refCount.Load())
	}

	bind.Release()
	bind.Release() // This should close the connection (count hits zero)

	if !bind.closed.Load() {
		t.Error("Expected bind to be closed after all references released")
	}
}
|
||||
|
||||
// TestSharedBindWriteToUDP tests the WriteToUDP functionality: a payload sent
// through the bind must arrive intact at a plain UDP receiver.
func TestSharedBindWriteToUDP(t *testing.T) {
	// Create sender
	senderConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create sender UDP connection: %v", err)
	}

	// The bind takes ownership of senderConn; closing the bind is assumed to
	// close the socket via the reference count, so senderConn has no defer
	// Close of its own.
	senderBind, err := New(senderConn)
	if err != nil {
		t.Fatalf("Failed to create sender SharedBind: %v", err)
	}
	defer senderBind.Close()

	// Create receiver
	receiverConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create receiver UDP connection: %v", err)
	}
	defer receiverConn.Close()

	receiverAddr := receiverConn.LocalAddr().(*net.UDPAddr)

	// Send data
	testData := []byte("Hello, SharedBind!")
	n, err := senderBind.WriteToUDP(testData, receiverAddr)
	if err != nil {
		t.Fatalf("WriteToUDP failed: %v", err)
	}

	if n != len(testData) {
		t.Errorf("Expected to send %d bytes, sent %d", len(testData), n)
	}

	// Receive data; the deadline bounds the test if the packet is lost.
	buf := make([]byte, 1024)
	receiverConn.SetReadDeadline(time.Now().Add(2 * time.Second))
	n, _, err = receiverConn.ReadFromUDP(buf)
	if err != nil {
		t.Fatalf("Failed to receive data: %v", err)
	}

	if string(buf[:n]) != string(testData) {
		t.Errorf("Expected to receive %q, got %q", testData, buf[:n])
	}
}
|
||||
|
||||
// TestSharedBindConcurrentWrites tests thread-safety by firing 100 goroutines
// at WriteToUDP simultaneously; run with -race to catch data races.
func TestSharedBindConcurrentWrites(t *testing.T) {
	// Create sender
	senderConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create sender UDP connection: %v", err)
	}

	senderBind, err := New(senderConn)
	if err != nil {
		t.Fatalf("Failed to create sender SharedBind: %v", err)
	}
	defer senderBind.Close()

	// Create receiver (packets are not read back; only the sends matter here).
	receiverConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create receiver UDP connection: %v", err)
	}
	defer receiverConn.Close()

	receiverAddr := receiverConn.LocalAddr().(*net.UDPAddr)

	// Launch concurrent writes
	numGoroutines := 100
	var wg sync.WaitGroup
	wg.Add(numGoroutines)

	for i := 0; i < numGoroutines; i++ {
		go func(id int) {
			defer wg.Done()
			data := []byte{byte(id)}
			// t.Errorf (unlike Fatalf) is safe to call from a goroutine.
			_, err := senderBind.WriteToUDP(data, receiverAddr)
			if err != nil {
				t.Errorf("WriteToUDP failed in goroutine %d: %v", id, err)
			}
		}(i)
	}

	wg.Wait()
}
|
||||
|
||||
// TestSharedBindWireGuardInterface tests WireGuard Bind interface
// implementation: Open must yield receive funcs and a real port, SetMark is a
// no-op, and BatchSize is positive.
func TestSharedBindWireGuardInterface(t *testing.T) {
	udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create UDP connection: %v", err)
	}

	bind, err := New(udpConn)
	if err != nil {
		t.Fatalf("Failed to create SharedBind: %v", err)
	}
	defer bind.Close()

	// Test Open (port 0 means "use whatever the existing socket is bound to").
	recvFuncs, port, err := bind.Open(0)
	if err != nil {
		t.Fatalf("Open failed: %v", err)
	}

	if len(recvFuncs) == 0 {
		t.Error("Expected at least one receive function")
	}

	if port == 0 {
		t.Error("Expected non-zero port")
	}

	// Test SetMark (should be a no-op)
	if err := bind.SetMark(0); err != nil {
		t.Errorf("SetMark failed: %v", err)
	}

	// Test BatchSize
	batchSize := bind.BatchSize()
	if batchSize <= 0 {
		t.Error("Expected positive batch size")
	}
}
|
||||
|
||||
// TestSharedBindSend tests the Send method with WireGuard endpoints: a buffer
// sent to a StdNetEndpoint must arrive at that address via the physical socket.
func TestSharedBindSend(t *testing.T) {
	// Create sender
	senderConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create sender UDP connection: %v", err)
	}

	senderBind, err := New(senderConn)
	if err != nil {
		t.Fatalf("Failed to create sender SharedBind: %v", err)
	}
	defer senderBind.Close()

	// Create receiver
	receiverConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create receiver UDP connection: %v", err)
	}
	defer receiverConn.Close()

	receiverAddr := receiverConn.LocalAddr().(*net.UDPAddr)

	// Create an endpoint of the type WireGuard itself would hand to Send.
	addrPort := receiverAddr.AddrPort()
	endpoint := &wgConn.StdNetEndpoint{AddrPort: addrPort}

	// Send data
	testData := []byte("WireGuard packet")
	bufs := [][]byte{testData}
	err = senderBind.Send(bufs, endpoint)
	if err != nil {
		t.Fatalf("Send failed: %v", err)
	}

	// Receive data; the deadline bounds the test if the packet is lost.
	buf := make([]byte, 1024)
	receiverConn.SetReadDeadline(time.Now().Add(2 * time.Second))
	n, _, err := receiverConn.ReadFromUDP(buf)
	if err != nil {
		t.Fatalf("Failed to receive data: %v", err)
	}

	if string(buf[:n]) != string(testData) {
		t.Errorf("Expected to receive %q, got %q", testData, buf[:n])
	}
}
|
||||
|
||||
// TestSharedBindMultipleUsers simulates WireGuard and hole punch using the
// same bind concurrently, then verifies that releasing both references
// actually closes the shared socket.
func TestSharedBindMultipleUsers(t *testing.T) {
	// Create shared bind
	udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create UDP connection: %v", err)
	}

	sharedBind, err := New(udpConn)
	if err != nil {
		t.Fatalf("Failed to create SharedBind: %v", err)
	}

	// Add reference for hole punch sender (WireGuard holds the initial one).
	sharedBind.AddRef()

	// Create receiver
	receiverConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create receiver UDP connection: %v", err)
	}
	defer receiverConn.Close()

	receiverAddr := receiverConn.LocalAddr().(*net.UDPAddr)

	var wg sync.WaitGroup

	// Simulate WireGuard using the bind via Send with a StdNetEndpoint.
	wg.Add(1)
	go func() {
		defer wg.Done()
		addrPort := receiverAddr.AddrPort()
		endpoint := &wgConn.StdNetEndpoint{AddrPort: addrPort}

		for i := 0; i < 10; i++ {
			data := []byte("WireGuard packet")
			bufs := [][]byte{data}
			if err := sharedBind.Send(bufs, endpoint); err != nil {
				t.Errorf("WireGuard Send failed: %v", err)
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()

	// Simulate hole punch sender using the bind via raw WriteToUDP.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for i := 0; i < 10; i++ {
			data := []byte("Hole punch packet")
			if _, err := sharedBind.WriteToUDP(data, receiverAddr); err != nil {
				t.Errorf("Hole punch WriteToUDP failed: %v", err)
			}
			time.Sleep(10 * time.Millisecond)
		}
	}()

	wg.Wait()

	// Release the hole punch reference
	sharedBind.Release()

	// Close WireGuard's reference (should close the connection)
	sharedBind.Close()

	if !sharedBind.closed.Load() {
		t.Error("Expected bind to be closed after all users released it")
	}
}
|
||||
|
||||
// TestEndpoint tests the Endpoint implementation's accessor methods
// (DstIP/DstToString/DstToBytes/SrcIP/ClearSrc) against a fixed address.
func TestEndpoint(t *testing.T) {
	addr := netip.MustParseAddr("192.168.1.1")
	addrPort := netip.AddrPortFrom(addr, 51820)

	ep := &Endpoint{AddrPort: addrPort}

	// Test DstIP
	if ep.DstIP() != addr {
		t.Errorf("Expected DstIP to be %v, got %v", addr, ep.DstIP())
	}

	// Test DstToString
	expected := "192.168.1.1:51820"
	if ep.DstToString() != expected {
		t.Errorf("Expected DstToString to be %q, got %q", expected, ep.DstToString())
	}

	// Test DstToBytes (only non-emptiness is asserted, not the exact layout).
	bytes := ep.DstToBytes()
	if len(bytes) == 0 {
		t.Error("Expected DstToBytes to return non-empty slice")
	}

	// Test SrcIP (should be zero — this endpoint type carries no source).
	if ep.SrcIP().IsValid() {
		t.Error("Expected SrcIP to be invalid")
	}

	// Test ClearSrc (should not panic)
	ep.ClearSrc()
}
|
||||
|
||||
// TestParseEndpoint tests the ParseEndpoint method with a table of valid
// IPv4/IPv6 inputs and malformed strings that must return an error.
func TestParseEndpoint(t *testing.T) {
	udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create UDP connection: %v", err)
	}

	bind, err := New(udpConn)
	if err != nil {
		t.Fatalf("Failed to create SharedBind: %v", err)
	}
	defer bind.Close()

	tests := []struct {
		name      string
		input     string
		wantErr   bool
		checkAddr func(*testing.T, wgConn.Endpoint) // optional round-trip check
	}{
		{
			name:    "valid IPv4",
			input:   "192.168.1.1:51820",
			wantErr: false,
			checkAddr: func(t *testing.T, ep wgConn.Endpoint) {
				if ep.DstToString() != "192.168.1.1:51820" {
					t.Errorf("Expected 192.168.1.1:51820, got %s", ep.DstToString())
				}
			},
		},
		{
			name:    "valid IPv6",
			input:   "[::1]:51820",
			wantErr: false,
			checkAddr: func(t *testing.T, ep wgConn.Endpoint) {
				if ep.DstToString() != "[::1]:51820" {
					t.Errorf("Expected [::1]:51820, got %s", ep.DstToString())
				}
			},
		},
		{
			name:    "invalid - missing port",
			input:   "192.168.1.1",
			wantErr: true,
		},
		{
			name:    "invalid - bad format",
			input:   "not-an-address",
			wantErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ep, err := bind.ParseEndpoint(tt.input)
			if (err != nil) != tt.wantErr {
				t.Errorf("ParseEndpoint() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !tt.wantErr && tt.checkAddr != nil {
				tt.checkAddr(t, ep)
			}
		})
	}
}
|
||||
|
||||
// TestNetstackRouting tests that packets from netstack endpoints are routed
// back through netstack: injecting a packet registers the endpoint, so a
// later Send to the same address must leave via the netstack connection.
func TestNetstackRouting(t *testing.T) {
	// Create the SharedBind with a physical UDP socket
	physicalConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create physical UDP connection: %v", err)
	}

	sharedBind, err := New(physicalConn)
	if err != nil {
		t.Fatalf("Failed to create SharedBind: %v", err)
	}
	defer sharedBind.Close()

	// Create a mock "netstack" connection (just another UDP socket for testing)
	netstackConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create netstack UDP connection: %v", err)
	}
	defer netstackConn.Close()

	// Set the netstack connection
	sharedBind.SetNetstackConn(netstackConn)

	// Create a "client" that would receive packets
	clientConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create client UDP connection: %v", err)
	}
	defer clientConn.Close()

	clientAddr := clientConn.LocalAddr().(*net.UDPAddr)
	clientAddrPort := clientAddr.AddrPort()

	// Inject a packet from the "netstack" source - this should track the endpoint
	testData := []byte("test packet from netstack")
	err = sharedBind.InjectPacket(testData, clientAddrPort)
	if err != nil {
		t.Fatalf("InjectPacket failed: %v", err)
	}

	// Now when we send a response to this endpoint, it should go through netstack
	endpoint := &wgConn.StdNetEndpoint{AddrPort: clientAddrPort}
	responseData := []byte("response packet")
	err = sharedBind.Send([][]byte{responseData}, endpoint)
	if err != nil {
		t.Fatalf("Send failed: %v", err)
	}

	// The packet should be received by the client from the netstack connection
	buf := make([]byte, 1024)
	clientConn.SetReadDeadline(time.Now().Add(2 * time.Second))
	n, fromAddr, err := clientConn.ReadFromUDP(buf)
	if err != nil {
		t.Fatalf("Failed to receive response: %v", err)
	}

	if string(buf[:n]) != string(responseData) {
		t.Errorf("Expected to receive %q, got %q", responseData, buf[:n])
	}

	// Verify the response came from the netstack connection, not the physical
	// one — the sender's source port distinguishes the two sockets.
	netstackAddr := netstackConn.LocalAddr().(*net.UDPAddr)
	if fromAddr.Port != netstackAddr.Port {
		t.Errorf("Expected response from netstack port %d, got %d", netstackAddr.Port, fromAddr.Port)
	}
}
|
||||
|
||||
// TestSocketRouting tests that packets for endpoints never seen via netstack
// are routed through the physical socket, even when a netstack connection is
// configured.
func TestSocketRouting(t *testing.T) {
	// Create the SharedBind with a physical UDP socket
	physicalConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create physical UDP connection: %v", err)
	}

	sharedBind, err := New(physicalConn)
	if err != nil {
		t.Fatalf("Failed to create SharedBind: %v", err)
	}
	defer sharedBind.Close()

	// Create a mock "netstack" connection
	netstackConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create netstack UDP connection: %v", err)
	}
	defer netstackConn.Close()

	// Set the netstack connection
	sharedBind.SetNetstackConn(netstackConn)

	// Create a "client" that would receive packets (this simulates a hole-punched client)
	clientConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		t.Fatalf("Failed to create client UDP connection: %v", err)
	}
	defer clientConn.Close()

	clientAddr := clientConn.LocalAddr().(*net.UDPAddr)
	clientAddrPort := clientAddr.AddrPort()

	// Don't inject from netstack - this endpoint is NOT tracked as netstack-sourced
	// So Send should use the physical socket

	endpoint := &wgConn.StdNetEndpoint{AddrPort: clientAddrPort}
	responseData := []byte("response packet via socket")
	err = sharedBind.Send([][]byte{responseData}, endpoint)
	if err != nil {
		t.Fatalf("Send failed: %v", err)
	}

	// The packet should be received by the client from the physical connection
	buf := make([]byte, 1024)
	clientConn.SetReadDeadline(time.Now().Add(2 * time.Second))
	n, fromAddr, err := clientConn.ReadFromUDP(buf)
	if err != nil {
		t.Fatalf("Failed to receive response: %v", err)
	}

	if string(buf[:n]) != string(responseData) {
		t.Errorf("Expected to receive %q, got %q", responseData, buf[:n])
	}

	// Verify the response came from the physical connection, not the netstack
	// one — identified by the sender's source port.
	physicalAddr := physicalConn.LocalAddr().(*net.UDPAddr)
	if fromAddr.Port != physicalAddr.Port {
		t.Errorf("Expected response from physical port %d, got %d", physicalAddr.Port, fromAddr.Port)
	}
}
|
||||
37
blueprint.yaml
Normal file
37
blueprint.yaml
Normal file
@@ -0,0 +1,37 @@
|
||||
resources:
|
||||
resource-nice-id:
|
||||
name: this is my resource
|
||||
protocol: http
|
||||
full-domain: level1.test3.example.com
|
||||
host-header: example.com
|
||||
tls-server-name: example.com
|
||||
auth:
|
||||
pincode: 123456
|
||||
password: sadfasdfadsf
|
||||
sso-enabled: true
|
||||
sso-roles:
|
||||
- Member
|
||||
sso-users:
|
||||
- owen@pangolin.net
|
||||
whitelist-users:
|
||||
- owen@pangolin.net
|
||||
targets:
|
||||
# - site: glossy-plains-viscacha-rat
|
||||
- hostname: localhost
|
||||
method: http
|
||||
port: 8000
|
||||
healthcheck:
|
||||
port: 8000
|
||||
hostname: localhost
|
||||
# - site: glossy-plains-viscacha-rat
|
||||
- hostname: localhost
|
||||
method: http
|
||||
port: 8001
|
||||
resource-nice-id2:
|
||||
name: this is other resource
|
||||
protocol: tcp
|
||||
proxy-port: 3000
|
||||
targets:
|
||||
# - site: glossy-plains-viscacha-rat
|
||||
- hostname: localhost
|
||||
port: 3000
|
||||
104
clients.go
Normal file
104
clients.go
Normal file
@@ -0,0 +1,104 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/fosrl/newt/clients"
|
||||
wgnetstack "github.com/fosrl/newt/clients"
|
||||
"github.com/fosrl/newt/clients/permissions"
|
||||
"github.com/fosrl/newt/logger"
|
||||
"github.com/fosrl/newt/websocket"
|
||||
"golang.zx2c4.com/wireguard/tun/netstack"
|
||||
)
|
||||
|
||||
// wgService is the singleton client-side WireGuard service, created by
// setupClients and torn down by closeClients.
var wgService *clients.WireGuardService

// ready gates the clients* entry points: they are no-ops until setupClients
// has completed successfully.
var ready bool
|
||||
|
||||
func setupClients(client *websocket.Client) {
|
||||
var host = endpoint
|
||||
if strings.HasPrefix(host, "http://") {
|
||||
host = strings.TrimPrefix(host, "http://")
|
||||
} else if strings.HasPrefix(host, "https://") {
|
||||
host = strings.TrimPrefix(host, "https://")
|
||||
}
|
||||
|
||||
host = strings.TrimSuffix(host, "/")
|
||||
|
||||
logger.Debug("Setting up clients with netstack2...")
|
||||
|
||||
// if useNativeInterface is true make sure we have permission to use native interface
|
||||
if useNativeInterface {
|
||||
logger.Debug("Checking permissions for native interface")
|
||||
err := permissions.CheckNativeInterfacePermissions()
|
||||
if err != nil {
|
||||
logger.Fatal("Insufficient permissions to create native TUN interface: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Create WireGuard service
|
||||
wgService, err = wgnetstack.NewWireGuardService(interfaceName, port, mtuInt, host, id, client, dns, useNativeInterface)
|
||||
if err != nil {
|
||||
logger.Fatal("Failed to create WireGuard service: %v", err)
|
||||
}
|
||||
|
||||
client.OnTokenUpdate(func(token string) {
|
||||
wgService.SetToken(token)
|
||||
})
|
||||
|
||||
ready = true
|
||||
}
|
||||
|
||||
// setDownstreamTNetstack hands the main tunnel's netstack to the WireGuard
// client service (if one exists) so it can route through it.
func setDownstreamTNetstack(tnet *netstack.Net) {
	if wgService != nil {
		wgService.SetOthertnet(tnet)
	}
}
|
||||
|
||||
// closeClients shuts down the WireGuard client service and clears the
// package-level reference; safe to call when no service was created.
func closeClients() {
	logger.Info("Closing clients...")
	if wgService != nil {
		wgService.Close()
		// Drop the reference so later clients* calls become no-ops.
		wgService = nil
	}
}
|
||||
|
||||
func clientsHandleNewtConnection(publicKey string, endpoint string, relayPort uint16) {
|
||||
if !ready {
|
||||
return
|
||||
}
|
||||
|
||||
// split off the port from the endpoint
|
||||
parts := strings.Split(endpoint, ":")
|
||||
if len(parts) < 2 {
|
||||
logger.Error("Invalid endpoint format: %s", endpoint)
|
||||
return
|
||||
}
|
||||
endpoint = strings.Join(parts[:len(parts)-1], ":")
|
||||
|
||||
if wgService != nil {
|
||||
wgService.StartHolepunch(publicKey, endpoint, relayPort)
|
||||
}
|
||||
}
|
||||
|
||||
// clientsOnConnect is invoked when the websocket (re)connects; it reloads the
// remote WireGuard configuration. No-op until setupClients has run.
func clientsOnConnect() {
	if !ready {
		return
	}
	if wgService != nil {
		wgService.LoadRemoteConfig()
	}
}
|
||||
|
||||
// clientsStartDirectRelay starts a direct UDP relay from the main tunnel netstack
// to the clients' WireGuard, bypassing the proxy for better performance.
// A failure to start is logged but not fatal: traffic falls back to the
// proxied path.
func clientsStartDirectRelay(tunnelIP string) {
	if !ready {
		return
	}
	if wgService != nil {
		if err := wgService.StartDirectUDPRelay(tunnelIP); err != nil {
			logger.Error("Failed to start direct UDP relay: %v", err)
		}
	}
}
|
||||
1276
clients/clients.go
Normal file
1276
clients/clients.go
Normal file
File diff suppressed because it is too large
Load Diff
8
clients/permissions/permissions_android.go
Normal file
8
clients/permissions/permissions_android.go
Normal file
@@ -0,0 +1,8 @@
|
||||
//go:build android
|
||||
|
||||
package permissions
|
||||
|
||||
// CheckNativeInterfacePermissions always allows permission on Android.
// TUN creation there is mediated by the VpnService API rather than
// process privileges, so there is nothing to check here.
func CheckNativeInterfacePermissions() error {
	return nil
}
|
||||
18
clients/permissions/permissions_darwin.go
Normal file
18
clients/permissions/permissions_darwin.go
Normal file
@@ -0,0 +1,18 @@
|
||||
//go:build darwin && !ios

package permissions

import (
	"fmt"
	"os"
)

// CheckNativeInterfacePermissions checks if the process has sufficient
// permissions to create a native TUN interface on macOS.
// This typically requires root privileges.
func CheckNativeInterfacePermissions() error {
	// Anything other than effective UID 0 is rejected.
	if os.Geteuid() != 0 {
		return fmt.Errorf("insufficient permissions: need root to create TUN interface on macOS")
	}
	return nil
}
|
||||
57
clients/permissions/permissions_freebsd.go
Normal file
57
clients/permissions/permissions_freebsd.go
Normal file
@@ -0,0 +1,57 @@
|
||||
//go:build freebsd

package permissions

import (
	"fmt"
	"os"

	"github.com/fosrl/newt/logger"
)

// tunDevice is the TUN cloning device on FreeBSD; opening it creates a new
// tun interface, so a successful open proves sufficient privileges.
// NOTE: the ifnamsiz/iffTun/iffNoPi constants and the ifReq struct that were
// copied here from the Linux implementation were never used on FreeBSD
// (no ioctl is issued in this file) and have been removed as dead code.
const tunDevice = "/dev/tun"

// CheckNativeInterfacePermissions checks if the process has sufficient
// permissions to create a native TUN interface on FreeBSD.
// This requires root privileges (UID 0).
func CheckNativeInterfacePermissions() error {
	logger.Debug("Checking native interface permissions on FreeBSD")

	// Running as root is always sufficient.
	if os.Geteuid() == 0 {
		logger.Debug("Running as root, sufficient permissions for native TUN interface")
		return nil
	}

	// On FreeBSD, only root can create TUN interfaces.
	// Try to open the TUN device to verify.
	return tryOpenTunDevice()
}

// tryOpenTunDevice attempts to open the TUN device to verify permissions.
// On FreeBSD, /dev/tun is a cloning device that creates a new interface
// when opened; the interface goes away when the descriptor is closed.
func tryOpenTunDevice() error {
	f, err := os.OpenFile(tunDevice, os.O_RDWR, 0)
	if err != nil {
		return fmt.Errorf("cannot open %s: %v (need root privileges)", tunDevice, err)
	}
	defer f.Close()

	logger.Debug("Successfully opened TUN device, sufficient permissions for native TUN interface")
	return nil
}
|
||||
8
clients/permissions/permissions_ios.go
Normal file
8
clients/permissions/permissions_ios.go
Normal file
@@ -0,0 +1,8 @@
|
||||
//go:build ios

package permissions

// CheckNativeInterfacePermissions always allows permission on iOS.
// No local privilege check is performed here; presumably the tunnel device
// is provided by the platform (e.g. NetworkExtension) — confirm with callers.
func CheckNativeInterfacePermissions() error {
	return nil
}
|
||||
96
clients/permissions/permissions_linux.go
Normal file
96
clients/permissions/permissions_linux.go
Normal file
@@ -0,0 +1,96 @@
|
||||
//go:build linux && !android

package permissions

import (
	"fmt"
	"os"
	"unsafe"

	"github.com/fosrl/newt/logger"
	"golang.org/x/sys/unix"
)

const (
	// TUN device constants
	tunDevice = "/dev/net/tun" // cloning device; open + TUNSETIFF creates an interface
	ifnamsiz  = 16             // IFNAMSIZ from <linux/if.h>
	iffTun    = 0x0001         // IFF_TUN flag
	iffNoPi   = 0x1000         // IFF_NO_PI flag (no packet-info header)
	tunSetIff = 0x400454ca     // TUNSETIFF ioctl request number
)

// ifReq is the structure for TUNSETIFF ioctl
type ifReq struct {
	Name  [ifnamsiz]byte
	Flags uint16
	_     [22]byte // padding to match kernel structure
}

// CheckNativeInterfacePermissions checks if the process has sufficient
// permissions to create a native TUN interface on Linux.
// This requires either root privileges (UID 0) or CAP_NET_ADMIN capability.
func CheckNativeInterfacePermissions() error {
	logger.Debug("Checking native interface permissions on Linux")

	// Check if running as root
	if os.Geteuid() == 0 {
		logger.Debug("Running as root, sufficient permissions for native TUN interface")
		return nil
	}

	// Check for CAP_NET_ADMIN capability
	caps := unix.CapUserHeader{
		Version: unix.LINUX_CAPABILITY_VERSION_3,
		Pid:     0, // 0 means current process
	}

	// Version-3 capability sets span two CapUserData entries.
	var data [2]unix.CapUserData
	if err := unix.Capget(&caps, &data[0]); err != nil {
		// Capget failing is not fatal; fall through to the empirical check.
		logger.Debug("Failed to get capabilities: %v, will try creating test TUN", err)
	} else {
		// CAP_NET_ADMIN is capability bit 12
		const CAP_NET_ADMIN = 12
		if data[0].Effective&(1<<CAP_NET_ADMIN) != 0 {
			logger.Debug("Process has CAP_NET_ADMIN capability, sufficient permissions for native TUN interface")
			return nil
		}
		logger.Debug("Process does not have CAP_NET_ADMIN capability in effective set")
	}

	// Actually try to create a TUN interface to verify permissions
	// This is the most reliable check as it tests the actual operation
	return tryCreateTestTun()
}

// tryCreateTestTun attempts to create a temporary TUN interface to verify
// we have the necessary permissions. This tests the actual ioctl call that
// will be used when creating the real interface.
func tryCreateTestTun() error {
	f, err := os.OpenFile(tunDevice, os.O_RDWR, 0)
	if err != nil {
		return fmt.Errorf("cannot open %s: %v (need root or CAP_NET_ADMIN capability)", tunDevice, err)
	}
	defer f.Close()

	// Try to create a TUN interface with a test name
	// Using a random-ish name to avoid conflicts
	var req ifReq
	copy(req.Name[:], "tuntest0")
	req.Flags = iffTun | iffNoPi

	_, _, errno := unix.Syscall(
		unix.SYS_IOCTL,
		f.Fd(),
		uintptr(tunSetIff),
		uintptr(unsafe.Pointer(&req)),
	)

	if errno != 0 {
		return fmt.Errorf("cannot create TUN interface (ioctl TUNSETIFF failed): %v (need root or CAP_NET_ADMIN capability)", errno)
	}

	// Success - the interface will be automatically destroyed when we close the fd
	logger.Debug("Successfully created test TUN interface, sufficient permissions for native TUN interface")
	return nil
}
|
||||
48
clients/permissions/permissions_windows.go
Normal file
48
clients/permissions/permissions_windows.go
Normal file
@@ -0,0 +1,48 @@
|
||||
//go:build windows

package permissions

import (
	"fmt"

	"golang.org/x/sys/windows"
	"golang.org/x/sys/windows/svc"
)

// CheckNativeInterfacePermissions checks if the process has sufficient
// permissions to create a native TUN interface on Windows.
// This requires Administrator privileges and must be running as a Windows service.
func CheckNativeInterfacePermissions() error {
	// Check if running as a Windows service
	isService, err := svc.IsWindowsService()
	if err != nil {
		return fmt.Errorf("failed to check if running as Windows service: %v", err)
	}
	if !isService {
		return fmt.Errorf("native TUN interface requires running as a Windows service")
	}

	// Build the SID for the built-in Administrators group so we can test
	// whether the current token is a member of it.
	var sid *windows.SID
	err = windows.AllocateAndInitializeSid(
		&windows.SECURITY_NT_AUTHORITY,
		2,
		windows.SECURITY_BUILTIN_DOMAIN_RID,
		windows.DOMAIN_ALIAS_RID_ADMINS,
		0, 0, 0, 0, 0, 0,
		&sid)
	if err != nil {
		return fmt.Errorf("failed to initialize SID: %v", err)
	}
	defer windows.FreeSid(sid)

	// Token(0) denotes the current process token.
	token := windows.Token(0)
	member, err := token.IsMember(sid)
	if err != nil {
		return fmt.Errorf("failed to check admin group membership: %v", err)
	}

	if !member {
		return fmt.Errorf("insufficient permissions: need Administrator to create TUN interface on Windows")
	}
	return nil
}
|
||||
520
common.go
Normal file
520
common.go
Normal file
@@ -0,0 +1,520 @@
|
||||
package main
|
||||
|
||||
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"math/rand"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"time"

	"github.com/fosrl/newt/internal/telemetry"
	"github.com/fosrl/newt/logger"
	"github.com/fosrl/newt/proxy"
	"github.com/fosrl/newt/websocket"
	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
	"golang.zx2c4.com/wireguard/tun/netstack"
	"gopkg.in/yaml.v3"
)
|
||||
|
||||
// msgHealthFileWriteFailed is the shared log format used wherever writing
// the health file fails.
const msgHealthFileWriteFailed = "Failed to write health file: %v"
|
||||
|
||||
func ping(tnet *netstack.Net, dst string, timeout time.Duration) (time.Duration, error) {
|
||||
// logger.Debug("Pinging %s", dst)
|
||||
socket, err := tnet.Dial("ping4", dst)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to create ICMP socket: %w", err)
|
||||
}
|
||||
defer socket.Close()
|
||||
|
||||
// Set socket buffer sizes to handle high bandwidth scenarios
|
||||
if tcpConn, ok := socket.(interface{ SetReadBuffer(int) error }); ok {
|
||||
tcpConn.SetReadBuffer(64 * 1024)
|
||||
}
|
||||
if tcpConn, ok := socket.(interface{ SetWriteBuffer(int) error }); ok {
|
||||
tcpConn.SetWriteBuffer(64 * 1024)
|
||||
}
|
||||
|
||||
requestPing := icmp.Echo{
|
||||
Seq: rand.Intn(1 << 16),
|
||||
Data: []byte("newtping"),
|
||||
}
|
||||
|
||||
icmpBytes, err := (&icmp.Message{Type: ipv4.ICMPTypeEcho, Code: 0, Body: &requestPing}).Marshal(nil)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to marshal ICMP message: %w", err)
|
||||
}
|
||||
|
||||
if err := socket.SetReadDeadline(time.Now().Add(timeout)); err != nil {
|
||||
return 0, fmt.Errorf("failed to set read deadline: %w", err)
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
_, err = socket.Write(icmpBytes)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to write ICMP packet: %w", err)
|
||||
}
|
||||
|
||||
// Use larger buffer for reading to handle potential network congestion
|
||||
readBuffer := make([]byte, 1500)
|
||||
n, err := socket.Read(readBuffer)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to read ICMP packet: %w", err)
|
||||
}
|
||||
|
||||
replyPacket, err := icmp.ParseMessage(1, readBuffer[:n])
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to parse ICMP packet: %w", err)
|
||||
}
|
||||
|
||||
replyPing, ok := replyPacket.Body.(*icmp.Echo)
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("invalid reply type: got %T, want *icmp.Echo", replyPacket.Body)
|
||||
}
|
||||
|
||||
if !bytes.Equal(replyPing.Data, requestPing.Data) || replyPing.Seq != requestPing.Seq {
|
||||
return 0, fmt.Errorf("invalid ping reply: got seq=%d data=%q, want seq=%d data=%q",
|
||||
replyPing.Seq, replyPing.Data, requestPing.Seq, requestPing.Data)
|
||||
}
|
||||
|
||||
latency := time.Since(start)
|
||||
|
||||
// logger.Debug("Ping to %s successful, latency: %v", dst, latency)
|
||||
|
||||
return latency, nil
|
||||
}
|
||||
|
||||
// reliablePing performs multiple ping attempts with adaptive timeout
|
||||
func reliablePing(tnet *netstack.Net, dst string, baseTimeout time.Duration, maxAttempts int) (time.Duration, error) {
|
||||
var lastErr error
|
||||
var totalLatency time.Duration
|
||||
successCount := 0
|
||||
|
||||
for attempt := 1; attempt <= maxAttempts; attempt++ {
|
||||
// Adaptive timeout: increase timeout for later attempts
|
||||
timeout := baseTimeout + time.Duration(attempt-1)*500*time.Millisecond
|
||||
|
||||
// Add jitter to prevent thundering herd
|
||||
jitter := time.Duration(rand.Intn(100)) * time.Millisecond
|
||||
timeout += jitter
|
||||
|
||||
latency, err := ping(tnet, dst, timeout)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
logger.Debug("Ping attempt %d/%d failed: %v", attempt, maxAttempts, err)
|
||||
|
||||
// Brief pause between attempts with exponential backoff
|
||||
if attempt < maxAttempts {
|
||||
backoff := time.Duration(attempt) * 50 * time.Millisecond
|
||||
time.Sleep(backoff)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
totalLatency += latency
|
||||
successCount++
|
||||
|
||||
// If we get at least one success, we can return early for health checks
|
||||
if successCount > 0 {
|
||||
avgLatency := totalLatency / time.Duration(successCount)
|
||||
// logger.Debug("Reliable ping succeeded after %d attempts, avg latency: %v", attempt, avgLatency)
|
||||
return avgLatency, nil
|
||||
}
|
||||
}
|
||||
|
||||
if successCount == 0 {
|
||||
return 0, fmt.Errorf("all %d ping attempts failed, last error: %v", maxAttempts, lastErr)
|
||||
}
|
||||
|
||||
return totalLatency / time.Duration(successCount), nil
|
||||
}
|
||||
|
||||
func pingWithRetry(tnet *netstack.Net, dst string, timeout time.Duration) (stopChan chan struct{}, err error) {
|
||||
|
||||
if healthFile != "" {
|
||||
err = os.Remove(healthFile)
|
||||
if err != nil {
|
||||
logger.Error("Failed to remove health file: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
initialMaxAttempts = 5
|
||||
initialRetryDelay = 2 * time.Second
|
||||
maxRetryDelay = 60 * time.Second // Cap the maximum delay
|
||||
)
|
||||
|
||||
stopChan = make(chan struct{})
|
||||
attempt := 1
|
||||
retryDelay := initialRetryDelay
|
||||
|
||||
// First try with the initial parameters
|
||||
logger.Debug("Ping attempt %d", attempt)
|
||||
if latency, err := ping(tnet, dst, timeout); err == nil {
|
||||
// Successful ping
|
||||
logger.Debug("Ping latency: %v", latency)
|
||||
logger.Info("Tunnel connection to server established successfully!")
|
||||
if healthFile != "" {
|
||||
err := os.WriteFile(healthFile, []byte("ok"), 0644)
|
||||
if err != nil {
|
||||
logger.Warn(msgHealthFileWriteFailed, err)
|
||||
}
|
||||
}
|
||||
return stopChan, nil
|
||||
} else {
|
||||
logger.Warn("Ping attempt %d failed: %v", attempt, err)
|
||||
}
|
||||
|
||||
// Start a goroutine that will attempt pings indefinitely with increasing delays
|
||||
go func() {
|
||||
attempt = 2 // Continue from attempt 2
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-stopChan:
|
||||
return
|
||||
default:
|
||||
logger.Debug("Ping attempt %d", attempt)
|
||||
|
||||
if latency, err := ping(tnet, dst, timeout); err != nil {
|
||||
logger.Warn("Ping attempt %d failed: %v", attempt, err)
|
||||
|
||||
// Increase delay after certain thresholds but cap it
|
||||
if attempt%5 == 0 && retryDelay < maxRetryDelay {
|
||||
retryDelay = time.Duration(float64(retryDelay) * 1.5)
|
||||
if retryDelay > maxRetryDelay {
|
||||
retryDelay = maxRetryDelay
|
||||
}
|
||||
logger.Info("Increasing ping retry delay to %v", retryDelay)
|
||||
}
|
||||
|
||||
time.Sleep(retryDelay)
|
||||
attempt++
|
||||
} else {
|
||||
// Successful ping
|
||||
logger.Debug("Ping succeeded after %d attempts", attempt)
|
||||
logger.Debug("Ping latency: %v", latency)
|
||||
logger.Info("Tunnel connection to server established successfully!")
|
||||
if healthFile != "" {
|
||||
err := os.WriteFile(healthFile, []byte("ok"), 0644)
|
||||
if err != nil {
|
||||
logger.Warn(msgHealthFileWriteFailed, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
case <-pingStopChan:
|
||||
// Stop the goroutine when signaled
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Return an error for the first batch of attempts (to maintain compatibility with existing code)
|
||||
return stopChan, fmt.Errorf("initial ping attempts failed, continuing in background")
|
||||
}
|
||||
|
||||
// startPingCheck launches a background loop that periodically pings serverIP
// over the tunnel netstack. It adapts the per-ping timeout to recent observed
// latencies, backs the check interval off under sustained failure, notifies
// the server when the connection is considered lost, and maintains the
// optional health file. Closing the returned channel stops the loop.
func startPingCheck(tnet *netstack.Net, serverIP string, client *websocket.Client, tunnelID string) chan struct{} {
	maxInterval := 6 * time.Second
	currentInterval := pingInterval
	consecutiveFailures := 0
	connectionLost := false

	// Track recent latencies for adaptive timeout calculation
	recentLatencies := make([]time.Duration, 0, 10)

	pingStopChan := make(chan struct{})

	go func() {
		ticker := time.NewTicker(currentInterval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// Calculate adaptive timeout based on recent latencies:
				// 3x the rolling average, clamped to [pingTimeout, 15s].
				adaptiveTimeout := pingTimeout
				if len(recentLatencies) > 0 {
					var sum time.Duration
					for _, lat := range recentLatencies {
						sum += lat
					}
					avgLatency := sum / time.Duration(len(recentLatencies))
					// Use 3x average latency as timeout, with minimum of pingTimeout
					adaptiveTimeout = avgLatency * 3
					if adaptiveTimeout < pingTimeout {
						adaptiveTimeout = pingTimeout
					}
					if adaptiveTimeout > 15*time.Second {
						adaptiveTimeout = 15 * time.Second
					}
				}

				// Use reliable ping with multiple attempts
				maxAttempts := 2
				if consecutiveFailures > 4 {
					maxAttempts = 4 // More attempts when connection is unstable
				}

				latency, err := reliablePing(tnet, serverIP, adaptiveTimeout, maxAttempts)
				if err != nil {
					consecutiveFailures++

					// Track recent latencies (add a high value for failures)
					recentLatencies = append(recentLatencies, adaptiveTimeout)
					if len(recentLatencies) > 10 {
						recentLatencies = recentLatencies[1:]
					}

					// First failure is only debug-worthy; repeats warn.
					if consecutiveFailures < 2 {
						logger.Debug("Periodic ping failed (%d consecutive failures): %v", consecutiveFailures, err)
					} else {
						logger.Warn("Periodic ping failed (%d consecutive failures): %v", consecutiveFailures, err)
					}

					// More lenient threshold for declaring connection lost under load
					failureThreshold := 4
					if consecutiveFailures >= failureThreshold && currentInterval < maxInterval {
						if !connectionLost {
							connectionLost = true
							logger.Warn("Connection to server lost after %d failures. Continuous reconnection attempts will be made.", consecutiveFailures)
							if tunnelID != "" {
								telemetry.IncReconnect(context.Background(), tunnelID, "client", telemetry.ReasonTimeout)
							}
							stopFunc = client.SendMessageInterval("newt/ping/request", map[string]interface{}{}, 3*time.Second)
							// Send registration message to the server for backward compatibility
							err := client.SendMessage("newt/wg/register", map[string]interface{}{
								"publicKey":           publicKey.String(),
								"backwardsCompatible": true,
							})
							if err != nil {
								logger.Error("Failed to send registration message: %v", err)
							}
							// Drop the health file so external monitors observe the outage.
							if healthFile != "" {
								err = os.Remove(healthFile)
								if err != nil {
									logger.Error("Failed to remove health file: %v", err)
								}
							}
						}
						// Back off the check interval while the link is down.
						currentInterval = time.Duration(float64(currentInterval) * 1.3) // Slower increase
						if currentInterval > maxInterval {
							currentInterval = maxInterval
						}
						ticker.Reset(currentInterval)
						logger.Debug("Increased ping check interval to %v due to consecutive failures", currentInterval)
					}
				} else {
					// Track recent latencies
					recentLatencies = append(recentLatencies, latency)
					// Record tunnel latency (limit sampling to this periodic check)
					if tunnelID != "" {
						telemetry.ObserveTunnelLatency(context.Background(), tunnelID, "wireguard", latency.Seconds())
					}
					if len(recentLatencies) > 10 {
						recentLatencies = recentLatencies[1:]
					}

					if connectionLost {
						connectionLost = false
						logger.Info("Connection to server restored after %d failures!", consecutiveFailures)
						if healthFile != "" {
							err := os.WriteFile(healthFile, []byte("ok"), 0644)
							if err != nil {
								logger.Warn("Failed to write health file: %v", err)
							}
						}
					}
					// Gradually shrink the interval back toward pingInterval.
					if currentInterval > pingInterval {
						currentInterval = time.Duration(float64(currentInterval) * 0.9) // Slower decrease
						if currentInterval < pingInterval {
							currentInterval = pingInterval
						}
						ticker.Reset(currentInterval)
						logger.Debug("Decreased ping check interval to %v after successful ping", currentInterval)
					}
					consecutiveFailures = 0
				}
			case <-pingStopChan:
				logger.Info("Stopping ping check")
				return
			}
		}
	}()

	return pingStopChan
}
|
||||
|
||||
func parseTargetData(data interface{}) (TargetData, error) {
|
||||
var targetData TargetData
|
||||
jsonData, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
logger.Info("Error marshaling data: %v", err)
|
||||
return targetData, err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(jsonData, &targetData); err != nil {
|
||||
logger.Info("Error unmarshaling target data: %v", err)
|
||||
return targetData, err
|
||||
}
|
||||
return targetData, nil
|
||||
}
|
||||
|
||||
func updateTargets(pm *proxy.ProxyManager, action string, tunnelIP string, proto string, targetData TargetData) error {
|
||||
for _, t := range targetData.Targets {
|
||||
// Split the first number off of the target with : separator and use as the port
|
||||
parts := strings.Split(t, ":")
|
||||
if len(parts) != 3 {
|
||||
logger.Info("Invalid target format: %s", t)
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the port as an int
|
||||
port := 0
|
||||
_, err := fmt.Sscanf(parts[0], "%d", &port)
|
||||
if err != nil {
|
||||
logger.Info("Invalid port: %s", parts[0])
|
||||
continue
|
||||
}
|
||||
|
||||
switch action {
|
||||
case "add":
|
||||
target := parts[1] + ":" + parts[2]
|
||||
|
||||
// Call updown script if provided
|
||||
processedTarget := target
|
||||
if updownScript != "" {
|
||||
newTarget, err := executeUpdownScript(action, proto, target)
|
||||
if err != nil {
|
||||
logger.Warn("Updown script error: %v", err)
|
||||
} else if newTarget != "" {
|
||||
processedTarget = newTarget
|
||||
}
|
||||
}
|
||||
|
||||
// Only remove the specific target if it exists
|
||||
err := pm.RemoveTarget(proto, tunnelIP, port)
|
||||
if err != nil {
|
||||
// Ignore "target not found" errors as this is expected for new targets
|
||||
if !strings.Contains(err.Error(), "target not found") {
|
||||
logger.Error("Failed to remove existing target: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Add the new target
|
||||
pm.AddTarget(proto, tunnelIP, port, processedTarget)
|
||||
|
||||
case "remove":
|
||||
logger.Info("Removing target with port %d", port)
|
||||
|
||||
target := parts[1] + ":" + parts[2]
|
||||
|
||||
// Call updown script if provided
|
||||
if updownScript != "" {
|
||||
_, err := executeUpdownScript(action, proto, target)
|
||||
if err != nil {
|
||||
logger.Warn("Updown script error: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
err := pm.RemoveTarget(proto, tunnelIP, port)
|
||||
if err != nil {
|
||||
logger.Error("Failed to remove target: %v", err)
|
||||
return err
|
||||
}
|
||||
default:
|
||||
logger.Info("Unknown action: %s", action)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func executeUpdownScript(action, proto, target string) (string, error) {
|
||||
if updownScript == "" {
|
||||
return target, nil
|
||||
}
|
||||
|
||||
// Split the updownScript in case it contains spaces (like "/usr/bin/python3 script.py")
|
||||
parts := strings.Fields(updownScript)
|
||||
if len(parts) == 0 {
|
||||
return target, fmt.Errorf("invalid updown script command")
|
||||
}
|
||||
|
||||
var cmd *exec.Cmd
|
||||
if len(parts) == 1 {
|
||||
// If it's a single executable
|
||||
logger.Info("Executing updown script: %s %s %s %s", updownScript, action, proto, target)
|
||||
cmd = exec.Command(parts[0], action, proto, target)
|
||||
} else {
|
||||
// If it includes interpreter and script
|
||||
args := append(parts[1:], action, proto, target)
|
||||
logger.Info("Executing updown script: %s %s %s %s %s", parts[0], strings.Join(parts[1:], " "), action, proto, target)
|
||||
cmd = exec.Command(parts[0], args...)
|
||||
}
|
||||
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
if exitErr, ok := err.(*exec.ExitError); ok {
|
||||
return "", fmt.Errorf("updown script execution failed (exit code %d): %s",
|
||||
exitErr.ExitCode(), string(exitErr.Stderr))
|
||||
}
|
||||
return "", fmt.Errorf("updown script execution failed: %v", err)
|
||||
}
|
||||
|
||||
// If the script returns a new target, use it
|
||||
newTarget := strings.TrimSpace(string(output))
|
||||
if newTarget != "" {
|
||||
logger.Info("Updown script returned new target: %s", newTarget)
|
||||
return newTarget, nil
|
||||
}
|
||||
|
||||
return target, nil
|
||||
}
|
||||
|
||||
func sendBlueprint(client *websocket.Client) error {
|
||||
if blueprintFile == "" {
|
||||
return nil
|
||||
}
|
||||
// try to read the blueprint file
|
||||
blueprintData, err := os.ReadFile(blueprintFile)
|
||||
if err != nil {
|
||||
logger.Error("Failed to read blueprint file: %v", err)
|
||||
} else {
|
||||
// first we should convert the yaml to json and error if the yaml is bad
|
||||
var yamlObj interface{}
|
||||
var blueprintJsonData string
|
||||
|
||||
err = yaml.Unmarshal(blueprintData, &yamlObj)
|
||||
if err != nil {
|
||||
logger.Error("Failed to parse blueprint YAML: %v", err)
|
||||
} else {
|
||||
// convert to json
|
||||
jsonBytes, err := json.Marshal(yamlObj)
|
||||
if err != nil {
|
||||
logger.Error("Failed to convert blueprint to JSON: %v", err)
|
||||
} else {
|
||||
blueprintJsonData = string(jsonBytes)
|
||||
logger.Debug("Converted blueprint to JSON: %s", blueprintJsonData)
|
||||
}
|
||||
}
|
||||
|
||||
// if we have valid json data, we can send it to the server
|
||||
if blueprintJsonData == "" {
|
||||
logger.Error("No valid blueprint JSON data to send to server")
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Info("Sending blueprint to server for application")
|
||||
|
||||
// send the blueprint data to the server
|
||||
err = client.SendMessage("newt/blueprint/apply", map[string]interface{}{
|
||||
"blueprint": blueprintJsonData,
|
||||
})
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
44
device/tun_unix.go
Normal file
44
device/tun_unix.go
Normal file
@@ -0,0 +1,44 @@
|
||||
//go:build !windows
|
||||
|
||||
package device
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
"golang.org/x/sys/unix"
|
||||
"golang.zx2c4.com/wireguard/ipc"
|
||||
"golang.zx2c4.com/wireguard/tun"
|
||||
)
|
||||
|
||||
func CreateTUNFromFD(tunFd uint32, mtuInt int) (tun.Device, error) {
|
||||
dupTunFd, err := unix.Dup(int(tunFd))
|
||||
if err != nil {
|
||||
logger.Error("Unable to dup tun fd: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = unix.SetNonblock(dupTunFd, true)
|
||||
if err != nil {
|
||||
unix.Close(dupTunFd)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
file := os.NewFile(uintptr(dupTunFd), "/dev/tun")
|
||||
device, err := tun.CreateTUNFromFile(file, mtuInt)
|
||||
if err != nil {
|
||||
file.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return device, nil
|
||||
}
|
||||
|
||||
// UapiOpen opens the UAPI socket file for the named WireGuard interface.
func UapiOpen(interfaceName string) (*os.File, error) {
	return ipc.UAPIOpen(interfaceName)
}
|
||||
|
||||
// UapiListen starts listening on the UAPI socket for the named interface,
// using the file previously obtained from UapiOpen.
func UapiListen(interfaceName string, fileUAPI *os.File) (net.Listener, error) {
	return ipc.UAPIListen(interfaceName, fileUAPI)
}
|
||||
25
device/tun_windows.go
Normal file
25
device/tun_windows.go
Normal file
@@ -0,0 +1,25 @@
|
||||
//go:build windows
|
||||
|
||||
package device
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"golang.zx2c4.com/wireguard/ipc"
|
||||
"golang.zx2c4.com/wireguard/tun"
|
||||
)
|
||||
|
||||
// CreateTUNFromFD is unsupported on Windows; creating a TUN device from a
// file descriptor is a Unix-only mechanism, so this always returns an error.
// The parameters exist only to mirror the Unix signature.
func CreateTUNFromFD(tunFd uint32, mtuInt int) (tun.Device, error) {
	return nil, errors.New("CreateTUNFromFile not supported on Windows")
}
|
||||
|
||||
// UapiOpen is a no-op on Windows: the Windows UAPI listener does not take a
// pre-opened file (see UapiListen), so this returns (nil, nil).
// NOTE(review): callers must tolerate the nil *os.File — verify call sites.
func UapiOpen(interfaceName string) (*os.File, error) {
	return nil, nil
}
|
||||
|
||||
// UapiListen starts the WireGuard UAPI listener for the named interface.
// The fileUAPI parameter is accepted only for signature parity with the
// Unix implementation and is ignored here.
func UapiListen(interfaceName string, fileUAPI *os.File) (net.Listener, error) {
	// On Windows, UAPIListen only takes one parameter
	return ipc.UAPIListen(interfaceName)
}
|
||||
41
docker-compose.metrics.collector.yml
Normal file
41
docker-compose.metrics.collector.yml
Normal file
@@ -0,0 +1,41 @@
|
||||
services:
|
||||
newt:
|
||||
build: .
|
||||
image: newt:dev
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
- NEWT_METRICS_PROMETHEUS_ENABLED=false # important: disable direct /metrics scraping
|
||||
- NEWT_METRICS_OTLP_ENABLED=true # OTLP to the Collector
|
||||
# optional:
|
||||
# - NEWT_METRICS_INCLUDE_TUNNEL_ID=false
|
||||
# When using the Collector pattern, do NOT map the Newt admin/metrics port
|
||||
# (2112) on the application service. Mapping 2112 here can cause port
|
||||
# conflicts and may result in duplicated Prometheus scraping (app AND
|
||||
# collector being scraped for the same metrics). Instead either:
|
||||
# - leave ports unset on the app service (recommended), or
|
||||
# - map 2112 only on a dedicated metrics/collector service that is
|
||||
# responsible for exposing metrics to Prometheus.
|
||||
# Example: do NOT map here
|
||||
# ports: []
|
||||
# Example: map 2112 only on a collector service
|
||||
# collector:
|
||||
# ports:
|
||||
# - "2112:2112" # collector's prometheus exporter (scraped by Prometheus)
|
||||
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector-contrib:latest
|
||||
command: ["--config=/etc/otelcol/config.yaml"]
|
||||
volumes:
|
||||
- ./examples/otel-collector.yaml:/etc/otelcol/config.yaml:ro
|
||||
ports:
|
||||
- "4317:4317" # OTLP gRPC
|
||||
- "8889:8889" # Prometheus Exporter (scraped by Prometheus)
|
||||
|
||||
prometheus:
|
||||
image: prom/prometheus:latest
|
||||
volumes:
|
||||
- ./examples/prometheus.with-collector.yml:/etc/prometheus/prometheus.yml:ro
|
||||
ports:
|
||||
- "9090:9090"
|
||||
|
||||
56
docker-compose.metrics.yml
Normal file
56
docker-compose.metrics.yml
Normal file
@@ -0,0 +1,56 @@
|
||||
name: Newt-Metrics
|
||||
services:
|
||||
# Recommended Variant A: Direct Prometheus scrape of Newt (/metrics)
|
||||
# Optional: You may add the Collector service and enable OTLP export, but do NOT
|
||||
# scrape both Newt and the Collector for the same process.
|
||||
|
||||
newt:
|
||||
build: .
|
||||
image: newt:dev
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
OTEL_SERVICE_NAME: newt
|
||||
NEWT_METRICS_PROMETHEUS_ENABLED: "true"
|
||||
NEWT_METRICS_OTLP_ENABLED: "false" # avoid double-scrape by default
|
||||
NEWT_ADMIN_ADDR: ":2112"
|
||||
# Base NEWT configuration
|
||||
PANGOLIN_ENDPOINT: ${PANGOLIN_ENDPOINT}
|
||||
NEWT_ID: ${NEWT_ID}
|
||||
NEWT_SECRET: ${NEWT_SECRET}
|
||||
LOG_LEVEL: "DEBUG"
|
||||
ports:
|
||||
- "2112:2112"
|
||||
|
||||
# Optional Variant B: Enable the Collector and switch Prometheus scrape to it.
|
||||
# collector:
|
||||
# image: otel/opentelemetry-collector-contrib:0.136.0
|
||||
# command: ["--config=/etc/otelcol/config.yaml"]
|
||||
# volumes:
|
||||
# - ./examples/otel-collector.yaml:/etc/otelcol/config.yaml:ro
|
||||
# ports:
|
||||
# - "4317:4317" # OTLP gRPC in
|
||||
# - "8889:8889" # Prometheus scrape out
|
||||
|
||||
prometheus:
|
||||
image: prom/prometheus:v3.6.0
|
||||
volumes:
|
||||
- ./examples/prometheus.yml:/etc/prometheus/prometheus.yml:ro
|
||||
ports:
|
||||
- "9090:9090"
|
||||
|
||||
grafana:
|
||||
image: grafana/grafana:12.2.0
|
||||
container_name: newt-metrics-grafana
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_USER=admin
|
||||
- GF_SECURITY_ADMIN_PASSWORD=admin
|
||||
ports:
|
||||
- "3005:3000"
|
||||
depends_on:
|
||||
- prometheus
|
||||
volumes:
|
||||
- ./examples/grafana/provisioning/datasources:/etc/grafana/provisioning/datasources:ro
|
||||
- ./examples/grafana/provisioning/dashboards:/etc/grafana/provisioning/dashboards:ro
|
||||
- ./examples/grafana/dashboards:/var/lib/grafana/dashboards:ro
|
||||
@@ -4,7 +4,7 @@ services:
|
||||
container_name: newt
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- PANGOLIN_ENDPOINT=https://example.com
|
||||
- PANGOLIN_ENDPOINT=https://app.pangolin.net
|
||||
- NEWT_ID=2ix2t8xk22ubpfy
|
||||
- NEWT_SECRET=nnisrfsdfc7prqsp9ewo1dvtvci50j5uiqotez00dgap0ii2
|
||||
- LOG_LEVEL=DEBUG
|
||||
449
docker/docker.go
Normal file
449
docker/docker.go
Normal file
@@ -0,0 +1,449 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/fosrl/newt/logger"
|
||||
)
|
||||
|
||||
// Container represents a Docker container
|
||||
type Container struct {
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Image string `json:"image"`
|
||||
State string `json:"state"`
|
||||
Status string `json:"status"`
|
||||
Ports []Port `json:"ports"`
|
||||
Labels map[string]string `json:"labels"`
|
||||
Created int64 `json:"created"`
|
||||
Networks map[string]Network `json:"networks"`
|
||||
Hostname string `json:"hostname"` // added to use hostname if available instead of network address
|
||||
|
||||
}
|
||||
|
||||
// Port represents a port mapping for a Docker container
|
||||
type Port struct {
|
||||
PrivatePort int `json:"privatePort"`
|
||||
PublicPort int `json:"publicPort,omitempty"`
|
||||
Type string `json:"type"`
|
||||
IP string `json:"ip,omitempty"`
|
||||
}
|
||||
|
||||
// Network represents network information for a Docker container
|
||||
type Network struct {
|
||||
NetworkID string `json:"networkId"`
|
||||
EndpointID string `json:"endpointId"`
|
||||
Gateway string `json:"gateway,omitempty"`
|
||||
IPAddress string `json:"ipAddress,omitempty"`
|
||||
IPPrefixLen int `json:"ipPrefixLen,omitempty"`
|
||||
IPv6Gateway string `json:"ipv6Gateway,omitempty"`
|
||||
GlobalIPv6Address string `json:"globalIPv6Address,omitempty"`
|
||||
GlobalIPv6PrefixLen int `json:"globalIPv6PrefixLen,omitempty"`
|
||||
MacAddress string `json:"macAddress,omitempty"`
|
||||
Aliases []string `json:"aliases,omitempty"`
|
||||
DNSNames []string `json:"dnsNames,omitempty"`
|
||||
}
|
||||
|
||||
// Strcuture parts of docker api endpoint
|
||||
type dockerHost struct {
|
||||
protocol string // e.g. unix, http, tcp, ssh
|
||||
address string // e.g. "/var/run/docker.sock" or "host:port"
|
||||
}
|
||||
|
||||
// Parse the docker api endpoint into its parts
|
||||
func parseDockerHost(raw string) (dockerHost, error) {
|
||||
switch {
|
||||
case strings.HasPrefix(raw, "unix://"):
|
||||
return dockerHost{"unix", strings.TrimPrefix(raw, "unix://")}, nil
|
||||
case strings.HasPrefix(raw, "ssh://"):
|
||||
// SSH is treated as TCP-like transport by the docker client
|
||||
return dockerHost{"ssh", strings.TrimPrefix(raw, "ssh://")}, nil
|
||||
case strings.HasPrefix(raw, "tcp://"), strings.HasPrefix(raw, "http://"), strings.HasPrefix(raw, "https://"):
|
||||
s := raw
|
||||
s = strings.TrimPrefix(s, "tcp://")
|
||||
s = strings.TrimPrefix(s, "http://")
|
||||
s = strings.TrimPrefix(s, "https://")
|
||||
return dockerHost{"tcp", s}, nil
|
||||
case strings.HasPrefix(raw, "/"):
|
||||
// Absolute path without scheme - treat as unix socket
|
||||
return dockerHost{"unix", raw}, nil
|
||||
default:
|
||||
// For relative paths or other formats, also default to unix
|
||||
return dockerHost{"unix", raw}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// CheckSocket checks if Docker socket is available
|
||||
func CheckSocket(socketPath string) bool {
|
||||
// Use the provided socket path or default to standard location
|
||||
if socketPath == "" {
|
||||
socketPath = "unix:///var/run/docker.sock"
|
||||
}
|
||||
|
||||
// Ensure the socket path is properly formatted
|
||||
if !strings.Contains(socketPath, "://") {
|
||||
// If no scheme provided, assume unix socket
|
||||
socketPath = "unix://" + socketPath
|
||||
}
|
||||
|
||||
host, err := parseDockerHost(socketPath)
|
||||
if err != nil {
|
||||
logger.Debug("Invalid Docker socket path '%s': %v", socketPath, err)
|
||||
return false
|
||||
}
|
||||
protocol := host.protocol
|
||||
addr := host.address
|
||||
|
||||
// ssh might need different verification, but tcp works for basic reachability
|
||||
conn, err := net.DialTimeout(protocol, addr, 2*time.Second)
|
||||
if err != nil {
|
||||
logger.Debug("Docker not reachable via %s at %s: %v", protocol, addr, err)
|
||||
return false
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
logger.Debug("Docker reachable via %s at %s", protocol, addr)
|
||||
return true
|
||||
}
|
||||
|
||||
// IsWithinHostNetwork checks if a provided target is within the host container network
|
||||
func IsWithinHostNetwork(socketPath string, targetAddress string, targetPort int) (bool, error) {
|
||||
// Always enforce network validation
|
||||
containers, err := ListContainers(socketPath, true)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Determine if given an IP address
|
||||
var parsedTargetAddressIp = net.ParseIP(targetAddress)
|
||||
|
||||
// If we can find the passed hostname/IP address in the networks or as the container name, it is valid and can add it
|
||||
for _, c := range containers {
|
||||
for _, network := range c.Networks {
|
||||
// If the target address is not an IP address, use the container name
|
||||
if parsedTargetAddressIp == nil {
|
||||
if c.Name == targetAddress {
|
||||
for _, port := range c.Ports {
|
||||
if port.PublicPort == targetPort || port.PrivatePort == targetPort {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
//If the IP address matches, check the ports being mapped too
|
||||
if network.IPAddress == targetAddress {
|
||||
for _, port := range c.Ports {
|
||||
if port.PublicPort == targetPort || port.PrivatePort == targetPort {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
combinedTargetAddress := targetAddress + ":" + strconv.Itoa(targetPort)
|
||||
return false, fmt.Errorf("target address not within host container network: %s", combinedTargetAddress)
|
||||
}
|
||||
|
||||
// ListContainers lists all Docker containers with their network information
|
||||
func ListContainers(socketPath string, enforceNetworkValidation bool) ([]Container, error) {
|
||||
// Use the provided socket path or default to standard location
|
||||
if socketPath == "" {
|
||||
socketPath = "unix:///var/run/docker.sock"
|
||||
}
|
||||
|
||||
// Ensure the socket path is properly formatted for the Docker client
|
||||
if !strings.Contains(socketPath, "://") {
|
||||
// If no scheme provided, assume unix socket
|
||||
socketPath = "unix://" + socketPath
|
||||
}
|
||||
|
||||
// Used to filter down containers returned to Pangolin
|
||||
containerFilters := filters.NewArgs()
|
||||
|
||||
// Used to determine if we will send IP addresses or hostnames to Pangolin
|
||||
useContainerIpAddresses := true
|
||||
hostContainerId := ""
|
||||
|
||||
// Create a new Docker client
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Create client with custom socket path
|
||||
cli, err := client.NewClientWithOpts(
|
||||
client.WithHost(socketPath),
|
||||
client.WithAPIVersionNegotiation(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Docker client: %v", err)
|
||||
}
|
||||
|
||||
defer cli.Close()
|
||||
|
||||
hostContainer, err := getHostContainer(ctx, cli)
|
||||
if enforceNetworkValidation && err != nil {
|
||||
return nil, fmt.Errorf("network validation enforced, cannot validate due to: %w", err)
|
||||
}
|
||||
|
||||
// We may not be able to get back host container in scenarios like running the container in network mode 'host'
|
||||
if hostContainer != nil {
|
||||
// We can use the host container to filter out the list of returned containers
|
||||
hostContainerId = hostContainer.ID
|
||||
|
||||
for hostContainerNetworkName := range hostContainer.NetworkSettings.Networks {
|
||||
// If we're enforcing network validation, we'll filter on the host containers networks
|
||||
if enforceNetworkValidation {
|
||||
containerFilters.Add("network", hostContainerNetworkName)
|
||||
}
|
||||
|
||||
// If the container is on the docker bridge network, we will use IP addresses over hostnames
|
||||
if useContainerIpAddresses && hostContainerNetworkName != "bridge" {
|
||||
useContainerIpAddresses = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// List containers
|
||||
containers, err := cli.ContainerList(ctx, container.ListOptions{All: true, Filters: containerFilters})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list containers: %v", err)
|
||||
}
|
||||
|
||||
var dockerContainers []Container
|
||||
for _, c := range containers {
|
||||
// Short ID like docker ps
|
||||
shortId := c.ID[:12]
|
||||
|
||||
// Inspect container to get hostname
|
||||
hostname := ""
|
||||
containerInfo, err := cli.ContainerInspect(ctx, c.ID)
|
||||
if err == nil && containerInfo.Config != nil {
|
||||
hostname = containerInfo.Config.Hostname
|
||||
}
|
||||
|
||||
// Skip host container if set
|
||||
if hostContainerId != "" && c.ID == hostContainerId {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get container name (remove leading slash)
|
||||
name := ""
|
||||
if len(c.Names) > 0 {
|
||||
name = strings.TrimPrefix(c.Names[0], "/")
|
||||
}
|
||||
|
||||
// Convert ports
|
||||
var ports []Port
|
||||
for _, port := range c.Ports {
|
||||
dockerPort := Port{
|
||||
PrivatePort: int(port.PrivatePort),
|
||||
Type: port.Type,
|
||||
}
|
||||
if port.PublicPort != 0 {
|
||||
dockerPort.PublicPort = int(port.PublicPort)
|
||||
}
|
||||
if port.IP != "" {
|
||||
dockerPort.IP = port.IP
|
||||
}
|
||||
ports = append(ports, dockerPort)
|
||||
}
|
||||
|
||||
// Get network information by inspecting the container
|
||||
networks := make(map[string]Network)
|
||||
|
||||
// Extract network information from inspection
|
||||
if c.NetworkSettings != nil && c.NetworkSettings.Networks != nil {
|
||||
for networkName, endpoint := range c.NetworkSettings.Networks {
|
||||
dockerNetwork := Network{
|
||||
NetworkID: endpoint.NetworkID,
|
||||
EndpointID: endpoint.EndpointID,
|
||||
Gateway: endpoint.Gateway,
|
||||
IPPrefixLen: endpoint.IPPrefixLen,
|
||||
IPv6Gateway: endpoint.IPv6Gateway,
|
||||
GlobalIPv6Address: endpoint.GlobalIPv6Address,
|
||||
GlobalIPv6PrefixLen: endpoint.GlobalIPv6PrefixLen,
|
||||
MacAddress: endpoint.MacAddress,
|
||||
Aliases: endpoint.Aliases,
|
||||
DNSNames: endpoint.DNSNames,
|
||||
}
|
||||
|
||||
// Use IPs over hostnames/containers as we're on the bridge network
|
||||
if useContainerIpAddresses {
|
||||
dockerNetwork.IPAddress = endpoint.IPAddress
|
||||
}
|
||||
|
||||
networks[networkName] = dockerNetwork
|
||||
}
|
||||
}
|
||||
|
||||
dockerContainer := Container{
|
||||
ID: shortId,
|
||||
Name: name,
|
||||
Image: c.Image,
|
||||
State: c.State,
|
||||
Status: c.Status,
|
||||
Ports: ports,
|
||||
Labels: c.Labels,
|
||||
Created: c.Created,
|
||||
Networks: networks,
|
||||
Hostname: hostname, // added
|
||||
}
|
||||
|
||||
dockerContainers = append(dockerContainers, dockerContainer)
|
||||
}
|
||||
|
||||
return dockerContainers, nil
|
||||
}
|
||||
|
||||
// getHostContainer gets the current container for the current host if possible
|
||||
func getHostContainer(dockerContext context.Context, dockerClient *client.Client) (*container.InspectResponse, error) {
|
||||
// Get hostname from the os
|
||||
hostContainerName, err := os.Hostname()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find hostname for container")
|
||||
}
|
||||
|
||||
// Get host container from the docker socket
|
||||
hostContainer, err := dockerClient.ContainerInspect(dockerContext, hostContainerName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find host container")
|
||||
}
|
||||
|
||||
return &hostContainer, nil
|
||||
}
|
||||
|
||||
// EventCallback defines the function signature for handling Docker events
|
||||
type EventCallback func(containers []Container)
|
||||
|
||||
// EventMonitor handles Docker event monitoring
|
||||
type EventMonitor struct {
|
||||
client *client.Client
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
callback EventCallback
|
||||
socketPath string
|
||||
enforceNetworkValidation bool
|
||||
}
|
||||
|
||||
// NewEventMonitor creates a new Docker event monitor
|
||||
func NewEventMonitor(socketPath string, enforceNetworkValidation bool, callback EventCallback) (*EventMonitor, error) {
|
||||
if socketPath == "" {
|
||||
socketPath = "unix:///var/run/docker.sock"
|
||||
}
|
||||
|
||||
if !strings.Contains(socketPath, "://") {
|
||||
socketPath = "unix://" + socketPath
|
||||
}
|
||||
|
||||
cli, err := client.NewClientWithOpts(
|
||||
client.WithHost(socketPath),
|
||||
client.WithAPIVersionNegotiation(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Docker client: %v", err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
return &EventMonitor{
|
||||
client: cli,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
callback: callback,
|
||||
socketPath: socketPath,
|
||||
enforceNetworkValidation: enforceNetworkValidation,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start begins monitoring Docker events
|
||||
func (em *EventMonitor) Start() error {
|
||||
logger.Debug("Starting Docker event monitoring")
|
||||
|
||||
// Filter for container events we care about
|
||||
eventFilters := filters.NewArgs()
|
||||
eventFilters.Add("type", "container")
|
||||
// eventFilters.Add("event", "create")
|
||||
eventFilters.Add("event", "start")
|
||||
eventFilters.Add("event", "stop")
|
||||
// eventFilters.Add("event", "destroy")
|
||||
// eventFilters.Add("event", "die")
|
||||
// eventFilters.Add("event", "pause")
|
||||
// eventFilters.Add("event", "unpause")
|
||||
|
||||
// Start listening for events
|
||||
eventCh, errCh := em.client.Events(em.ctx, events.ListOptions{
|
||||
Filters: eventFilters,
|
||||
})
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
if err := em.client.Close(); err != nil {
|
||||
logger.Error("Error closing Docker client: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case event := <-eventCh:
|
||||
logger.Debug("Docker event received: %s %s for container %s", event.Action, event.Type, event.Actor.ID[:12])
|
||||
|
||||
// Fetch updated container list and trigger callback
|
||||
go em.handleEvent(event)
|
||||
|
||||
case err := <-errCh:
|
||||
if err != nil && err != context.Canceled {
|
||||
logger.Error("Docker event stream error: %v", err)
|
||||
// Try to reconnect after a brief delay
|
||||
time.Sleep(5 * time.Second)
|
||||
if em.ctx.Err() == nil {
|
||||
logger.Info("Attempting to reconnect to Docker event stream")
|
||||
eventCh, errCh = em.client.Events(em.ctx, events.ListOptions{
|
||||
Filters: eventFilters,
|
||||
})
|
||||
}
|
||||
}
|
||||
return
|
||||
|
||||
case <-em.ctx.Done():
|
||||
logger.Info("Docker event monitoring stopped")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleEvent processes a Docker event and triggers the callback with updated container list
|
||||
func (em *EventMonitor) handleEvent(event events.Message) {
|
||||
// Add a small delay to ensure Docker has fully processed the event
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
containers, err := ListContainers(em.socketPath, em.enforceNetworkValidation)
|
||||
if err != nil {
|
||||
logger.Error("Failed to list containers after Docker event %s: %v", event.Action, err)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("Triggering callback with %d containers after Docker event %s", len(containers), event.Action)
|
||||
em.callback(containers)
|
||||
}
|
||||
|
||||
// Stop stops the event monitoring
|
||||
func (em *EventMonitor) Stop() {
|
||||
logger.Info("Stopping Docker event monitoring")
|
||||
if em.cancel != nil {
|
||||
em.cancel()
|
||||
}
|
||||
}
|
||||
167
examples/README.md
Normal file
167
examples/README.md
Normal file
@@ -0,0 +1,167 @@
|
||||
# Extensible Logger
|
||||
|
||||
This logger package provides a flexible logging system that can be extended with custom log writers.
|
||||
|
||||
## Basic Usage (Current Behavior)
|
||||
|
||||
The logger works exactly as before with no changes required:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import "your-project/logger"
|
||||
|
||||
func main() {
|
||||
// Use default logger
|
||||
logger.Info("This works as before")
|
||||
logger.Debug("Debug message")
|
||||
logger.Error("Error message")
|
||||
|
||||
// Or create a custom instance
|
||||
log := logger.NewLogger()
|
||||
log.SetLevel(logger.INFO)
|
||||
log.Info("Custom logger instance")
|
||||
}
|
||||
```
|
||||
|
||||
## Custom Log Writers
|
||||
|
||||
To use a custom log backend, implement the `LogWriter` interface:
|
||||
|
||||
```go
|
||||
type LogWriter interface {
|
||||
Write(level LogLevel, timestamp time.Time, message string)
|
||||
}
|
||||
```
|
||||
|
||||
### Example: OS Log Writer (macOS/iOS)
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import "your-project/logger"
|
||||
|
||||
func main() {
|
||||
// Create an OS log writer
|
||||
osWriter := logger.NewOSLogWriter(
|
||||
"net.pangolin.Pangolin.PacketTunnel",
|
||||
"PangolinGo",
|
||||
"MyApp",
|
||||
)
|
||||
|
||||
// Create a logger with the OS log writer
|
||||
log := logger.NewLoggerWithWriter(osWriter)
|
||||
log.SetLevel(logger.DEBUG)
|
||||
|
||||
// Use it just like the standard logger
|
||||
log.Info("This message goes to os_log")
|
||||
log.Error("Error logged to os_log")
|
||||
}
|
||||
```
|
||||
|
||||
### Example: Custom Writer
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
"your-project/logger"
|
||||
)
|
||||
|
||||
// CustomWriter writes logs to a custom destination
|
||||
type CustomWriter struct {
|
||||
// your custom fields
|
||||
}
|
||||
|
||||
func (w *CustomWriter) Write(level logger.LogLevel, timestamp time.Time, message string) {
|
||||
// Your custom logging logic
|
||||
fmt.Printf("[CUSTOM] %s [%s] %s\n", timestamp.Format(time.RFC3339), level.String(), message)
|
||||
}
|
||||
|
||||
func main() {
|
||||
customWriter := &CustomWriter{}
|
||||
log := logger.NewLoggerWithWriter(customWriter)
|
||||
log.Info("Custom logging!")
|
||||
}
|
||||
```
|
||||
|
||||
### Example: Multi-Writer (Log to Multiple Destinations)
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"time"
|
||||
"your-project/logger"
|
||||
)
|
||||
|
||||
// MultiWriter writes to multiple log writers
|
||||
type MultiWriter struct {
|
||||
writers []logger.LogWriter
|
||||
}
|
||||
|
||||
func NewMultiWriter(writers ...logger.LogWriter) *MultiWriter {
|
||||
return &MultiWriter{writers: writers}
|
||||
}
|
||||
|
||||
func (w *MultiWriter) Write(level logger.LogLevel, timestamp time.Time, message string) {
|
||||
for _, writer := range w.writers {
|
||||
writer.Write(level, timestamp, message)
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Log to both standard output and OS log
|
||||
standardWriter := logger.NewStandardWriter()
|
||||
osWriter := logger.NewOSLogWriter("com.example.app", "Main", "App")
|
||||
|
||||
multiWriter := NewMultiWriter(standardWriter, osWriter)
|
||||
log := logger.NewLoggerWithWriter(multiWriter)
|
||||
|
||||
log.Info("This goes to both stdout and os_log!")
|
||||
}
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### Creating Loggers
|
||||
|
||||
- `NewLogger()` - Creates a logger with the default StandardWriter
|
||||
- `NewLoggerWithWriter(writer LogWriter)` - Creates a logger with a custom writer
|
||||
|
||||
### Built-in Writers
|
||||
|
||||
- `NewStandardWriter()` - Standard writer that outputs to stdout (default)
|
||||
- `NewOSLogWriter(subsystem, category, prefix string)` - OS log writer for macOS/iOS (example)
|
||||
|
||||
### Logger Methods
|
||||
|
||||
- `SetLevel(level LogLevel)` - Set minimum log level
|
||||
- `SetOutput(output *os.File)` - Set output file (StandardWriter only)
|
||||
- `Debug(format string, args ...interface{})` - Log debug message
|
||||
- `Info(format string, args ...interface{})` - Log info message
|
||||
- `Warn(format string, args ...interface{})` - Log warning message
|
||||
- `Error(format string, args ...interface{})` - Log error message
|
||||
- `Fatal(format string, args ...interface{})` - Log fatal message and exit
|
||||
|
||||
### Global Functions
|
||||
|
||||
For convenience, you can use global functions that use the default logger:
|
||||
|
||||
- `logger.Debug(format, args...)`
|
||||
- `logger.Info(format, args...)`
|
||||
- `logger.Warn(format, args...)`
|
||||
- `logger.Error(format, args...)`
|
||||
- `logger.Fatal(format, args...)`
|
||||
- `logger.SetOutput(output *os.File)`
|
||||
|
||||
## Migration Guide
|
||||
|
||||
No changes needed! The logger maintains 100% backward compatibility. Your existing code will continue to work without modifications.
|
||||
|
||||
If you want to switch to a custom writer:
|
||||
1. Create your writer implementing `LogWriter`
|
||||
2. Use `NewLoggerWithWriter()` instead of `NewLogger()`
|
||||
3. That's it!
|
||||
898
examples/grafana/dashboards/newt-overview.json
Normal file
898
examples/grafana/dashboards/newt-overview.json
Normal file
@@ -0,0 +1,898 @@
|
||||
{
|
||||
"annotations": {
|
||||
"list": [
|
||||
{
|
||||
"builtIn": 1,
|
||||
"datasource": {
|
||||
"type": "grafana",
|
||||
"uid": "-- Grafana --"
|
||||
},
|
||||
"enable": true,
|
||||
"hide": true,
|
||||
"iconColor": "rgba(0, 211, 255, 1)",
|
||||
"name": "Annotations & Alerts",
|
||||
"type": "dashboard"
|
||||
}
|
||||
]
|
||||
},
|
||||
"editable": true,
|
||||
"fiscalYearStartMonth": 0,
|
||||
"graphTooltip": 0,
|
||||
"id": null,
|
||||
"links": [],
|
||||
"liveNow": false,
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"decimals": 0,
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 500
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 6,
|
||||
"x": 0,
|
||||
"y": 0
|
||||
},
|
||||
"id": 1,
|
||||
"options": {
|
||||
"colorMode": "value",
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "horizontal",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"textMode": "value_and_name"
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "go_goroutine_count",
|
||||
"instant": true,
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Goroutines",
|
||||
"transformations": [],
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"decimals": 1,
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "orange",
|
||||
"value": 256
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 512
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "bytes"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 6,
|
||||
"x": 6,
|
||||
"y": 0
|
||||
},
|
||||
"id": 2,
|
||||
"options": {
|
||||
"colorMode": "background",
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "horizontal",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"textMode": "value_and_name"
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "go_memory_gc_goal_bytes / 1024 / 1024",
|
||||
"format": "time_series",
|
||||
"instant": true,
|
||||
"legendFormat": "",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "GC Target Heap (MiB)",
|
||||
"transformations": [],
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"decimals": 2,
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "orange",
|
||||
"value": 10
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 25
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "ops"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 6,
|
||||
"x": 12,
|
||||
"y": 0
|
||||
},
|
||||
"id": 3,
|
||||
"options": {
|
||||
"colorMode": "value",
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "horizontal",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"textMode": "value_and_name"
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum(rate(http_server_request_duration_seconds_count[$__rate_interval]))",
|
||||
"instant": false,
|
||||
"legendFormat": "req/s",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "HTTP Requests / s",
|
||||
"transformations": [],
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
"mode": "thresholds"
|
||||
},
|
||||
"decimals": 3,
|
||||
"mappings": [],
|
||||
"thresholds": {
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "orange",
|
||||
"value": 0.1
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
"value": 0.5
|
||||
}
|
||||
]
|
||||
},
|
||||
"unit": "ops"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 7,
|
||||
"w": 6,
|
||||
"x": 18,
|
||||
"y": 0
|
||||
},
|
||||
"id": 4,
|
||||
"options": {
|
||||
"colorMode": "value",
|
||||
"graphMode": "area",
|
||||
"justifyMode": "auto",
|
||||
"orientation": "horizontal",
|
||||
"reduceOptions": {
|
||||
"calcs": [
|
||||
"lastNotNull"
|
||||
],
|
||||
"fields": "",
|
||||
"values": false
|
||||
},
|
||||
"textMode": "value_and_name"
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum(rate(newt_connection_errors_total{site_id=~\"$site_id\"}[$__rate_interval]))",
|
||||
"instant": false,
|
||||
"legendFormat": "errors/s",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Connection Errors / s",
|
||||
"transformations": [],
|
||||
"type": "stat"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {},
|
||||
"mappings": [],
|
||||
"unit": "bytes"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 7
|
||||
},
|
||||
"id": 5,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "table",
|
||||
"placement": "right"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum(go_memory_used_bytes)",
|
||||
"legendFormat": "Used",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "go_memory_gc_goal_bytes",
|
||||
"legendFormat": "GC Goal",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"title": "Go Heap Usage vs GC Goal",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {},
|
||||
"decimals": 0,
|
||||
"mappings": [],
|
||||
"unit": "short"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 7
|
||||
},
|
||||
"id": 6,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "table",
|
||||
"placement": "right"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "rate(go_memory_allocations_total[$__rate_interval])",
|
||||
"legendFormat": "Allocations/s",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "rate(go_memory_allocated_bytes_total[$__rate_interval])",
|
||||
"legendFormat": "Allocated bytes/s",
|
||||
"refId": "B"
|
||||
}
|
||||
],
|
||||
"title": "Allocation Activity",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {},
|
||||
"mappings": [],
|
||||
"unit": "s"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 16
|
||||
},
|
||||
"id": 7,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "table",
|
||||
"placement": "right"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.5, sum(rate(http_server_request_duration_seconds_bucket[$__rate_interval])) by (le))",
|
||||
"legendFormat": "p50",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.95, sum(rate(http_server_request_duration_seconds_bucket[$__rate_interval])) by (le))",
|
||||
"legendFormat": "p95",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.99, sum(rate(http_server_request_duration_seconds_bucket[$__rate_interval])) by (le))",
|
||||
"legendFormat": "p99",
|
||||
"refId": "C"
|
||||
}
|
||||
],
|
||||
"title": "HTTP Request Duration Quantiles",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {},
|
||||
"mappings": [],
|
||||
"unit": "ops"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 16
|
||||
},
|
||||
"id": 8,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "table",
|
||||
"placement": "right"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum(rate(http_server_request_duration_seconds_count[$__rate_interval])) by (http_response_status_code)",
|
||||
"legendFormat": "{{http_response_status_code}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "HTTP Requests by Status",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {},
|
||||
"mappings": [],
|
||||
"unit": "ops"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 25
|
||||
},
|
||||
"id": 9,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "table",
|
||||
"placement": "right"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum(rate(newt_connection_attempts_total{site_id=~\"$site_id\"}[$__rate_interval])) by (transport, result)",
|
||||
"legendFormat": "{{transport}} • {{result}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Connection Attempts by Transport/Result",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {},
|
||||
"mappings": [],
|
||||
"unit": "ops"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 25
|
||||
},
|
||||
"id": 10,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "table",
|
||||
"placement": "right"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum(rate(newt_connection_errors_total{site_id=~\"$site_id\"}[$__rate_interval])) by (transport, error_type)",
|
||||
"legendFormat": "{{transport}} • {{error_type}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Connection Errors by Type",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {},
|
||||
"decimals": 3,
|
||||
"mappings": [],
|
||||
"unit": "s"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 0,
|
||||
"y": 34
|
||||
},
|
||||
"id": 11,
|
||||
"options": {
|
||||
"legend": {
|
||||
"calcs": [],
|
||||
"displayMode": "table",
|
||||
"placement": "right"
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "multi",
|
||||
"sort": "desc"
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.5, sum(rate(newt_tunnel_latency_seconds_bucket{site_id=~\"$site_id\", tunnel_id=~\"$tunnel_id\"}[$__rate_interval])) by (le))",
|
||||
"legendFormat": "p50",
|
||||
"refId": "A"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.95, sum(rate(newt_tunnel_latency_seconds_bucket{site_id=~\"$site_id\", tunnel_id=~\"$tunnel_id\"}[$__rate_interval])) by (le))",
|
||||
"legendFormat": "p95",
|
||||
"refId": "B"
|
||||
},
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "histogram_quantile(0.99, sum(rate(newt_tunnel_latency_seconds_bucket{site_id=~\"$site_id\", tunnel_id=~\"$tunnel_id\"}[$__rate_interval])) by (le))",
|
||||
"legendFormat": "p99",
|
||||
"refId": "C"
|
||||
}
|
||||
],
|
||||
"title": "Tunnel Latency Quantiles",
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"cards": {},
|
||||
"color": {
|
||||
"cardColor": "#b4ff00",
|
||||
"colorScale": "sqrt",
|
||||
"colorScheme": "interpolateTurbo"
|
||||
},
|
||||
"dataFormat": "tsbuckets",
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {},
|
||||
"mappings": [],
|
||||
"unit": "s"
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"gridPos": {
|
||||
"h": 9,
|
||||
"w": 12,
|
||||
"x": 12,
|
||||
"y": 34
|
||||
},
|
||||
"heatmap": {},
|
||||
"hideZeroBuckets": true,
|
||||
"id": 12,
|
||||
"legend": {
|
||||
"show": false
|
||||
},
|
||||
"options": {
|
||||
"calculate": true,
|
||||
"cellGap": 2,
|
||||
"cellSize": "auto",
|
||||
"color": {
|
||||
"exponent": 0.5
|
||||
},
|
||||
"exemplars": {
|
||||
"color": "rgba(255,255,255,0.7)"
|
||||
},
|
||||
"filterValues": {
|
||||
"le": 1e-9
|
||||
},
|
||||
"legend": {
|
||||
"show": false
|
||||
},
|
||||
"tooltip": {
|
||||
"mode": "single",
|
||||
"show": true
|
||||
},
|
||||
"xAxis": {
|
||||
"show": true
|
||||
},
|
||||
"yAxis": {
|
||||
"decimals": 3,
|
||||
"show": true
|
||||
}
|
||||
},
|
||||
"pluginVersion": "11.1.0",
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"expr": "sum(rate(newt_tunnel_latency_seconds_bucket{site_id=~\"$site_id\", tunnel_id=~\"$tunnel_id\"}[$__rate_interval])) by (le)",
|
||||
"format": "heatmap",
|
||||
"legendFormat": "{{le}}",
|
||||
"refId": "A"
|
||||
}
|
||||
],
|
||||
"title": "Tunnel Latency Bucket Rate",
|
||||
"type": "heatmap"
|
||||
}
|
||||
],
|
||||
"refresh": "30s",
|
||||
"schemaVersion": 39,
|
||||
"style": "dark",
|
||||
"tags": [
|
||||
"newt",
|
||||
"otel"
|
||||
],
|
||||
"templating": {
|
||||
"list": [
|
||||
{
|
||||
"current": {
|
||||
"selected": false,
|
||||
"text": "Prometheus",
|
||||
"value": "prometheus"
|
||||
},
|
||||
"hide": 0,
|
||||
"label": "Datasource",
|
||||
"name": "DS_PROMETHEUS",
|
||||
"options": [],
|
||||
"query": "prometheus",
|
||||
"refresh": 1,
|
||||
"type": "datasource"
|
||||
},
|
||||
{
|
||||
"current": {
|
||||
"selected": false,
|
||||
"text": "All",
|
||||
"value": "$__all"
|
||||
},
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"definition": "label_values(target_info, site_id)",
|
||||
"hide": 0,
|
||||
"includeAll": true,
|
||||
"label": "Site",
|
||||
"multi": true,
|
||||
"name": "site_id",
|
||||
"options": [],
|
||||
"query": {
|
||||
"query": "label_values(target_info, site_id)",
|
||||
"refId": "SiteIdVar"
|
||||
},
|
||||
"refresh": 2,
|
||||
"regex": "",
|
||||
"skipUrlSync": false,
|
||||
"sort": 1,
|
||||
"tagValuesQuery": "",
|
||||
"tags": [],
|
||||
"tagsQuery": "",
|
||||
"type": "query",
|
||||
"useTags": false
|
||||
},
|
||||
{
|
||||
"current": {
|
||||
"selected": false,
|
||||
"text": "All",
|
||||
"value": "$__all"
|
||||
},
|
||||
"datasource": {
|
||||
"type": "prometheus",
|
||||
"uid": "prometheus"
|
||||
},
|
||||
"definition": "label_values(newt_tunnel_latency_seconds_bucket{site_id=~\"$site_id\"}, tunnel_id)",
|
||||
"hide": 0,
|
||||
"includeAll": true,
|
||||
"label": "Tunnel",
|
||||
"multi": true,
|
||||
"name": "tunnel_id",
|
||||
"options": [],
|
||||
"query": {
|
||||
"query": "label_values(newt_tunnel_latency_seconds_bucket{site_id=~\"$site_id\"}, tunnel_id)",
|
||||
"refId": "TunnelVar"
|
||||
},
|
||||
"refresh": 2,
|
||||
"regex": "",
|
||||
"skipUrlSync": false,
|
||||
"sort": 1,
|
||||
"tagValuesQuery": "",
|
||||
"tags": [],
|
||||
"tagsQuery": "",
|
||||
"type": "query",
|
||||
"useTags": false
|
||||
}
|
||||
]
|
||||
},
|
||||
"time": {
|
||||
"from": "now-6h",
|
||||
"to": "now"
|
||||
},
|
||||
"timepicker": {
|
||||
"refresh_intervals": [
|
||||
"10s",
|
||||
"30s",
|
||||
"1m",
|
||||
"5m",
|
||||
"15m",
|
||||
"30m",
|
||||
"1h",
|
||||
"2h",
|
||||
"1d"
|
||||
],
|
||||
"time_options": [
|
||||
"5m",
|
||||
"15m",
|
||||
"1h",
|
||||
"6h",
|
||||
"12h",
|
||||
"24h",
|
||||
"2d",
|
||||
"7d",
|
||||
"30d"
|
||||
]
|
||||
},
|
||||
"timezone": "browser",
|
||||
"title": "Newt Overview",
|
||||
"uid": "newt-overview",
|
||||
"version": 1,
|
||||
"weekStart": ""
|
||||
}
|
||||
9
examples/grafana/provisioning/dashboards/dashboard.yaml
Normal file
9
examples/grafana/provisioning/dashboards/dashboard.yaml
Normal file
@@ -0,0 +1,9 @@
|
||||
apiVersion: 1
|
||||
providers:
|
||||
- name: "newt"
|
||||
folder: "Newt"
|
||||
type: file
|
||||
disableDeletion: false
|
||||
allowUiUpdates: true
|
||||
options:
|
||||
path: /var/lib/grafana/dashboards
|
||||
@@ -0,0 +1,9 @@
|
||||
apiVersion: 1
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
type: prometheus
|
||||
access: proxy
|
||||
url: http://prometheus:9090
|
||||
uid: prometheus
|
||||
isDefault: true
|
||||
editable: true
|
||||
161
examples/logger_examples.go
Normal file
161
examples/logger_examples.go
Normal file
@@ -0,0 +1,161 @@
|
||||
// Example usage patterns for the extensible logger
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
)
|
||||
|
||||
// Example 1: Using the default logger (works exactly as before)
|
||||
func exampleDefaultLogger() {
|
||||
logger.Info("Starting application")
|
||||
logger.Debug("Debug information")
|
||||
logger.Warn("Warning message")
|
||||
logger.Error("Error occurred")
|
||||
}
|
||||
|
||||
// Example 2: Using a custom logger instance with standard writer
|
||||
func exampleCustomInstance() {
|
||||
log := logger.NewLogger()
|
||||
log.SetLevel(logger.INFO)
|
||||
log.Info("This is from a custom instance")
|
||||
}
|
||||
|
||||
// Example 3: Custom writer that adds JSON formatting
|
||||
type JSONWriter struct{}
|
||||
|
||||
func (w *JSONWriter) Write(level logger.LogLevel, timestamp time.Time, message string) {
|
||||
fmt.Printf("{\"time\":\"%s\",\"level\":\"%s\",\"message\":\"%s\"}\n",
|
||||
timestamp.Format(time.RFC3339),
|
||||
level.String(),
|
||||
message)
|
||||
}
|
||||
|
||||
func exampleJSONLogger() {
|
||||
jsonWriter := &JSONWriter{}
|
||||
log := logger.NewLoggerWithWriter(jsonWriter)
|
||||
log.Info("This will be logged as JSON")
|
||||
}
|
||||
|
||||
// Example 4: File writer
|
||||
type FileWriter struct {
|
||||
file *os.File
|
||||
}
|
||||
|
||||
func NewFileWriter(filename string) (*FileWriter, error) {
|
||||
file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FileWriter{file: file}, nil
|
||||
}
|
||||
|
||||
func (w *FileWriter) Write(level logger.LogLevel, timestamp time.Time, message string) {
|
||||
fmt.Fprintf(w.file, "[%s] %s: %s\n",
|
||||
timestamp.Format("2006-01-02 15:04:05"),
|
||||
level.String(),
|
||||
message)
|
||||
}
|
||||
|
||||
func (w *FileWriter) Close() error {
|
||||
return w.file.Close()
|
||||
}
|
||||
|
||||
func exampleFileLogger() {
|
||||
fileWriter, err := NewFileWriter("/tmp/app.log")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer fileWriter.Close()
|
||||
|
||||
log := logger.NewLoggerWithWriter(fileWriter)
|
||||
log.Info("This goes to a file")
|
||||
}
|
||||
|
||||
// Example 5: Multi-writer to log to multiple destinations
|
||||
type MultiWriter struct {
|
||||
writers []logger.LogWriter
|
||||
}
|
||||
|
||||
func NewMultiWriter(writers ...logger.LogWriter) *MultiWriter {
|
||||
return &MultiWriter{writers: writers}
|
||||
}
|
||||
|
||||
func (w *MultiWriter) Write(level logger.LogLevel, timestamp time.Time, message string) {
|
||||
for _, writer := range w.writers {
|
||||
writer.Write(level, timestamp, message)
|
||||
}
|
||||
}
|
||||
|
||||
func exampleMultiWriter() {
|
||||
// Log to both stdout and a file
|
||||
standardWriter := logger.NewStandardWriter()
|
||||
fileWriter, _ := NewFileWriter("/tmp/app.log")
|
||||
|
||||
multiWriter := NewMultiWriter(standardWriter, fileWriter)
|
||||
log := logger.NewLoggerWithWriter(multiWriter)
|
||||
|
||||
log.Info("This goes to both stdout and file!")
|
||||
}
|
||||
|
||||
// Example 6: Conditional writer (only log errors to a specific destination)
|
||||
type ErrorOnlyWriter struct {
|
||||
writer logger.LogWriter
|
||||
}
|
||||
|
||||
func NewErrorOnlyWriter(writer logger.LogWriter) *ErrorOnlyWriter {
|
||||
return &ErrorOnlyWriter{writer: writer}
|
||||
}
|
||||
|
||||
func (w *ErrorOnlyWriter) Write(level logger.LogLevel, timestamp time.Time, message string) {
|
||||
if level >= logger.ERROR {
|
||||
w.writer.Write(level, timestamp, message)
|
||||
}
|
||||
}
|
||||
|
||||
func exampleConditionalWriter() {
|
||||
errorWriter, _ := NewFileWriter("/tmp/errors.log")
|
||||
errorOnlyWriter := NewErrorOnlyWriter(errorWriter)
|
||||
|
||||
log := logger.NewLoggerWithWriter(errorOnlyWriter)
|
||||
log.Info("This won't be logged")
|
||||
log.Error("This will be logged to errors.log")
|
||||
}
|
||||
|
||||
/* Example 7: OS Log Writer (macOS/iOS only)
|
||||
// Uncomment on Darwin platforms
|
||||
|
||||
func exampleOSLogWriter() {
|
||||
osWriter := logger.NewOSLogWriter(
|
||||
"net.pangolin.Pangolin.PacketTunnel",
|
||||
"PangolinGo",
|
||||
"MyApp",
|
||||
)
|
||||
|
||||
log := logger.NewLoggerWithWriter(osWriter)
|
||||
log.Info("This goes to os_log and can be viewed with Console.app")
|
||||
}
|
||||
*/
|
||||
|
||||
func main() {
|
||||
fmt.Println("=== Example 1: Default Logger ===")
|
||||
exampleDefaultLogger()
|
||||
|
||||
fmt.Println("\n=== Example 2: Custom Instance ===")
|
||||
exampleCustomInstance()
|
||||
|
||||
fmt.Println("\n=== Example 3: JSON Logger ===")
|
||||
exampleJSONLogger()
|
||||
|
||||
fmt.Println("\n=== Example 4: File Logger ===")
|
||||
exampleFileLogger()
|
||||
|
||||
fmt.Println("\n=== Example 5: Multi-Writer ===")
|
||||
exampleMultiWriter()
|
||||
|
||||
fmt.Println("\n=== Example 6: Conditional Writer ===")
|
||||
exampleConditionalWriter()
|
||||
}
|
||||
86
examples/oslog_writer_example.go
Normal file
86
examples/oslog_writer_example.go
Normal file
@@ -0,0 +1,86 @@
|
||||
//go:build darwin
|
||||
// +build darwin
|
||||
|
||||
package main
|
||||
|
||||
/*
|
||||
#cgo CFLAGS: -I../PacketTunnel
|
||||
#include "../PacketTunnel/OSLogBridge.h"
|
||||
#include <stdlib.h>
|
||||
*/
|
||||
import "C"
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// OSLogWriter is a LogWriter implementation that writes to Apple's os_log
|
||||
type OSLogWriter struct {
|
||||
subsystem string
|
||||
category string
|
||||
prefix string
|
||||
}
|
||||
|
||||
// NewOSLogWriter creates a new OSLogWriter
|
||||
func NewOSLogWriter(subsystem, category, prefix string) *OSLogWriter {
|
||||
writer := &OSLogWriter{
|
||||
subsystem: subsystem,
|
||||
category: category,
|
||||
prefix: prefix,
|
||||
}
|
||||
|
||||
// Initialize the OS log bridge
|
||||
cSubsystem := C.CString(subsystem)
|
||||
cCategory := C.CString(category)
|
||||
defer C.free(unsafe.Pointer(cSubsystem))
|
||||
defer C.free(unsafe.Pointer(cCategory))
|
||||
|
||||
C.initOSLogBridge(cSubsystem, cCategory)
|
||||
|
||||
return writer
|
||||
}
|
||||
|
||||
// Write implements the LogWriter interface
|
||||
func (w *OSLogWriter) Write(level LogLevel, timestamp time.Time, message string) {
|
||||
// Get caller information (skip 3 frames to get to the actual caller)
|
||||
_, file, line, ok := runtime.Caller(3)
|
||||
if !ok {
|
||||
file = "unknown"
|
||||
line = 0
|
||||
} else {
|
||||
// Get just the filename, not the full path
|
||||
for i := len(file) - 1; i > 0; i-- {
|
||||
if file[i] == '/' {
|
||||
file = file[i+1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
formattedTime := timestamp.Format("2006-01-02 15:04:05.000")
|
||||
fullMessage := fmt.Sprintf("[%s] [%s] [%s] %s:%d - %s",
|
||||
formattedTime, level.String(), w.prefix, file, line, message)
|
||||
|
||||
cMessage := C.CString(fullMessage)
|
||||
defer C.free(unsafe.Pointer(cMessage))
|
||||
|
||||
// Map Go log levels to os_log levels:
|
||||
// 0=DEBUG, 1=INFO, 2=DEFAULT (WARN), 3=ERROR
|
||||
var osLogLevel C.int
|
||||
switch level {
|
||||
case DEBUG:
|
||||
osLogLevel = 0 // DEBUG
|
||||
case INFO:
|
||||
osLogLevel = 1 // INFO
|
||||
case WARN:
|
||||
osLogLevel = 2 // DEFAULT
|
||||
case ERROR, FATAL:
|
||||
osLogLevel = 3 // ERROR
|
||||
default:
|
||||
osLogLevel = 2 // DEFAULT
|
||||
}
|
||||
|
||||
C.logToOSLog(osLogLevel, cMessage)
|
||||
}
|
||||
61
examples/otel-collector.yaml
Normal file
61
examples/otel-collector.yaml
Normal file
@@ -0,0 +1,61 @@
|
||||
# Variant A: Direct scrape of Newt (/metrics) via Prometheus (no Collector needed)
|
||||
# Note: Newt already exposes labels like site_id, protocol, direction. Do not promote
|
||||
# resource attributes into labels when scraping Newt directly.
|
||||
#
|
||||
# Example Prometheus scrape config:
|
||||
# global:
|
||||
# scrape_interval: 15s
|
||||
# scrape_configs:
|
||||
# - job_name: newt
|
||||
# static_configs:
|
||||
# - targets: ["newt:2112"]
|
||||
#
|
||||
# Variant B: Use OTEL Collector (Newt -> OTLP -> Collector -> Prometheus)
|
||||
# This pipeline scrapes metrics from the Collector's Prometheus exporter.
|
||||
# Labels are already on datapoints; promotion from resource is OPTIONAL and typically NOT required.
|
||||
# If you enable transform/promote below, ensure you do not duplicate labels.
|
||||
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: ":4317"
|
||||
|
||||
processors:
|
||||
memory_limiter:
|
||||
check_interval: 5s
|
||||
limit_percentage: 80
|
||||
spike_limit_percentage: 25
|
||||
resourcedetection:
|
||||
detectors: [env, system]
|
||||
timeout: 5s
|
||||
batch: {}
|
||||
# OPTIONAL: Only enable if you need to promote resource attributes to labels.
|
||||
# WARNING: Newt already provides site_id as a label; avoid double-promotion.
|
||||
# transform/promote:
|
||||
# error_mode: ignore
|
||||
# metric_statements:
|
||||
# - context: datapoint
|
||||
# statements:
|
||||
# - set(attributes["service_instance_id"], resource.attributes["service.instance.id"]) where resource.attributes["service.instance.id"] != nil
|
||||
# - set(attributes["site_id"], resource.attributes["site_id"]) where resource.attributes["site_id"] != nil
|
||||
|
||||
exporters:
|
||||
prometheus:
|
||||
endpoint: ":8889"
|
||||
send_timestamps: true
|
||||
# prometheusremotewrite:
|
||||
# endpoint: http://mimir:9009/api/v1/push
|
||||
debug:
|
||||
verbosity: basic
|
||||
|
||||
service:
|
||||
pipelines:
|
||||
metrics:
|
||||
receivers: [otlp]
|
||||
processors: [memory_limiter, resourcedetection, batch] # add transform/promote if you really need it
|
||||
exporters: [prometheus]
|
||||
traces:
|
||||
receivers: [otlp]
|
||||
processors: [memory_limiter, resourcedetection, batch]
|
||||
exporters: [debug]
|
||||
16
examples/prometheus.with-collector.yml
Normal file
16
examples/prometheus.with-collector.yml
Normal file
@@ -0,0 +1,16 @@
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
|
||||
scrape_configs:
|
||||
# IMPORTANT: Do not scrape Newt directly; scrape only the Collector!
|
||||
- job_name: 'otel-collector'
|
||||
static_configs:
|
||||
- targets: ['otel-collector:8889']
|
||||
|
||||
# optional: limit metric cardinality
|
||||
relabel_configs:
|
||||
- action: labeldrop
|
||||
regex: 'tunnel_id'
|
||||
# - action: keep
|
||||
# source_labels: [site_id]
|
||||
# regex: '(site-a|site-b)'
|
||||
21
examples/prometheus.yml
Normal file
21
examples/prometheus.yml
Normal file
@@ -0,0 +1,21 @@
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'newt'
|
||||
scrape_interval: 15s
|
||||
static_configs:
|
||||
- targets: ['newt:2112'] # /metrics
|
||||
relabel_configs:
|
||||
# optional: drop tunnel_id
|
||||
- action: labeldrop
|
||||
regex: 'tunnel_id'
|
||||
# optional: allow only specific sites
|
||||
- action: keep
|
||||
source_labels: [site_id]
|
||||
regex: '(site-a|site-b)'
|
||||
|
||||
# WARNING: Do not enable this together with the 'newt' job above or you will double-count.
|
||||
# - job_name: 'otel-collector'
|
||||
# static_configs:
|
||||
# - targets: ['otel-collector:8889']
|
||||
27
flake.lock
generated
Normal file
27
flake.lock
generated
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
"nodes": {
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1763934636,
|
||||
"narHash": "sha256-9glbI7f1uU+yzQCq5LwLgdZqx6svOhZWkd4JRY265fc=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "ee09932cedcef15aaf476f9343d1dea2cb77e261",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
102
flake.nix
Normal file
102
flake.nix
Normal file
@@ -0,0 +1,102 @@
|
||||
{
|
||||
description = "newt - A tunneling client for Pangolin";
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
|
||||
};
|
||||
|
||||
outputs =
|
||||
{ self, nixpkgs }:
|
||||
let
|
||||
supportedSystems = [
|
||||
"x86_64-linux"
|
||||
"aarch64-linux"
|
||||
"x86_64-darwin"
|
||||
"aarch64-darwin"
|
||||
];
|
||||
forAllSystems = nixpkgs.lib.genAttrs supportedSystems;
|
||||
pkgsFor = system: nixpkgs.legacyPackages.${system};
|
||||
in
|
||||
{
|
||||
packages = forAllSystems (
|
||||
system:
|
||||
let
|
||||
pkgs = pkgsFor system;
|
||||
inherit (pkgs) lib;
|
||||
|
||||
# Update version when releasing
|
||||
version = "1.8.0";
|
||||
in
|
||||
{
|
||||
default = self.packages.${system}.pangolin-newt;
|
||||
|
||||
pangolin-newt = pkgs.buildGoModule {
|
||||
pname = "pangolin-newt";
|
||||
inherit version;
|
||||
src = pkgs.nix-gitignore.gitignoreSource [ ] ./.;
|
||||
|
||||
vendorHash = "sha256-Sib6AUCpMgxlMpTc2Esvs+UU0yduVOxWUgT44FHAI+k=";
|
||||
|
||||
nativeInstallCheckInputs = [ pkgs.versionCheckHook ];
|
||||
|
||||
env = {
|
||||
CGO_ENABLED = 0;
|
||||
};
|
||||
|
||||
ldflags = [
|
||||
"-s"
|
||||
"-w"
|
||||
"-X main.newtVersion=${version}"
|
||||
];
|
||||
|
||||
# Tests are broken due to a lack of Internet.
|
||||
# Disable running `go test`, and instead do
|
||||
# a simple version check instead.
|
||||
doCheck = false;
|
||||
doInstallCheck = true;
|
||||
|
||||
versionCheckProgramArg = [ "-version" ];
|
||||
|
||||
meta = {
|
||||
description = "A tunneling client for Pangolin";
|
||||
homepage = "https://github.com/fosrl/newt";
|
||||
license = lib.licenses.gpl3;
|
||||
maintainers = [
|
||||
lib.maintainers.water-sucks
|
||||
];
|
||||
mainProgram = "newt";
|
||||
};
|
||||
};
|
||||
}
|
||||
);
|
||||
devShells = forAllSystems (
|
||||
system:
|
||||
let
|
||||
pkgs = pkgsFor system;
|
||||
|
||||
inherit (pkgs)
|
||||
go
|
||||
gopls
|
||||
gotools
|
||||
go-outline
|
||||
gopkgs
|
||||
godef
|
||||
golint
|
||||
;
|
||||
in
|
||||
{
|
||||
default = pkgs.mkShell {
|
||||
buildInputs = [
|
||||
go
|
||||
gopls
|
||||
gotools
|
||||
go-outline
|
||||
gopkgs
|
||||
godef
|
||||
golint
|
||||
];
|
||||
};
|
||||
}
|
||||
);
|
||||
};
|
||||
}
|
||||
235
get-newt.sh
Normal file
235
get-newt.sh
Normal file
@@ -0,0 +1,235 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Get Newt - Cross-platform installation script
|
||||
# Usage: curl -fsSL https://raw.githubusercontent.com/fosrl/newt/refs/heads/main/get-newt.sh | bash
|
||||
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# GitHub repository info
|
||||
REPO="fosrl/newt"
|
||||
GITHUB_API_URL="https://api.github.com/repos/${REPO}/releases/latest"
|
||||
|
||||
# Function to print colored output
|
||||
print_status() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
print_error() {
|
||||
echo -e "${RED}[ERROR]${NC} $1"
|
||||
}
|
||||
|
||||
# Function to get latest version from GitHub API
|
||||
get_latest_version() {
|
||||
local latest_info
|
||||
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
latest_info=$(curl -fsSL "$GITHUB_API_URL" 2>/dev/null)
|
||||
elif command -v wget >/dev/null 2>&1; then
|
||||
latest_info=$(wget -qO- "$GITHUB_API_URL" 2>/dev/null)
|
||||
else
|
||||
print_error "Neither curl nor wget is available. Please install one of them." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$latest_info" ]; then
|
||||
print_error "Failed to fetch latest version information" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract version from JSON response (works without jq)
|
||||
local version=$(echo "$latest_info" | grep '"tag_name"' | head -1 | sed 's/.*"tag_name": *"\([^"]*\)".*/\1/')
|
||||
|
||||
if [ -z "$version" ]; then
|
||||
print_error "Could not parse version from GitHub API response" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Remove 'v' prefix if present
|
||||
version=$(echo "$version" | sed 's/^v//')
|
||||
|
||||
echo "$version"
|
||||
}
|
||||
|
||||
# Detect OS and architecture
|
||||
detect_platform() {
|
||||
local os arch
|
||||
|
||||
# Detect OS
|
||||
case "$(uname -s)" in
|
||||
Linux*) os="linux" ;;
|
||||
Darwin*) os="darwin" ;;
|
||||
MINGW*|MSYS*|CYGWIN*) os="windows" ;;
|
||||
FreeBSD*) os="freebsd" ;;
|
||||
*)
|
||||
print_error "Unsupported operating system: $(uname -s)"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Detect architecture
|
||||
case "$(uname -m)" in
|
||||
x86_64|amd64) arch="amd64" ;;
|
||||
arm64|aarch64) arch="arm64" ;;
|
||||
armv7l|armv6l)
|
||||
if [ "$os" = "linux" ]; then
|
||||
if [ "$(uname -m)" = "armv6l" ]; then
|
||||
arch="arm32v6"
|
||||
else
|
||||
arch="arm32"
|
||||
fi
|
||||
else
|
||||
arch="arm64" # Default for non-Linux ARM
|
||||
fi
|
||||
;;
|
||||
riscv64)
|
||||
if [ "$os" = "linux" ]; then
|
||||
arch="riscv64"
|
||||
else
|
||||
print_error "RISC-V architecture only supported on Linux"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
print_error "Unsupported architecture: $(uname -m)"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "${os}_${arch}"
|
||||
}
|
||||
|
||||
# Get installation directory
|
||||
get_install_dir() {
|
||||
if [ "$OS" = "windows" ]; then
|
||||
echo "$HOME/bin"
|
||||
else
|
||||
# Try to use a directory in PATH, fallback to ~/.local/bin
|
||||
if echo "$PATH" | grep -q "/usr/local/bin"; then
|
||||
if [ -w "/usr/local/bin" ] 2>/dev/null; then
|
||||
echo "/usr/local/bin"
|
||||
else
|
||||
echo "$HOME/.local/bin"
|
||||
fi
|
||||
else
|
||||
echo "$HOME/.local/bin"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Download and install newt
|
||||
install_newt() {
|
||||
local platform="$1"
|
||||
local install_dir="$2"
|
||||
local binary_name="newt_${platform}"
|
||||
local exe_suffix=""
|
||||
|
||||
# Add .exe suffix for Windows
|
||||
if [[ "$platform" == *"windows"* ]]; then
|
||||
binary_name="${binary_name}.exe"
|
||||
exe_suffix=".exe"
|
||||
fi
|
||||
|
||||
local download_url="${BASE_URL}/${binary_name}"
|
||||
local temp_file="/tmp/newt${exe_suffix}"
|
||||
local final_path="${install_dir}/newt${exe_suffix}"
|
||||
|
||||
print_status "Downloading newt from ${download_url}"
|
||||
|
||||
# Download the binary
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
curl -fsSL "$download_url" -o "$temp_file"
|
||||
elif command -v wget >/dev/null 2>&1; then
|
||||
wget -q "$download_url" -O "$temp_file"
|
||||
else
|
||||
print_error "Neither curl nor wget is available. Please install one of them."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create install directory if it doesn't exist
|
||||
mkdir -p "$install_dir"
|
||||
|
||||
# Move binary to install directory
|
||||
mv "$temp_file" "$final_path"
|
||||
|
||||
# Make executable (not needed on Windows, but doesn't hurt)
|
||||
chmod +x "$final_path"
|
||||
|
||||
print_status "newt installed to ${final_path}"
|
||||
|
||||
# Check if install directory is in PATH
|
||||
if ! echo "$PATH" | grep -q "$install_dir"; then
|
||||
print_warning "Install directory ${install_dir} is not in your PATH."
|
||||
print_warning "Add it to your PATH by adding this line to your shell profile:"
|
||||
print_warning " export PATH=\"${install_dir}:\$PATH\""
|
||||
fi
|
||||
}
|
||||
|
||||
# Verify installation
|
||||
verify_installation() {
|
||||
local install_dir="$1"
|
||||
local exe_suffix=""
|
||||
|
||||
if [[ "$PLATFORM" == *"windows"* ]]; then
|
||||
exe_suffix=".exe"
|
||||
fi
|
||||
|
||||
local newt_path="${install_dir}/newt${exe_suffix}"
|
||||
|
||||
if [ -f "$newt_path" ] && [ -x "$newt_path" ]; then
|
||||
print_status "Installation successful!"
|
||||
print_status "newt version: $("$newt_path" --version 2>/dev/null || echo "unknown")"
|
||||
return 0
|
||||
else
|
||||
print_error "Installation failed. Binary not found or not executable."
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Main installation process
|
||||
main() {
|
||||
print_status "Installing latest version of newt..."
|
||||
|
||||
# Get latest version
|
||||
print_status "Fetching latest version from GitHub..."
|
||||
VERSION=$(get_latest_version)
|
||||
print_status "Latest version: v${VERSION}"
|
||||
|
||||
# Set base URL with the fetched version
|
||||
BASE_URL="https://github.com/${REPO}/releases/download/${VERSION}"
|
||||
|
||||
# Detect platform
|
||||
PLATFORM=$(detect_platform)
|
||||
print_status "Detected platform: ${PLATFORM}"
|
||||
|
||||
# Get install directory
|
||||
INSTALL_DIR=$(get_install_dir)
|
||||
print_status "Install directory: ${INSTALL_DIR}"
|
||||
|
||||
# Install newt
|
||||
install_newt "$PLATFORM" "$INSTALL_DIR"
|
||||
|
||||
# Verify installation
|
||||
if verify_installation "$INSTALL_DIR"; then
|
||||
print_status "newt is ready to use!"
|
||||
if [[ "$PLATFORM" == *"windows"* ]]; then
|
||||
print_status "Run 'newt --help' to get started"
|
||||
else
|
||||
print_status "Run 'newt --help' to get started"
|
||||
fi
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
78
go.mod
78
go.mod
@@ -1,22 +1,76 @@
|
||||
module github.com/fosrl/newt
|
||||
|
||||
go 1.23.1
|
||||
|
||||
toolchain go1.23.2
|
||||
go 1.25
|
||||
|
||||
require (
|
||||
github.com/docker/docker v28.5.2+incompatible
|
||||
github.com/gorilla/websocket v1.5.3
|
||||
golang.org/x/net v0.30.0
|
||||
golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173
|
||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6
|
||||
gvisor.dev/gvisor v0.0.0-20230927004350-cbd86285d259
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/vishvananda/netlink v1.3.1
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0
|
||||
go.opentelemetry.io/otel v1.39.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.61.0
|
||||
go.opentelemetry.io/otel/metric v1.39.0
|
||||
go.opentelemetry.io/otel/sdk v1.39.0
|
||||
go.opentelemetry.io/otel/sdk/metric v1.39.0
|
||||
golang.org/x/crypto v0.46.0
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6
|
||||
golang.org/x/net v0.48.0
|
||||
golang.org/x/sys v0.39.0
|
||||
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb
|
||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20241231184526-a9ab2273dd10
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3
|
||||
google.golang.org/grpc v1.77.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
gvisor.dev/gvisor v0.0.0-20250503011706-39ed1f5ac29c
|
||||
software.sslmate.com/src/go-pkcs12 v0.7.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/google/btree v1.1.2 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
golang.org/x/crypto v0.28.0 // indirect
|
||||
golang.org/x/sys v0.26.0 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/go-connections v0.6.0 // indirect
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-logr/logr v1.4.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/sys/atomicwriter v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.67.4 // indirect
|
||||
github.com/prometheus/otlptranslator v1.0.0 // indirect
|
||||
github.com/prometheus/procfs v0.19.2 // indirect
|
||||
github.com/vishvananda/netns v0.0.5 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.39.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||
golang.org/x/mod v0.30.0 // indirect
|
||||
golang.org/x/sync v0.19.0 // indirect
|
||||
golang.org/x/text v0.32.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
golang.org/x/tools v0.39.0 // indirect
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||
google.golang.org/protobuf v1.36.10 // indirect
|
||||
)
|
||||
|
||||
188
go.sum
188
go.sum
@@ -1,22 +1,174 @@
|
||||
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
|
||||
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
|
||||
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
|
||||
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
|
||||
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
|
||||
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/containerd/errdefs v0.3.0 h1:FSZgGOeK4yuT/+DnF07/Olde/q4KBoMsaamhXxIMDp4=
|
||||
github.com/containerd/errdefs v0.3.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
|
||||
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
|
||||
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=
|
||||
github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
|
||||
github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
|
||||
golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
|
||||
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
|
||||
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
|
||||
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
|
||||
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
|
||||
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
|
||||
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
|
||||
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
|
||||
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
|
||||
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
|
||||
github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
|
||||
github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
|
||||
github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
|
||||
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
|
||||
github.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0=
|
||||
github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4=
|
||||
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
|
||||
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0 h1:/+/+UjlXjFcdDlXxKL1PouzX8Z2Vl0OxolRKeBEgYDw=
|
||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0/go.mod h1:Ldm/PDuzY2DP7IypudopCR3OCOW42NJlN9+mNEroevo=
|
||||
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
|
||||
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 h1:cEf8jF6WbuGQWUVcqgyWtTR0kOOAWY1DYZ+UhvdmQPw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0/go.mod h1:k1lzV5n5U3HkGvTCJHraTAGJ7MqsgL1wrGwTj1Isfiw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.61.0 h1:cCyZS4dr67d30uDyh8etKM2QyDsQ4zC9ds3bdbrVoD0=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.61.0/go.mod h1:iivMuj3xpR2DkUrUya3TPS/Z9h3dz7h01GxU+fQBRNg=
|
||||
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
|
||||
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
|
||||
go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
|
||||
go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
|
||||
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
|
||||
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
|
||||
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
|
||||
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
|
||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
|
||||
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173 h1:/jFs0duh4rdb8uIfPMv78iAJGcPKDeqAFnaLBropIC4=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173/go.mod h1:tkCQ4FQXmpAgYVh++1cq16/dH4QJtmvpRv19DWGAHSA=
|
||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 h1:CawjfCvYQH2OU3/TnxLx97WDSUDRABfT18pCOYwc2GE=
|
||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6/go.mod h1:3rxYc4HtVcSG9gVaTs2GEBdehh+sYPOwKtyUWEOTb80=
|
||||
gvisor.dev/gvisor v0.0.0-20230927004350-cbd86285d259 h1:TbRPT0HtzFP3Cno1zZo7yPzEEnfu8EjLfl6IU9VfqkQ=
|
||||
gvisor.dev/gvisor v0.0.0-20230927004350-cbd86285d259/go.mod h1:AVgIgHMwK63XvmAzWG9vLQ41YnVHN0du0tEC46fI7yY=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb h1:whnFRlWMcXI9d+ZbWg+4sHnLp52d5yiIPUxMBSt4X9A=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb/go.mod h1:rpwXGsirqLqN2L0JDJQlwOboGHmptD5ZD6T2VmcqhTw=
|
||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20241231184526-a9ab2273dd10 h1:3GDAcqdIg1ozBNLgPy4SLT84nfcBjr6rhGtXYtrkWLU=
|
||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20241231184526-a9ab2273dd10/go.mod h1:T97yPqesLiNrOYxkwmhMI0ZIlJDm+p0PMR8eRVeR5tQ=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
|
||||
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
|
||||
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
|
||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
|
||||
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
|
||||
gvisor.dev/gvisor v0.0.0-20250503011706-39ed1f5ac29c h1:m/r7OM+Y2Ty1sgBQ7Qb27VgIMBW8ZZhT4gLnUyDIhzI=
|
||||
gvisor.dev/gvisor v0.0.0-20250503011706-39ed1f5ac29c/go.mod h1:3r5CMtNQMKIvBlrmM9xWUNamjKBYPOWyXOjmg5Kts3g=
|
||||
software.sslmate.com/src/go-pkcs12 v0.7.0 h1:Db8W44cB54TWD7stUFFSWxdfpdn6fZVcDl0w3R4RVM0=
|
||||
software.sslmate.com/src/go-pkcs12 v0.7.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
|
||||
|
||||
523
healthcheck/healthcheck.go
Normal file
523
healthcheck/healthcheck.go
Normal file
@@ -0,0 +1,523 @@
|
||||
package healthcheck
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
)
|
||||
|
||||
// Health represents the health status of a target
|
||||
type Health int
|
||||
|
||||
const (
|
||||
StatusUnknown Health = iota
|
||||
StatusHealthy
|
||||
StatusUnhealthy
|
||||
)
|
||||
|
||||
func (s Health) String() string {
|
||||
switch s {
|
||||
case StatusHealthy:
|
||||
return "healthy"
|
||||
case StatusUnhealthy:
|
||||
return "unhealthy"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// Config holds the health check configuration for a single target.
// Zero values for Scheme, Mode, Method, Interval, UnhealthyInterval,
// and Timeout are replaced with defaults when the target is added
// (see addTargetUnsafe: "http", "http", "GET", 30s, 30s, 5s).
type Config struct {
	ID                int               `json:"id"`          // unique target identifier; also the map key in Monitor.targets
	Enabled           bool              `json:"hcEnabled"`   // when false the target is registered but not monitored
	Path              string            `json:"hcPath"`      // request path for the health check
	Scheme            string            `json:"hcScheme"`    // "http" or "https"; defaults to "http"
	Mode              string            `json:"hcMode"`      // defaults to "http"; other modes not visible in this file — confirm with callers
	Hostname          string            `json:"hcHostname"`  // host to probe
	Port              int               `json:"hcPort"`      // port to probe
	Interval          int               `json:"hcInterval"`          // in seconds; check period while healthy (default 30)
	UnhealthyInterval int               `json:"hcUnhealthyInterval"` // in seconds; check period while unhealthy (default 30)
	Timeout           int               `json:"hcTimeout"`           // in seconds; per-check timeout (default 5)
	Headers           map[string]string `json:"hcHeaders"`   // extra request headers; see parseHeaders for the string form
	Method            string            `json:"hcMethod"`    // HTTP method; defaults to "GET"
	Status            int               `json:"hcStatus"` // HTTP status code expected from a healthy target — presumably; confirm in performHealthCheck
	TLSServerName     string            `json:"hcTlsServerName"` // optional SNI override for TLS checks
}
|
||||
|
||||
// Target represents a health check target together with its current
// runtime status. Exported fields are serialized for status callbacks;
// unexported fields drive the monitoring goroutine.
type Target struct {
	Config     Config    `json:"config"`              // static configuration for this target
	Status     Health    `json:"status"`              // last observed health state
	LastCheck  time.Time `json:"lastCheck"`           // time of the most recent check
	LastError  string    `json:"lastError,omitempty"` // message from the most recent failure, if any
	CheckCount int       `json:"checkCount"`          // number of checks performed so far
	// timer is presumably used to schedule the next check — confirm in monitorTarget.
	timer *time.Timer
	// NOTE(review): storing a context in a struct is discouraged by Go
	// convention, but here it scopes the monitoring goroutine's lifetime;
	// cancel() is called on removal/replacement to stop it.
	ctx    context.Context
	cancel context.CancelFunc
	// Per-target HTTP client; its TLS config is set from Monitor.enforceCert
	// and Config.TLSServerName at construction time.
	client *http.Client
}
|
||||
|
||||
// StatusChangeCallback is invoked (in a fresh goroutine) with a snapshot
// copy of all current targets whenever a target's status changes or
// targets are removed. The map is a copy, so the callback may read it
// without holding the monitor's lock.
type StatusChangeCallback func(targets map[int]*Target)
|
||||
|
||||
// Monitor manages health check targets and their monitoring goroutines.
// All access to the targets map is guarded by mutex; snapshot copies are
// handed to the callback so it never runs under the lock.
type Monitor struct {
	targets     map[int]*Target      // registered targets keyed by Config.ID; guarded by mutex
	mutex       sync.RWMutex         // protects targets; NOT reentrant — do not call RLock-taking methods while write-locked
	callback    StatusChangeCallback // notified with a snapshot on status changes; may be nil
	enforceCert bool                 // when true, HTTPS checks verify the server certificate chain
}
|
||||
|
||||
// NewMonitor creates a new health check monitor
|
||||
func NewMonitor(callback StatusChangeCallback, enforceCert bool) *Monitor {
|
||||
logger.Debug("Creating new health check monitor with certificate enforcement: %t", enforceCert)
|
||||
|
||||
return &Monitor{
|
||||
targets: make(map[int]*Target),
|
||||
callback: callback,
|
||||
enforceCert: enforceCert,
|
||||
}
|
||||
}
|
||||
|
||||
// parseHeaders converts a header specification string into a map.
// The string may be a JSON object (`{"K":"V",...}`); if it does not
// parse as JSON, it is treated as a comma-separated list of
// "key:value" pairs. Pairs without a colon are silently skipped.
// An empty input yields an empty (non-nil) map.
func parseHeaders(headersStr string) map[string]string {
	headers := make(map[string]string)
	if headersStr == "" {
		return headers
	}

	// JSON form takes precedence over the key:value fallback.
	if json.Unmarshal([]byte(headersStr), &headers) == nil {
		return headers
	}

	// Fallback: "k1:v1, k2:v2". Split on the first colon only, so
	// values may themselves contain colons (e.g. host:port).
	for _, pair := range strings.Split(headersStr, ",") {
		if key, value, ok := strings.Cut(strings.TrimSpace(pair), ":"); ok {
			headers[strings.TrimSpace(key)] = strings.TrimSpace(value)
		}
	}
	return headers
}
|
||||
|
||||
// AddTarget adds a new health check target under the write lock,
// replacing any existing target with the same Config.ID and starting
// a monitoring goroutine if config.Enabled is true.
func (m *Monitor) AddTarget(config Config) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	logger.Info("Adding health check target: ID=%d, hostname=%s, port=%d, enabled=%t",
		config.ID, config.Hostname, config.Port, config.Enabled)

	// Defaults and construction live in addTargetUnsafe; this wrapper
	// only provides locking and logging.
	return m.addTargetUnsafe(config)
}
|
||||
|
||||
// AddTargets adds multiple health check targets in bulk
|
||||
func (m *Monitor) AddTargets(configs []Config) error {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
logger.Debug("Adding %d health check targets in bulk", len(configs))
|
||||
|
||||
for _, config := range configs {
|
||||
if err := m.addTargetUnsafe(config); err != nil {
|
||||
logger.Error("Failed to add target %d: %v", config.ID, err)
|
||||
return fmt.Errorf("failed to add target %d: %v", config.ID, err)
|
||||
}
|
||||
logger.Debug("Successfully added target: ID=%d, hostname=%s", config.ID, config.Hostname)
|
||||
}
|
||||
|
||||
// Don't notify callback immediately - let the initial health checks complete first
|
||||
// The callback will be triggered when the first health check results are available
|
||||
|
||||
logger.Debug("Successfully added all %d health check targets", len(configs))
|
||||
return nil
|
||||
}
|
||||
|
||||
// addTargetUnsafe adds a target without acquiring the mutex; the caller
// must hold m.mutex for writing. It applies defaults, cancels any
// existing target with the same ID, builds the per-target HTTP client,
// and starts the monitoring goroutine when config.Enabled is true.
// Always returns nil in the current implementation.
func (m *Monitor) addTargetUnsafe(config Config) error {
	// Apply defaults for any zero-valued fields.
	if config.Scheme == "" {
		config.Scheme = "http"
	}
	if config.Mode == "" {
		config.Mode = "http"
	}
	if config.Method == "" {
		config.Method = "GET"
	}
	if config.Interval == 0 {
		config.Interval = 30
	}
	if config.UnhealthyInterval == 0 {
		config.UnhealthyInterval = 30
	}
	if config.Timeout == 0 {
		config.Timeout = 5
	}

	logger.Debug("Target %d configuration: scheme=%s, method=%s, interval=%ds, timeout=%ds",
		config.ID, config.Scheme, config.Method, config.Interval, config.Timeout)

	// Parse headers if provided as string.
	// NOTE(review): this branch only ever installs an empty map — no
	// string parsing actually happens here (see parseHeaders for the
	// real parser). Presumably defensive initialization; confirm intent.
	if len(config.Headers) == 0 && config.Path != "" {
		// This is a simplified header parsing - in real use you might want more robust parsing
		config.Headers = make(map[string]string)
	}

	// Replace semantics: cancel the old target's monitoring goroutine
	// before overwriting the map entry below.
	if existing, exists := m.targets[config.ID]; exists {
		logger.Info("Replacing existing target with ID %d", config.ID)
		existing.cancel()
	}

	// Create the new target with its own cancellable context and a
	// dedicated HTTP client.
	// NOTE(review): config.Timeout is NOT applied to this client here —
	// presumably enforced per-request elsewhere (performHealthCheck);
	// confirm, otherwise checks have no timeout.
	ctx, cancel := context.WithCancel(context.Background())
	target := &Target{
		Config: config,
		Status: StatusUnknown,
		ctx:    ctx,
		cancel: cancel,
		client: &http.Client{
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{
					// Configure TLS settings based on certificate enforcement
					InsecureSkipVerify: !m.enforceCert,
					// Use SNI TLS header if present
					ServerName: config.TLSServerName,
				},
			},
		},
	}

	m.targets[config.ID] = target

	// Start monitoring if enabled; disabled targets stay registered but idle.
	if config.Enabled {
		logger.Info("Starting monitoring for target %d (%s:%d)", config.ID, config.Hostname, config.Port)
		go m.monitorTarget(target)
	} else {
		logger.Debug("Target %d added but monitoring is disabled", config.ID)
	}

	return nil
}
|
||||
|
||||
// RemoveTarget removes a health check target
|
||||
func (m *Monitor) RemoveTarget(id int) error {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
target, exists := m.targets[id]
|
||||
if !exists {
|
||||
logger.Warn("Attempted to remove non-existent target with ID %d", id)
|
||||
return fmt.Errorf("target with id %d not found", id)
|
||||
}
|
||||
|
||||
logger.Info("Removing health check target: ID=%d", id)
|
||||
target.cancel()
|
||||
delete(m.targets, id)
|
||||
|
||||
// Notify callback of status change
|
||||
if m.callback != nil {
|
||||
go m.callback(m.GetTargets())
|
||||
}
|
||||
|
||||
logger.Info("Successfully removed target %d", id)
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveTargets removes multiple health check targets
|
||||
func (m *Monitor) RemoveTargets(ids []int) error {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
logger.Info("Removing %d health check targets", len(ids))
|
||||
var notFound []int
|
||||
|
||||
for _, id := range ids {
|
||||
target, exists := m.targets[id]
|
||||
if !exists {
|
||||
notFound = append(notFound, id)
|
||||
logger.Warn("Target with ID %d not found during bulk removal", id)
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Debug("Removing target %d", id)
|
||||
target.cancel()
|
||||
delete(m.targets, id)
|
||||
}
|
||||
|
||||
removedCount := len(ids) - len(notFound)
|
||||
logger.Info("Successfully removed %d targets", removedCount)
|
||||
|
||||
// Notify callback of status change if any targets were removed
|
||||
if len(notFound) != len(ids) && m.callback != nil {
|
||||
go m.callback(m.GetTargets())
|
||||
}
|
||||
|
||||
if len(notFound) > 0 {
|
||||
logger.Error("Some targets not found during removal: %v", notFound)
|
||||
return fmt.Errorf("targets not found: %v", notFound)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveTargetsByID is a convenience method that accepts either a single ID or multiple IDs
|
||||
func (m *Monitor) RemoveTargetsByID(ids ...int) error {
|
||||
return m.RemoveTargets(ids)
|
||||
}
|
||||
|
||||
// GetTargets returns a copy of all targets.
//
// The returned map holds fresh copies of each Target, so callers may inspect
// it without synchronizing with the monitor. Must not be called while already
// holding m.mutex (use getAllTargetsUnsafe in that case).
func (m *Monitor) GetTargets() map[int]*Target {
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	return m.getAllTargetsUnsafe()
}
|
||||
|
||||
// getAllTargetsUnsafe returns a copy of all targets without acquiring the mutex (internal method)
|
||||
func (m *Monitor) getAllTargetsUnsafe() map[int]*Target {
|
||||
targets := make(map[int]*Target)
|
||||
for id, target := range m.targets {
|
||||
// Create a copy to avoid race conditions
|
||||
targetCopy := *target
|
||||
targets[id] = &targetCopy
|
||||
}
|
||||
return targets
|
||||
}
|
||||
|
||||
// getAllTargets returns a copy of all targets.
//
// Deprecated: use GetTargets instead.
func (m *Monitor) getAllTargets() map[int]*Target {
	return m.GetTargets()
}
|
||||
|
||||
// monitorTarget monitors a single target.
//
// Runs as a dedicated goroutine per target: performs an immediate initial
// check, then re-checks on a timer whose interval switches between
// Config.Interval (healthy) and Config.UnhealthyInterval (unhealthy). Exits
// when the target's context is cancelled. The status callback is invoked
// (asynchronously) after the initial check and whenever the status changes.
//
// NOTE(review): this goroutine writes target.Status/LastCheck/etc. (via
// performHealthCheck) without holding m.mutex, while GetTargets copies the
// Target values under RLock — that looks like a data race on those fields;
// confirm with -race and consider guarding the per-target fields.
func (m *Monitor) monitorTarget(target *Target) {
	logger.Info("Starting health check monitoring for target %d (%s:%d)",
		target.Config.ID, target.Config.Hostname, target.Config.Port)

	// Initial check
	oldStatus := target.Status
	m.performHealthCheck(target)

	// Notify callback after initial check if status changed or if it's the first check
	if (oldStatus != target.Status || oldStatus == StatusUnknown) && m.callback != nil {
		logger.Info("Target %d initial status: %s", target.Config.ID, target.Status.String())
		go m.callback(m.GetTargets())
	}

	// Set up timer based on current status: unhealthy targets are re-checked
	// on their own (typically shorter) interval.
	interval := time.Duration(target.Config.Interval) * time.Second
	if target.Status == StatusUnhealthy {
		interval = time.Duration(target.Config.UnhealthyInterval) * time.Second
	}

	logger.Debug("Target %d: initial check interval set to %v", target.Config.ID, interval)
	target.timer = time.NewTimer(interval)
	defer target.timer.Stop()

	for {
		select {
		case <-target.ctx.Done():
			logger.Info("Stopping health check monitoring for target %d", target.Config.ID)
			return
		case <-target.timer.C:
			oldStatus := target.Status
			m.performHealthCheck(target)

			// Update timer interval if status changed
			newInterval := time.Duration(target.Config.Interval) * time.Second
			if target.Status == StatusUnhealthy {
				newInterval = time.Duration(target.Config.UnhealthyInterval) * time.Second
			}

			if newInterval != interval {
				logger.Debug("Target %d: updating check interval from %v to %v due to status change",
					target.Config.ID, interval, newInterval)
				interval = newInterval
			}

			// Reset timer for next check with current interval (safe here:
			// the timer has already fired and its channel been drained).
			target.timer.Reset(interval)

			// Notify callback if status changed
			if oldStatus != target.Status && m.callback != nil {
				logger.Info("Target %d status changed: %s -> %s",
					target.Config.ID, oldStatus.String(), target.Status.String())
				go m.callback(m.GetTargets())
			}
		}
	}
}
|
||||
|
||||
// performHealthCheck performs a health check on a target
|
||||
func (m *Monitor) performHealthCheck(target *Target) {
|
||||
target.CheckCount++
|
||||
target.LastCheck = time.Now()
|
||||
target.LastError = ""
|
||||
|
||||
// Build URL
|
||||
url := fmt.Sprintf("%s://%s", target.Config.Scheme, target.Config.Hostname)
|
||||
if target.Config.Port > 0 {
|
||||
url = fmt.Sprintf("%s:%d", url, target.Config.Port)
|
||||
}
|
||||
if target.Config.Path != "" {
|
||||
if !strings.HasPrefix(target.Config.Path, "/") {
|
||||
url += "/"
|
||||
}
|
||||
url += target.Config.Path
|
||||
}
|
||||
|
||||
logger.Debug("Target %d: performing health check %d to %s",
|
||||
target.Config.ID, target.CheckCount, url)
|
||||
|
||||
if target.Config.Scheme == "https" {
|
||||
logger.Debug("Target %d: HTTPS health check with certificate enforcement: %t",
|
||||
target.Config.ID, m.enforceCert)
|
||||
}
|
||||
|
||||
// Create request
|
||||
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(target.Config.Timeout)*time.Second)
|
||||
defer cancel()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, target.Config.Method, url, nil)
|
||||
if err != nil {
|
||||
target.Status = StatusUnhealthy
|
||||
target.LastError = fmt.Sprintf("failed to create request: %v", err)
|
||||
logger.Warn("Target %d: failed to create request: %v", target.Config.ID, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Add headers
|
||||
for key, value := range target.Config.Headers {
|
||||
// Handle Host header specially - it must be set on req.Host, not in headers
|
||||
if strings.EqualFold(key, "Host") {
|
||||
req.Host = value
|
||||
} else {
|
||||
req.Header.Set(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
// Perform request
|
||||
resp, err := target.client.Do(req)
|
||||
if err != nil {
|
||||
target.Status = StatusUnhealthy
|
||||
target.LastError = fmt.Sprintf("request failed: %v", err)
|
||||
logger.Warn("Target %d: health check failed: %v", target.Config.ID, err)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Check response status
|
||||
var expectedStatus int
|
||||
if target.Config.Status > 0 {
|
||||
expectedStatus = target.Config.Status
|
||||
} else {
|
||||
expectedStatus = 0 // Use range check for 200-299
|
||||
}
|
||||
|
||||
if expectedStatus > 0 {
|
||||
logger.Debug("Target %d: checking health status against expected code %d", target.Config.ID, expectedStatus)
|
||||
// Check for specific status code
|
||||
if resp.StatusCode == expectedStatus {
|
||||
target.Status = StatusHealthy
|
||||
logger.Debug("Target %d: health check passed (status: %d, expected: %d)", target.Config.ID, resp.StatusCode, expectedStatus)
|
||||
} else {
|
||||
target.Status = StatusUnhealthy
|
||||
target.LastError = fmt.Sprintf("unexpected status code: %d (expected: %d)", resp.StatusCode, expectedStatus)
|
||||
logger.Warn("Target %d: health check failed with status code %d (expected: %d)", target.Config.ID, resp.StatusCode, expectedStatus)
|
||||
}
|
||||
} else {
|
||||
// Check for 2xx range
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
|
||||
target.Status = StatusHealthy
|
||||
logger.Debug("Target %d: health check passed (status: %d)", target.Config.ID, resp.StatusCode)
|
||||
} else {
|
||||
target.Status = StatusUnhealthy
|
||||
target.LastError = fmt.Sprintf("unhealthy status code: %d", resp.StatusCode)
|
||||
logger.Warn("Target %d: health check failed with status code %d", target.Config.ID, resp.StatusCode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops monitoring all targets
|
||||
func (m *Monitor) Stop() {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
targetCount := len(m.targets)
|
||||
logger.Info("Stopping health check monitor with %d targets", targetCount)
|
||||
|
||||
for id, target := range m.targets {
|
||||
logger.Debug("Stopping monitoring for target %d", id)
|
||||
target.cancel()
|
||||
}
|
||||
m.targets = make(map[int]*Target)
|
||||
|
||||
logger.Info("Health check monitor stopped")
|
||||
}
|
||||
|
||||
// EnableTarget enables monitoring for a specific target.
//
// If the target exists and is currently disabled, it is marked enabled, its
// old context is cancelled, a fresh context is installed, and a new
// monitoring goroutine is started. Enabling an already-enabled target is a
// no-op. Returns an error if no target with the given id exists.
func (m *Monitor) EnableTarget(id int) error {
	m.mutex.Lock()
	defer m.mutex.Unlock()

	target, exists := m.targets[id]
	if !exists {
		logger.Warn("Attempted to enable non-existent target with ID %d", id)
		return fmt.Errorf("target with id %d not found", id)
	}

	if !target.Config.Enabled {
		logger.Info("Enabling health check monitoring for target %d", id)
		target.Config.Enabled = true
		target.cancel() // Stop existing monitoring

		// Fresh context so the new monitoring goroutine has its own lifetime,
		// independent of the one just cancelled.
		ctx, cancel := context.WithCancel(context.Background())
		target.ctx = ctx
		target.cancel = cancel

		go m.monitorTarget(target)
	} else {
		logger.Debug("Target %d is already enabled", id)
	}

	return nil
}
|
||||
|
||||
// DisableTarget disables monitoring for a specific target
|
||||
func (m *Monitor) DisableTarget(id int) error {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
target, exists := m.targets[id]
|
||||
if !exists {
|
||||
logger.Warn("Attempted to disable non-existent target with ID %d", id)
|
||||
return fmt.Errorf("target with id %d not found", id)
|
||||
}
|
||||
|
||||
if target.Config.Enabled {
|
||||
logger.Info("Disabling health check monitoring for target %d", id)
|
||||
target.Config.Enabled = false
|
||||
target.cancel()
|
||||
target.Status = StatusUnknown
|
||||
|
||||
// Notify callback of status change
|
||||
if m.callback != nil {
|
||||
go m.callback(m.GetTargets())
|
||||
}
|
||||
} else {
|
||||
logger.Debug("Target %d is already disabled", id)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
602
holepunch/holepunch.go
Normal file
602
holepunch/holepunch.go
Normal file
@@ -0,0 +1,602 @@
|
||||
package holepunch
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/bind"
|
||||
"github.com/fosrl/newt/logger"
|
||||
"github.com/fosrl/newt/util"
|
||||
"golang.org/x/crypto/chacha20poly1305"
|
||||
"golang.org/x/crypto/curve25519"
|
||||
mrand "golang.org/x/exp/rand"
|
||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||
)
|
||||
|
||||
// ExitNode represents a WireGuard exit node for hole punching.
type ExitNode struct {
	Endpoint  string `json:"endpoint"`  // host or domain the node is reachable at; resolved before each punch
	RelayPort uint16 `json:"relayPort"` // UDP port the hole punch packets are sent to
	PublicKey string `json:"publicKey"` // node's WireGuard public key, used to encrypt the punch payload
	SiteIds   []int  `json:"siteIds,omitempty"` // site/peer IDs served through this node; node is dropped when empty
}
|
||||
|
||||
// Manager handles UDP hole punching operations.
//
// All fields are guarded by mu. A single background goroutine
// (runMultipleExitNodes) performs the actual sends while running is true.
type Manager struct {
	mu         sync.Mutex
	running    bool          // true while the punching goroutine is active
	stopChan   chan struct{} // closed by Stop to end the punching goroutine
	sharedBind *bind.SharedBind
	ID         string
	token      string // auth token included in each punch payload (set via SetToken)
	publicKey  string
	clientType string              // "newt" selects the newtId payload field; anything else uses olmId
	exitNodes  map[string]ExitNode // key is endpoint
	updateChan chan struct{}       // signals the goroutine to refresh exit nodes

	// Exponential-backoff state for punch sends: the current interval plus
	// the active min/max bounds, and the defaults restored by
	// ResetServerHolepunchInterval.
	sendHolepunchInterval    time.Duration
	sendHolepunchIntervalMin time.Duration
	sendHolepunchIntervalMax time.Duration
	defaultIntervalMin       time.Duration
	defaultIntervalMax       time.Duration
}
|
||||
|
||||
const defaultSendHolepunchIntervalMax = 60 * time.Second
|
||||
const defaultSendHolepunchIntervalMin = 1 * time.Second
|
||||
|
||||
// NewManager creates a new hole punch manager
|
||||
func NewManager(sharedBind *bind.SharedBind, ID string, clientType string, publicKey string) *Manager {
|
||||
return &Manager{
|
||||
sharedBind: sharedBind,
|
||||
ID: ID,
|
||||
clientType: clientType,
|
||||
publicKey: publicKey,
|
||||
exitNodes: make(map[string]ExitNode),
|
||||
sendHolepunchInterval: defaultSendHolepunchIntervalMin,
|
||||
sendHolepunchIntervalMin: defaultSendHolepunchIntervalMin,
|
||||
sendHolepunchIntervalMax: defaultSendHolepunchIntervalMax,
|
||||
defaultIntervalMin: defaultSendHolepunchIntervalMin,
|
||||
defaultIntervalMax: defaultSendHolepunchIntervalMax,
|
||||
}
|
||||
}
|
||||
|
||||
// SetToken updates the authentication token used for hole punching.
// Safe for concurrent use; the next punch payload picks up the new token.
func (m *Manager) SetToken(token string) {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.token = token
}
|
||||
|
||||
// IsRunning returns whether hole punching is currently active.
func (m *Manager) IsRunning() bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.running
}
|
||||
|
||||
// Stop stops any ongoing hole punch operations
|
||||
func (m *Manager) Stop() {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if !m.running {
|
||||
return
|
||||
}
|
||||
|
||||
if m.stopChan != nil {
|
||||
close(m.stopChan)
|
||||
m.stopChan = nil
|
||||
}
|
||||
|
||||
if m.updateChan != nil {
|
||||
close(m.updateChan)
|
||||
m.updateChan = nil
|
||||
}
|
||||
|
||||
m.running = false
|
||||
logger.Info("Hole punch manager stopped")
|
||||
}
|
||||
|
||||
// AddExitNode adds a new exit node to the rotation if it doesn't already exist
|
||||
func (m *Manager) AddExitNode(exitNode ExitNode) bool {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if _, exists := m.exitNodes[exitNode.Endpoint]; exists {
|
||||
logger.Debug("Exit node %s already exists in rotation", exitNode.Endpoint)
|
||||
return false
|
||||
}
|
||||
|
||||
m.exitNodes[exitNode.Endpoint] = exitNode
|
||||
logger.Info("Added exit node %s to hole punch rotation", exitNode.Endpoint)
|
||||
|
||||
// Signal the goroutine to refresh if running
|
||||
if m.running && m.updateChan != nil {
|
||||
select {
|
||||
case m.updateChan <- struct{}{}:
|
||||
default:
|
||||
// Channel full or closed, skip
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// RemoveExitNode removes an exit node from the rotation
|
||||
func (m *Manager) RemoveExitNode(endpoint string) bool {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if _, exists := m.exitNodes[endpoint]; !exists {
|
||||
logger.Debug("Exit node %s not found in rotation", endpoint)
|
||||
return false
|
||||
}
|
||||
|
||||
delete(m.exitNodes, endpoint)
|
||||
logger.Info("Removed exit node %s from hole punch rotation", endpoint)
|
||||
|
||||
// Signal the goroutine to refresh if running
|
||||
if m.running && m.updateChan != nil {
|
||||
select {
|
||||
case m.updateChan <- struct{}{}:
|
||||
default:
|
||||
// Channel full or closed, skip
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
/*
RemoveExitNodesByPeer removes the peer ID from the SiteIds list in each exit node.
If the SiteIds list becomes empty after removal, the exit node is removed entirely.
Returns the number of exit nodes removed.
*/
func (m *Manager) RemoveExitNodesByPeer(peerID int) int {
	m.mu.Lock()
	defer m.mu.Unlock()

	removed := 0
	for endpoint, node := range m.exitNodes {
		// Rebuild SiteIds without peerID (node is a copy of the map value,
		// so it must be stored back if anything changed).
		newSiteIds := make([]int, 0, len(node.SiteIds))
		for _, id := range node.SiteIds {
			if id != peerID {
				newSiteIds = append(newSiteIds, id)
			}
		}
		if len(newSiteIds) != len(node.SiteIds) {
			node.SiteIds = newSiteIds
			if len(node.SiteIds) == 0 {
				// Deleting during range is safe in Go for the current key.
				delete(m.exitNodes, endpoint)
				logger.Info("Removed exit node %s as no more site IDs remain after removing peer %d", endpoint, peerID)
				removed++
			} else {
				m.exitNodes[endpoint] = node
				logger.Info("Removed peer %d from exit node %s site IDs", peerID, endpoint)
			}
		}
	}

	if removed > 0 {
		// Signal the goroutine to refresh if running (non-blocking).
		if m.running && m.updateChan != nil {
			select {
			case m.updateChan <- struct{}{}:
			default:
				// Channel full or closed, skip
			}
		}
	}

	return removed
}
|
||||
|
||||
// GetExitNodes returns a copy of the current exit nodes
|
||||
func (m *Manager) GetExitNodes() []ExitNode {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
nodes := make([]ExitNode, 0, len(m.exitNodes))
|
||||
for _, node := range m.exitNodes {
|
||||
nodes = append(nodes, node)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// SetServerHolepunchInterval sets custom min and max intervals for hole punching.
// This is useful for low power mode where longer intervals are desired.
// The current interval is reset to the new minimum, and a running punching
// goroutine is nudged (non-blocking) to pick up the change.
func (m *Manager) SetServerHolepunchInterval(min, max time.Duration) {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.sendHolepunchIntervalMin = min
	m.sendHolepunchIntervalMax = max
	m.sendHolepunchInterval = min

	logger.Info("Set hole punch intervals: min=%v, max=%v", min, max)

	// Signal the goroutine to apply the new interval if running
	if m.running && m.updateChan != nil {
		select {
		case m.updateChan <- struct{}{}:
		default:
			// Channel full or closed, skip
		}
	}
}
|
||||
|
||||
// GetServerHolepunchInterval returns the currently configured min and max
// hole punch intervals.
func (m *Manager) GetServerHolepunchInterval() (min, max time.Duration) {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.sendHolepunchIntervalMin, m.sendHolepunchIntervalMax
}
|
||||
|
||||
// ResetServerHolepunchInterval resets the hole punch interval back to the default values.
|
||||
// This restores normal operation after low power mode or other custom settings.
|
||||
func (m *Manager) ResetServerHolepunchInterval() {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
m.sendHolepunchIntervalMin = m.defaultIntervalMin
|
||||
m.sendHolepunchIntervalMax = m.defaultIntervalMax
|
||||
m.sendHolepunchInterval = m.defaultIntervalMin
|
||||
|
||||
logger.Info("Reset hole punch intervals to defaults: min=%v, max=%v", m.defaultIntervalMin, m.defaultIntervalMax)
|
||||
|
||||
// Signal the goroutine to apply the new interval if running
|
||||
if m.running && m.updateChan != nil {
|
||||
select {
|
||||
case m.updateChan <- struct{}{}:
|
||||
default:
|
||||
// Channel full or closed, skip
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TriggerHolePunch sends an immediate hole punch packet to all configured exit nodes.
// This is useful for triggering hole punching on demand without waiting for the interval.
// Returns an error if no exit nodes are configured or if every send fails;
// partial failures are only logged. The resolution steps mirror resolveNodes
// inside runMultipleExitNodes.
func (m *Manager) TriggerHolePunch() error {
	m.mu.Lock()

	if len(m.exitNodes) == 0 {
		m.mu.Unlock()
		return fmt.Errorf("no exit nodes configured")
	}

	// Get a copy of exit nodes to work with, then release the lock so the
	// (potentially slow) DNS resolution and sends run unlocked.
	currentExitNodes := make([]ExitNode, 0, len(m.exitNodes))
	for _, node := range m.exitNodes {
		currentExitNodes = append(currentExitNodes, node)
	}
	m.mu.Unlock()

	logger.Info("Triggering on-demand hole punch to %d exit nodes", len(currentExitNodes))

	// Send hole punch to all exit nodes
	successCount := 0
	for _, exitNode := range currentExitNodes {
		host, err := util.ResolveDomain(exitNode.Endpoint)
		if err != nil {
			logger.Warn("Failed to resolve endpoint %s: %v", exitNode.Endpoint, err)
			continue
		}

		serverAddr := net.JoinHostPort(host, strconv.Itoa(int(exitNode.RelayPort)))
		remoteAddr, err := net.ResolveUDPAddr("udp", serverAddr)
		if err != nil {
			logger.Error("Failed to resolve UDP address %s: %v", serverAddr, err)
			continue
		}

		if err := m.sendHolePunch(remoteAddr, exitNode.PublicKey); err != nil {
			logger.Warn("Failed to send on-demand hole punch to %s: %v", exitNode.Endpoint, err)
			continue
		}

		logger.Debug("Sent on-demand hole punch to %s", exitNode.Endpoint)
		successCount++
	}

	if successCount == 0 {
		return fmt.Errorf("failed to send hole punch to any exit node")
	}

	logger.Info("Successfully sent on-demand hole punch to %d/%d exit nodes", successCount, len(currentExitNodes))
	return nil
}
|
||||
|
||||
// StartMultipleExitNodes starts hole punching to multiple exit nodes
|
||||
func (m *Manager) StartMultipleExitNodes(exitNodes []ExitNode) error {
|
||||
m.mu.Lock()
|
||||
|
||||
if m.running {
|
||||
m.mu.Unlock()
|
||||
logger.Debug("UDP hole punch already running, skipping new request")
|
||||
return fmt.Errorf("hole punch already running")
|
||||
}
|
||||
|
||||
// Populate exit nodes map
|
||||
m.exitNodes = make(map[string]ExitNode)
|
||||
for _, node := range exitNodes {
|
||||
m.exitNodes[node.Endpoint] = node
|
||||
}
|
||||
|
||||
m.running = true
|
||||
m.stopChan = make(chan struct{})
|
||||
m.updateChan = make(chan struct{}, 1)
|
||||
m.mu.Unlock()
|
||||
|
||||
logger.Debug("Starting UDP hole punch to %d exit nodes with shared bind", len(exitNodes))
|
||||
|
||||
go m.runMultipleExitNodes()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start starts hole punching with the current set of exit nodes.
//
// It is valid to call Start with an empty rotation: the goroutine idles until
// AddExitNode signals it via updateChan. Returns an error if punching is
// already running.
func (m *Manager) Start() error {
	m.mu.Lock()

	if m.running {
		m.mu.Unlock()
		logger.Debug("UDP hole punch already running")
		return fmt.Errorf("hole punch already running")
	}

	m.running = true
	m.stopChan = make(chan struct{})
	m.updateChan = make(chan struct{}, 1)
	nodeCount := len(m.exitNodes) // read under lock for the log below
	m.mu.Unlock()

	if nodeCount == 0 {
		logger.Info("Starting UDP hole punch manager (waiting for exit nodes to be added)")
	} else {
		logger.Info("Starting UDP hole punch with %d exit nodes", nodeCount)
	}

	go m.runMultipleExitNodes()

	return nil
}
|
||||
|
||||
// runMultipleExitNodes performs hole punching to multiple exit nodes.
//
// Runs as the manager's single background goroutine: it resolves every
// configured endpoint, sends an initial punch, then keeps punching on a
// ticker that backs off exponentially from the min interval to the max.
// A signal on updateChan re-resolves the node list and resets the backoff;
// closing stopChan ends the goroutine.
//
// NOTE(review): m.stopChan / m.updateChan are read here without the lock
// while Stop closes and nils them under the lock — a select iteration that
// observes the nil field would block on that case forever; consider
// capturing both channels locally at goroutine start. Similarly,
// m.sendHolepunchInterval is read for NewTicker/Reset outside the lock in
// two places below — confirm with -race.
func (m *Manager) runMultipleExitNodes() {
	defer func() {
		m.mu.Lock()
		m.running = false
		m.mu.Unlock()
		logger.Info("UDP hole punch goroutine ended for all exit nodes")
	}()

	// Resolve all endpoints upfront
	type resolvedExitNode struct {
		remoteAddr   *net.UDPAddr
		publicKey    string
		endpointName string
	}

	// resolveNodes snapshots the rotation under the lock, then resolves each
	// endpoint (DNS + UDP addr) unlocked; unresolvable nodes are skipped.
	resolveNodes := func() []resolvedExitNode {
		m.mu.Lock()
		currentExitNodes := make([]ExitNode, 0, len(m.exitNodes))
		for _, node := range m.exitNodes {
			currentExitNodes = append(currentExitNodes, node)
		}
		m.mu.Unlock()

		var resolvedNodes []resolvedExitNode
		for _, exitNode := range currentExitNodes {
			host, err := util.ResolveDomain(exitNode.Endpoint)
			if err != nil {
				logger.Warn("Failed to resolve endpoint %s: %v", exitNode.Endpoint, err)
				continue
			}

			serverAddr := net.JoinHostPort(host, strconv.Itoa(int(exitNode.RelayPort)))
			remoteAddr, err := net.ResolveUDPAddr("udp", serverAddr)
			if err != nil {
				logger.Error("Failed to resolve UDP address %s: %v", serverAddr, err)
				continue
			}

			resolvedNodes = append(resolvedNodes, resolvedExitNode{
				remoteAddr:   remoteAddr,
				publicKey:    exitNode.PublicKey,
				endpointName: exitNode.Endpoint,
			})
			logger.Debug("Resolved exit node: %s -> %s", exitNode.Endpoint, remoteAddr.String())
		}
		return resolvedNodes
	}

	resolvedNodes := resolveNodes()

	if len(resolvedNodes) == 0 {
		logger.Info("No exit nodes available yet, waiting for nodes to be added")
	} else {
		// Send initial hole punch to all exit nodes
		for _, node := range resolvedNodes {
			if err := m.sendHolePunch(node.remoteAddr, node.publicKey); err != nil {
				logger.Warn("Failed to send initial hole punch to %s: %v", node.endpointName, err)
			}
		}
	}

	// Start with minimum interval
	m.mu.Lock()
	m.sendHolepunchInterval = m.sendHolepunchIntervalMin
	m.mu.Unlock()

	ticker := time.NewTicker(m.sendHolepunchInterval)
	defer ticker.Stop()

	for {
		select {
		case <-m.stopChan:
			logger.Debug("Hole punch stopped by signal")
			return
		case <-m.updateChan:
			// Re-resolve exit nodes when update is signaled
			logger.Info("Refreshing exit nodes for hole punching")
			resolvedNodes = resolveNodes()
			if len(resolvedNodes) == 0 {
				logger.Warn("No exit nodes available after refresh")
			} else {
				logger.Info("Updated resolved nodes count: %d", len(resolvedNodes))
			}
			// Reset interval to minimum on update (restart the backoff).
			m.mu.Lock()
			m.sendHolepunchInterval = m.sendHolepunchIntervalMin
			m.mu.Unlock()
			ticker.Reset(m.sendHolepunchInterval)
			// Send immediate hole punch to newly resolved nodes
			for _, node := range resolvedNodes {
				if err := m.sendHolePunch(node.remoteAddr, node.publicKey); err != nil {
					logger.Debug("Failed to send hole punch to %s: %v", node.endpointName, err)
				}
			}
		case <-ticker.C:
			// Send hole punch to all exit nodes (if any are available)
			if len(resolvedNodes) > 0 {
				for _, node := range resolvedNodes {
					if err := m.sendHolePunch(node.remoteAddr, node.publicKey); err != nil {
						logger.Debug("Failed to send hole punch to %s: %v", node.endpointName, err)
					}
				}
				// Exponential backoff: double the interval up to max
				m.mu.Lock()
				newInterval := m.sendHolepunchInterval * 2
				if newInterval > m.sendHolepunchIntervalMax {
					newInterval = m.sendHolepunchIntervalMax
				}
				if newInterval != m.sendHolepunchInterval {
					m.sendHolepunchInterval = newInterval
					ticker.Reset(m.sendHolepunchInterval)
					logger.Debug("Increased hole punch interval to %v", m.sendHolepunchInterval)
				}
				m.mu.Unlock()
			}
		}
	}
}
|
||||
|
||||
// sendHolePunch sends an encrypted hole punch packet using the shared bind.
//
// The payload carries this client's ID, token and public key (the ID field is
// named newtId or olmId depending on clientType), is encrypted to the exit
// node's WireGuard public key via encryptPayload, and is written as JSON over
// the shared UDP socket. Returns an error if the token/key is missing or any
// marshal/encrypt/send step fails.
//
// m.publicKey and m.clientType are read without the lock — assumed immutable
// after NewManager; confirm no caller mutates them.
func (m *Manager) sendHolePunch(remoteAddr *net.UDPAddr, serverPubKey string) error {
	m.mu.Lock()
	token := m.token
	ID := m.ID
	m.mu.Unlock()

	if serverPubKey == "" || token == "" {
		return fmt.Errorf("server public key or OLM token is empty")
	}

	// Same payload shape either way; only the JSON name of the ID differs.
	var payload interface{}
	if m.clientType == "newt" {
		payload = struct {
			ID        string `json:"newtId"`
			Token     string `json:"token"`
			PublicKey string `json:"publicKey"`
		}{
			ID:        ID,
			Token:     token,
			PublicKey: m.publicKey,
		}
	} else {
		payload = struct {
			ID        string `json:"olmId"`
			Token     string `json:"token"`
			PublicKey string `json:"publicKey"`
		}{
			ID:        ID,
			Token:     token,
			PublicKey: m.publicKey,
		}
	}

	// Convert payload to JSON
	payloadBytes, err := json.Marshal(payload)
	if err != nil {
		return fmt.Errorf("failed to marshal payload: %w", err)
	}

	// Encrypt the payload using the server's WireGuard public key
	encryptedPayload, err := encryptPayload(payloadBytes, serverPubKey)
	if err != nil {
		return fmt.Errorf("failed to encrypt payload: %w", err)
	}

	jsonData, err := json.Marshal(encryptedPayload)
	if err != nil {
		return fmt.Errorf("failed to marshal encrypted payload: %w", err)
	}

	_, err = m.sharedBind.WriteToUDP(jsonData, remoteAddr)
	if err != nil {
		return fmt.Errorf("failed to write to UDP: %w", err)
	}

	logger.Debug("Sent UDP hole punch to %s: %s", remoteAddr.String(), string(jsonData))

	return nil
}
|
||||
|
||||
// encryptPayload encrypts the payload using ChaCha20-Poly1305 AEAD with X25519 key exchange.
//
// A fresh ephemeral X25519 keypair is generated per message; the shared
// secret derived against the server's WireGuard public key is used directly
// as the AEAD key, and the ephemeral public key, nonce and ciphertext are
// returned in a JSON-serializable struct so the server can decrypt.
//
// NOTE(review): the nonce is drawn from mrand (golang.org/x/exp/rand), a
// non-cryptographic PRNG — tester.go in this package uses crypto/rand for
// the same purpose. The per-message ephemeral key limits the impact of a
// predictable nonce, but this should be switched to crypto/rand.
// NOTE(review): the raw X25519 output is used as the AEAD key with no KDF —
// confirm the server side expects exactly this construction.
func encryptPayload(payload []byte, serverPublicKey string) (interface{}, error) {
	// Generate an ephemeral keypair for this message
	ephemeralPrivateKey, err := wgtypes.GeneratePrivateKey()
	if err != nil {
		return nil, fmt.Errorf("failed to generate ephemeral private key: %v", err)
	}
	ephemeralPublicKey := ephemeralPrivateKey.PublicKey()

	// Parse the server's public key
	serverPubKey, err := wgtypes.ParseKey(serverPublicKey)
	if err != nil {
		return nil, fmt.Errorf("failed to parse server public key: %v", err)
	}

	// Use X25519 for key exchange
	var ephPrivKeyFixed [32]byte
	copy(ephPrivKeyFixed[:], ephemeralPrivateKey[:])

	// Perform X25519 key exchange
	sharedSecret, err := curve25519.X25519(ephPrivKeyFixed[:], serverPubKey[:])
	if err != nil {
		return nil, fmt.Errorf("failed to perform X25519 key exchange: %v", err)
	}

	// Create an AEAD cipher using the shared secret
	aead, err := chacha20poly1305.New(sharedSecret)
	if err != nil {
		return nil, fmt.Errorf("failed to create AEAD cipher: %v", err)
	}

	// Generate a random nonce (see NOTE(review) above about the RNG source)
	nonce := make([]byte, aead.NonceSize())
	if _, err := mrand.Read(nonce); err != nil {
		return nil, fmt.Errorf("failed to generate nonce: %v", err)
	}

	// Encrypt the payload
	ciphertext := aead.Seal(nil, nonce, payload, nil)

	// Prepare the final encrypted message
	encryptedMsg := struct {
		EphemeralPublicKey string `json:"ephemeralPublicKey"`
		Nonce              []byte `json:"nonce"`
		Ciphertext         []byte `json:"ciphertext"`
	}{
		EphemeralPublicKey: ephemeralPublicKey.String(),
		Nonce:              nonce,
		Ciphertext:         ciphertext,
	}

	return encryptedMsg, nil
}
|
||||
404
holepunch/tester.go
Normal file
404
holepunch/tester.go
Normal file
@@ -0,0 +1,404 @@
|
||||
package holepunch
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/bind"
|
||||
"github.com/fosrl/newt/logger"
|
||||
"github.com/fosrl/newt/util"
|
||||
)
|
||||
|
||||
// TestResult represents the result of a connection test.
type TestResult struct {
	// Success indicates whether the test was successful
	Success bool
	// RTT is the round-trip time of the test packet
	RTT time.Duration
	// Endpoint is the endpoint that was tested
	Endpoint string
	// Error contains any error that occurred during the test (nil on success)
	Error error
}
|
||||
|
||||
// TestConnectionOptions configures the connection test.
// Use DefaultTestOptions for sensible defaults.
type TestConnectionOptions struct {
	// Timeout is how long to wait for a response (default: 5 seconds)
	Timeout time.Duration
	// Retries is the number of times to retry on failure (default: 0)
	Retries int
}
|
||||
|
||||
// DefaultTestOptions returns the default test options
|
||||
func DefaultTestOptions() TestConnectionOptions {
|
||||
return TestConnectionOptions{
|
||||
Timeout: 5 * time.Second,
|
||||
Retries: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// cachedAddr holds a cached resolved UDP address together with the time it
// was resolved, so entries can expire after addrCacheTTL.
type cachedAddr struct {
	addr       *net.UDPAddr
	resolvedAt time.Time // when addr was resolved; compared against addrCacheTTL
}
|
||||
|
||||
// HolepunchTester monitors holepunch connectivity using magic packets.
//
// It registers itself as the magic-response callback on the SharedBind and
// matches responses to outstanding requests by their echo data.
type HolepunchTester struct {
	sharedBind *bind.SharedBind
	mu         sync.RWMutex // guards running, stopChan, callback
	running    bool
	stopChan   chan struct{}

	// Pending requests waiting for responses (key: echo data as string)
	pendingRequests sync.Map // map[string]*pendingRequest

	// Callback when connection status changes
	callback HolepunchStatusCallback

	// Address cache to avoid repeated DNS/UDP resolution
	addrCache    map[string]*cachedAddr
	addrCacheMu  sync.RWMutex
	addrCacheTTL time.Duration // How long cached addresses are valid
}
|
||||
|
||||
// HolepunchStatus represents the status of a holepunch connection.
type HolepunchStatus struct {
	Endpoint  string        // endpoint the status refers to
	Connected bool          // whether the last test succeeded
	RTT       time.Duration // round-trip time of the last successful test
}
|
||||
|
||||
// HolepunchStatusCallback is called when holepunch status changes.
type HolepunchStatusCallback func(status HolepunchStatus)
|
||||
|
||||
// pendingRequest tracks a test request that is waiting for its echoed
// response; replyChan receives the measured round-trip time.
type pendingRequest struct {
	endpoint  string             // endpoint the probe was sent to
	sentAt    time.Time          // send timestamp used to compute the RTT
	replyChan chan time.Duration // buffered reply channel (see TestEndpoint)
}
|
||||
|
||||
// NewHolepunchTester creates a new holepunch tester using the given SharedBind
|
||||
func NewHolepunchTester(sharedBind *bind.SharedBind) *HolepunchTester {
|
||||
return &HolepunchTester{
|
||||
sharedBind: sharedBind,
|
||||
addrCache: make(map[string]*cachedAddr),
|
||||
addrCacheTTL: 5 * time.Minute, // Cache addresses for 5 minutes
|
||||
}
|
||||
}
|
||||
|
||||
// SetCallback sets the callback for connection status changes
|
||||
func (t *HolepunchTester) SetCallback(callback HolepunchStatusCallback) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
t.callback = callback
|
||||
}
|
||||
|
||||
// Start begins listening for magic packet responses
|
||||
func (t *HolepunchTester) Start() error {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
if t.running {
|
||||
return fmt.Errorf("tester already running")
|
||||
}
|
||||
|
||||
if t.sharedBind == nil {
|
||||
return fmt.Errorf("sharedBind is nil")
|
||||
}
|
||||
|
||||
t.running = true
|
||||
t.stopChan = make(chan struct{})
|
||||
|
||||
// Register our callback with the SharedBind to receive magic responses
|
||||
t.sharedBind.SetMagicResponseCallback(t.handleResponse)
|
||||
|
||||
logger.Debug("HolepunchTester started")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the tester: it clears the SharedBind callback, cancels every
// in-flight test request, and empties the address cache. Calling Stop when
// the tester is not running is a no-op.
func (t *HolepunchTester) Stop() {
	t.mu.Lock()
	defer t.mu.Unlock()

	if !t.running {
		return
	}

	t.running = false
	close(t.stopChan)

	// Clear the callback so SharedBind stops delivering magic responses.
	if t.sharedBind != nil {
		t.sharedBind.SetMagicResponseCallback(nil)
	}

	// Cancel all pending requests by closing their reply channels; waiters
	// in TestEndpoint observe the close and report "request cancelled".
	// NOTE(review): handleResponse may concurrently LoadAndDelete the same
	// entry and then send on replyChan; a send racing this close would
	// panic — confirm SharedBind guarantees no response delivery after
	// SetMagicResponseCallback(nil) returns.
	t.pendingRequests.Range(func(key, value interface{}) bool {
		if req, ok := value.(*pendingRequest); ok {
			close(req.replyChan)
		}
		t.pendingRequests.Delete(key)
		return true
	})

	// Clear address cache so a restart begins with fresh resolutions.
	t.addrCacheMu.Lock()
	t.addrCache = make(map[string]*cachedAddr)
	t.addrCacheMu.Unlock()

	logger.Debug("HolepunchTester stopped")
}
|
||||
|
||||
// resolveEndpoint resolves an endpoint to a UDP address, using cache when possible
|
||||
func (t *HolepunchTester) resolveEndpoint(endpoint string) (*net.UDPAddr, error) {
|
||||
// Check cache first
|
||||
t.addrCacheMu.RLock()
|
||||
cached, ok := t.addrCache[endpoint]
|
||||
ttl := t.addrCacheTTL
|
||||
t.addrCacheMu.RUnlock()
|
||||
|
||||
if ok && time.Since(cached.resolvedAt) < ttl {
|
||||
return cached.addr, nil
|
||||
}
|
||||
|
||||
// Resolve the endpoint
|
||||
host, err := util.ResolveDomain(endpoint)
|
||||
if err != nil {
|
||||
host = endpoint
|
||||
}
|
||||
|
||||
_, _, err = net.SplitHostPort(host)
|
||||
if err != nil {
|
||||
host = net.JoinHostPort(host, "21820")
|
||||
}
|
||||
|
||||
remoteAddr, err := net.ResolveUDPAddr("udp", host)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve UDP address %s: %w", host, err)
|
||||
}
|
||||
|
||||
// Cache the result
|
||||
t.addrCacheMu.Lock()
|
||||
t.addrCache[endpoint] = &cachedAddr{
|
||||
addr: remoteAddr,
|
||||
resolvedAt: time.Now(),
|
||||
}
|
||||
t.addrCacheMu.Unlock()
|
||||
|
||||
return remoteAddr, nil
|
||||
}
|
||||
|
||||
// InvalidateCache removes a specific endpoint from the address cache
|
||||
func (t *HolepunchTester) InvalidateCache(endpoint string) {
|
||||
t.addrCacheMu.Lock()
|
||||
delete(t.addrCache, endpoint)
|
||||
t.addrCacheMu.Unlock()
|
||||
}
|
||||
|
||||
// ClearCache clears all cached addresses
|
||||
func (t *HolepunchTester) ClearCache() {
|
||||
t.addrCacheMu.Lock()
|
||||
t.addrCache = make(map[string]*cachedAddr)
|
||||
t.addrCacheMu.Unlock()
|
||||
}
|
||||
|
||||
// handleResponse is called by SharedBind when a magic response is received.
// It matches the echoed payload against a pending request and, on a match,
// delivers the measured round-trip time to the waiting TestEndpoint call.
func (t *HolepunchTester) handleResponse(addr netip.AddrPort, echoData []byte) {
	// logger.Debug("Received magic response from %s", addr.String())
	// The echoed random payload doubles as the lookup key.
	key := string(echoData)

	value, ok := t.pendingRequests.LoadAndDelete(key)
	if !ok {
		// No matching request found (e.g. it already timed out and was removed).
		logger.Debug("No pending request found for magic response from %s", addr.String())
		return
	}

	req := value.(*pendingRequest)
	rtt := time.Since(req.sentAt)
	// logger.Debug("Magic response matched pending request for %s (RTT: %v)", req.endpoint, rtt)

	// Send RTT to the waiting goroutine (non-blocking: replyChan has
	// capacity 1, so default only fires if a value was already delivered).
	// NOTE(review): if Stop closed replyChan concurrently, this send would
	// panic — confirm response delivery ceases before Stop closes channels.
	select {
	case req.replyChan <- rtt:
	default:
	}
}
|
||||
|
||||
// TestEndpoint sends a magic test packet to the endpoint and waits for a response.
// This uses the SharedBind so packets come from the same source port as WireGuard.
// On success the returned TestResult carries the measured RTT; otherwise
// Error describes the failure (not running, resolution, send, cancel, timeout).
func (t *HolepunchTester) TestEndpoint(endpoint string, timeout time.Duration) TestResult {
	result := TestResult{
		Endpoint: endpoint,
	}

	// Snapshot running state and the bind under the read lock.
	t.mu.RLock()
	running := t.running
	sharedBind := t.sharedBind
	t.mu.RUnlock()

	if !running {
		result.Error = fmt.Errorf("tester not running")
		return result
	}

	if sharedBind == nil || sharedBind.IsClosed() {
		result.Error = fmt.Errorf("sharedBind is nil or closed")
		return result
	}

	// Resolve the endpoint (using cache)
	remoteAddr, err := t.resolveEndpoint(endpoint)
	if err != nil {
		result.Error = err
		return result
	}

	// Generate random data for the test packet; the payload doubles as the
	// key that matches the echoed response back to this request.
	randomData := make([]byte, bind.MagicPacketDataLen)
	if _, err := rand.Read(randomData); err != nil {
		result.Error = fmt.Errorf("failed to generate random data: %w", err)
		return result
	}

	// Create a pending request; replyChan is buffered (capacity 1) so
	// handleResponse never blocks when delivering the RTT.
	req := &pendingRequest{
		endpoint:  endpoint,
		sentAt:    time.Now(),
		replyChan: make(chan time.Duration, 1),
	}

	key := string(randomData)
	t.pendingRequests.Store(key, req)

	// Build the test request packet: magic prefix followed by the echo data.
	request := make([]byte, bind.MagicTestRequestLen)
	copy(request, bind.MagicTestRequest)
	copy(request[len(bind.MagicTestRequest):], randomData)

	// Send the test packet
	_, err = sharedBind.WriteToUDP(request, remoteAddr)
	if err != nil {
		t.pendingRequests.Delete(key)
		result.Error = fmt.Errorf("failed to send test packet: %w", err)
		return result
	}

	// Wait for response with timeout. A closed replyChan (ok == false)
	// means Stop cancelled the request.
	select {
	case rtt, ok := <-req.replyChan:
		if ok {
			result.Success = true
			result.RTT = rtt
		} else {
			result.Error = fmt.Errorf("request cancelled")
		}
	case <-time.After(timeout):
		// Remove the stale entry so a late response is simply ignored.
		t.pendingRequests.Delete(key)
		result.Error = fmt.Errorf("timeout waiting for response")
	}

	return result
}
|
||||
|
||||
// TestConnectionWithBind sends a magic test packet using an existing SharedBind.
// This is useful when you want to test the connection through the same socket
// that WireGuard is using, which tests the actual hole-punched path.
//
// Note: because the socket is shared with WireGuard, this function does NOT
// wait for a reply; Success only means the packet was sent. Use a
// HolepunchTester (TestEndpoint) for a real round-trip measurement.
func TestConnectionWithBind(sharedBind *bind.SharedBind, endpoint string, opts *TestConnectionOptions) TestResult {
	if opts == nil {
		defaultOpts := DefaultTestOptions()
		opts = &defaultOpts
	}

	result := TestResult{
		Endpoint: endpoint,
	}

	if sharedBind == nil {
		result.Error = fmt.Errorf("sharedBind is nil")
		return result
	}

	if sharedBind.IsClosed() {
		result.Error = fmt.Errorf("sharedBind is closed")
		return result
	}

	// Resolve the endpoint; fall back to the raw string if resolution fails.
	host, err := util.ResolveDomain(endpoint)
	if err != nil {
		host = endpoint
	}

	// Append the default holepunch port when none was supplied.
	_, _, err = net.SplitHostPort(host)
	if err != nil {
		host = net.JoinHostPort(host, "21820")
	}

	remoteAddr, err := net.ResolveUDPAddr("udp", host)
	if err != nil {
		result.Error = fmt.Errorf("failed to resolve UDP address %s: %w", host, err)
		return result
	}

	// Generate random data for the test packet
	randomData := make([]byte, bind.MagicPacketDataLen)
	if _, err := rand.Read(randomData); err != nil {
		result.Error = fmt.Errorf("failed to generate random data: %w", err)
		return result
	}

	// Build the test request packet: magic prefix + random echo payload.
	request := make([]byte, bind.MagicTestRequestLen)
	copy(request, bind.MagicTestRequest)
	copy(request[len(bind.MagicTestRequest):], randomData)

	// Get the underlying UDP connection to set read deadline and read response
	// NOTE(review): udpConn is only nil-checked and never used further here;
	// presumably a sanity check for the shared socket — confirm it is needed.
	udpConn := sharedBind.GetUDPConn()
	if udpConn == nil {
		result.Error = fmt.Errorf("could not get UDP connection from SharedBind")
		return result
	}

	attempts := opts.Retries + 1
	for attempt := 0; attempt < attempts; attempt++ {
		if attempt > 0 {
			logger.Debug("Retrying connection test to %s (attempt %d/%d)", endpoint, attempt+1, attempts)
		}

		// Note: We can't easily set a read deadline on the shared connection
		// without affecting WireGuard, so we use a goroutine with timeout instead
		startTime := time.Now()

		// Send the test packet through the shared bind
		_, err = sharedBind.WriteToUDP(request, remoteAddr)
		if err != nil {
			result.Error = fmt.Errorf("failed to send test packet: %w", err)
			if attempt < attempts-1 {
				continue
			}
			return result
		}

		// For shared bind test, we send the packet but can't easily wait for
		// response without interfering with WireGuard's receive loop.
		// The response will be handled by SharedBind automatically.
		// We consider the test successful if the send succeeded.
		// For a full round-trip test, use TestConnection() with a separate socket.

		result.RTT = time.Since(startTime)
		result.Success = true
		result.Error = nil
		logger.Debug("Test packet sent to %s via SharedBind", endpoint)
		return result
	}

	return result
}
|
||||
80
internal/state/telemetry_view.go
Normal file
80
internal/state/telemetry_view.go
Normal file
@@ -0,0 +1,80 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/internal/telemetry"
|
||||
)
|
||||
|
||||
// TelemetryView is a minimal, thread-safe implementation to feed observables.
// Since one Newt process represents one site, we expose a single logical site.
// site_id is a resource attribute, so we do not emit per-site labels here.
type TelemetryView struct {
	online     atomic.Bool  // current online state of the (single) site
	lastHBUnix atomic.Int64 // unix seconds of the last heartbeat; 0 = never
	// per-tunnel sessions, guarded by sessMu
	sessMu   sync.RWMutex
	sessions map[string]*atomic.Int64
}
|
||||
|
||||
// globalView lazily holds the process-wide singleton created by Global.
var (
	globalView atomic.Pointer[TelemetryView]
)
|
||||
|
||||
// Global returns a singleton TelemetryView.
|
||||
func Global() *TelemetryView {
|
||||
if v := globalView.Load(); v != nil { return v }
|
||||
v := &TelemetryView{ sessions: make(map[string]*atomic.Int64) }
|
||||
globalView.Store(v)
|
||||
telemetry.RegisterStateView(v)
|
||||
return v
|
||||
}
|
||||
|
||||
// Instrumentation helpers
|
||||
func (v *TelemetryView) IncSessions(tunnelID string) {
|
||||
v.sessMu.Lock(); defer v.sessMu.Unlock()
|
||||
c := v.sessions[tunnelID]
|
||||
if c == nil { c = &atomic.Int64{}; v.sessions[tunnelID] = c }
|
||||
c.Add(1)
|
||||
}
|
||||
func (v *TelemetryView) DecSessions(tunnelID string) {
|
||||
v.sessMu.Lock(); defer v.sessMu.Unlock()
|
||||
if c := v.sessions[tunnelID]; c != nil {
|
||||
c.Add(-1)
|
||||
if c.Load() <= 0 { delete(v.sessions, tunnelID) }
|
||||
}
|
||||
}
|
||||
func (v *TelemetryView) ClearTunnel(tunnelID string) {
|
||||
v.sessMu.Lock(); defer v.sessMu.Unlock()
|
||||
delete(v.sessions, tunnelID)
|
||||
}
|
||||
// SetOnline records whether the site is currently online.
func (v *TelemetryView) SetOnline(b bool) { v.online.Store(b) }
|
||||
// TouchHeartbeat records "now" (unix seconds) as the last heartbeat time.
func (v *TelemetryView) TouchHeartbeat() { v.lastHBUnix.Store(time.Now().Unix()) }
|
||||
|
||||
// --- telemetry.StateView interface ---
|
||||
|
||||
// ListSites returns the single logical site this process represents.
func (v *TelemetryView) ListSites() []string { return []string{"self"} }
|
||||
// Online reports the online state; the second return is always true since
// the single site always has a known state.
func (v *TelemetryView) Online(_ string) (bool, bool) { return v.online.Load(), true }
|
||||
func (v *TelemetryView) LastHeartbeat(_ string) (time.Time, bool) {
|
||||
sec := v.lastHBUnix.Load()
|
||||
if sec == 0 { return time.Time{}, false }
|
||||
return time.Unix(sec, 0), true
|
||||
}
|
||||
func (v *TelemetryView) ActiveSessions(_ string) (int64, bool) {
|
||||
// aggregated sessions (not used for per-tunnel gauge)
|
||||
v.sessMu.RLock(); defer v.sessMu.RUnlock()
|
||||
var sum int64
|
||||
for _, c := range v.sessions { if c != nil { sum += c.Load() } }
|
||||
return sum, true
|
||||
}
|
||||
|
||||
// Extended accessor used by telemetry callback to publish per-tunnel samples.
|
||||
func (v *TelemetryView) SessionsByTunnel() map[string]int64 {
|
||||
v.sessMu.RLock(); defer v.sessMu.RUnlock()
|
||||
out := make(map[string]int64, len(v.sessions))
|
||||
for id, c := range v.sessions { if c != nil && c.Load() > 0 { out[id] = c.Load() } }
|
||||
return out
|
||||
}
|
||||
|
||||
19
internal/telemetry/constants.go
Normal file
19
internal/telemetry/constants.go
Normal file
@@ -0,0 +1,19 @@
|
||||
package telemetry
|
||||
|
||||
// Protocol labels (low-cardinality) used as metric attribute values.
const (
	ProtocolTCP = "tcp"
	ProtocolUDP = "udp"
)
|
||||
|
||||
// Reconnect reason bins (fixed, low-cardinality). A closed set keeps the
// "reason" metric label from exploding in cardinality.
const (
	ReasonServerRequest  = "server_request"
	ReasonTimeout        = "timeout"
	ReasonPeerClose      = "peer_close"
	ReasonNetworkChange  = "network_change"
	ReasonAuthError      = "auth_error"
	ReasonHandshakeError = "handshake_error"
	ReasonConfigChange   = "config_change"
	ReasonError          = "error"
)
|
||||
32
internal/telemetry/constants_test.go
Normal file
32
internal/telemetry/constants_test.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package telemetry
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestAllowedConstants(t *testing.T) {
|
||||
allowedReasons := map[string]struct{}{
|
||||
ReasonServerRequest: {},
|
||||
ReasonTimeout: {},
|
||||
ReasonPeerClose: {},
|
||||
ReasonNetworkChange: {},
|
||||
ReasonAuthError: {},
|
||||
ReasonHandshakeError: {},
|
||||
ReasonConfigChange: {},
|
||||
ReasonError: {},
|
||||
}
|
||||
for k := range allowedReasons {
|
||||
if k == "" {
|
||||
t.Fatalf("empty reason constant")
|
||||
}
|
||||
}
|
||||
|
||||
allowedProtocols := map[string]struct{}{
|
||||
ProtocolTCP: {},
|
||||
ProtocolUDP: {},
|
||||
}
|
||||
for k := range allowedProtocols {
|
||||
if k == "" {
|
||||
t.Fatalf("empty protocol constant")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
542
internal/telemetry/metrics.go
Normal file
542
internal/telemetry/metrics.go
Normal file
@@ -0,0 +1,542 @@
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
)
|
||||
|
||||
// Instruments and helpers for Newt metrics following the naming, units, and
// low-cardinality label guidance from the issue description.
//
// Counters end with _total, durations are in seconds, sizes in bytes.
// Only low-cardinality stable labels are supported: tunnel_id,
// transport, direction, result, reason, error_type.
var (
	initOnce sync.Once // guards one-time instrument registration

	meter metric.Meter // shared meter; assigned in registerInstruments

	// Site / Registration
	mSiteRegistrations metric.Int64Counter
	mSiteOnline        metric.Int64ObservableGauge
	mSiteLastHeartbeat metric.Float64ObservableGauge

	// Tunnel / Sessions
	mTunnelSessions metric.Int64ObservableGauge
	mTunnelBytes    metric.Int64Counter
	mTunnelLatency  metric.Float64Histogram
	mReconnects     metric.Int64Counter

	// Connection / NAT
	mConnAttempts metric.Int64Counter
	mConnErrors   metric.Int64Counter

	// Config/Restart
	mConfigReloads     metric.Int64Counter
	mConfigApply       metric.Float64Histogram
	mCertRotationTotal metric.Int64Counter
	mProcessStartTime  metric.Float64ObservableGauge

	// Build info
	mBuildInfo metric.Int64ObservableGauge

	// WebSocket
	mWSConnectLatency   metric.Float64Histogram
	mWSMessages         metric.Int64Counter
	mWSDisconnects      metric.Int64Counter
	mWSKeepaliveFailure metric.Int64Counter
	mWSSessionDuration  metric.Float64Histogram
	mWSConnected        metric.Int64ObservableGauge
	mWSReconnects       metric.Int64Counter

	// Proxy
	mProxyActiveConns      metric.Int64ObservableGauge
	mProxyBufferBytes      metric.Int64ObservableGauge
	mProxyAsyncBacklogByte metric.Int64ObservableGauge
	mProxyDropsTotal       metric.Int64Counter
	mProxyAcceptsTotal     metric.Int64Counter
	mProxyConnDuration     metric.Float64Histogram
	mProxyConnectionsTotal metric.Int64Counter

	// Build metadata set via RegisterBuildInfo and reported by the
	// newt_build_info callback.
	buildVersion string
	buildCommit  string
	// Process start, captured once at package init, in unix seconds.
	processStartUnix = float64(time.Now().UnixNano()) / 1e9
	// Backing value for the newt_websocket_connected gauge.
	wsConnectedState atomic.Int64
)
|
||||
|
||||
// Proxy connection lifecycle events.
// NOTE(review): presumably used as label values on
// newt_proxy_connections_total — confirm at the call sites.
const (
	ProxyConnectionOpened = "opened"
	ProxyConnectionClosed = "closed"
)
|
||||
|
||||
// attrsWithSite appends site/region labels only when explicitly enabled to keep
|
||||
// label cardinality low by default.
|
||||
func attrsWithSite(extra ...attribute.KeyValue) []attribute.KeyValue {
|
||||
attrs := make([]attribute.KeyValue, len(extra))
|
||||
copy(attrs, extra)
|
||||
if ShouldIncludeSiteLabels() {
|
||||
attrs = append(attrs, siteAttrs()...)
|
||||
}
|
||||
return attrs
|
||||
}
|
||||
|
||||
func registerInstruments() error {
|
||||
var err error
|
||||
initOnce.Do(func() {
|
||||
meter = otel.Meter("newt")
|
||||
if e := registerSiteInstruments(); e != nil {
|
||||
err = e
|
||||
return
|
||||
}
|
||||
if e := registerTunnelInstruments(); e != nil {
|
||||
err = e
|
||||
return
|
||||
}
|
||||
if e := registerConnInstruments(); e != nil {
|
||||
err = e
|
||||
return
|
||||
}
|
||||
if e := registerConfigInstruments(); e != nil {
|
||||
err = e
|
||||
return
|
||||
}
|
||||
if e := registerBuildWSProxyInstruments(); e != nil {
|
||||
err = e
|
||||
return
|
||||
}
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func registerSiteInstruments() error {
|
||||
var err error
|
||||
mSiteRegistrations, err = meter.Int64Counter("newt_site_registrations_total",
|
||||
metric.WithDescription("Total site registration attempts"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mSiteOnline, err = meter.Int64ObservableGauge("newt_site_online",
|
||||
metric.WithDescription("Site online (0/1)"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mSiteLastHeartbeat, err = meter.Float64ObservableGauge("newt_site_last_heartbeat_timestamp_seconds",
|
||||
metric.WithDescription("Unix timestamp of the last site heartbeat"),
|
||||
metric.WithUnit("s"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func registerTunnelInstruments() error {
|
||||
var err error
|
||||
mTunnelSessions, err = meter.Int64ObservableGauge("newt_tunnel_sessions",
|
||||
metric.WithDescription("Active tunnel sessions"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mTunnelBytes, err = meter.Int64Counter("newt_tunnel_bytes_total",
|
||||
metric.WithDescription("Tunnel bytes ingress/egress"),
|
||||
metric.WithUnit("By"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mTunnelLatency, err = meter.Float64Histogram("newt_tunnel_latency_seconds",
|
||||
metric.WithDescription("Per-tunnel latency in seconds"),
|
||||
metric.WithUnit("s"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mReconnects, err = meter.Int64Counter("newt_tunnel_reconnects_total",
|
||||
metric.WithDescription("Tunnel reconnect events"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func registerConnInstruments() error {
|
||||
var err error
|
||||
mConnAttempts, err = meter.Int64Counter("newt_connection_attempts_total",
|
||||
metric.WithDescription("Connection attempts"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mConnErrors, err = meter.Int64Counter("newt_connection_errors_total",
|
||||
metric.WithDescription("Connection errors by type"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func registerConfigInstruments() error {
|
||||
mConfigReloads, _ = meter.Int64Counter("newt_config_reloads_total",
|
||||
metric.WithDescription("Configuration reloads"))
|
||||
mConfigApply, _ = meter.Float64Histogram("newt_config_apply_seconds",
|
||||
metric.WithDescription("Configuration apply duration in seconds"),
|
||||
metric.WithUnit("s"))
|
||||
mCertRotationTotal, _ = meter.Int64Counter("newt_cert_rotation_total",
|
||||
metric.WithDescription("Certificate rotation events (success/failure)"))
|
||||
mProcessStartTime, _ = meter.Float64ObservableGauge("process_start_time_seconds",
|
||||
metric.WithDescription("Unix timestamp of the process start time"),
|
||||
metric.WithUnit("s"))
|
||||
if mProcessStartTime != nil {
|
||||
if _, err := meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
|
||||
o.ObserveFloat64(mProcessStartTime, processStartUnix)
|
||||
return nil
|
||||
}, mProcessStartTime); err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// registerBuildWSProxyInstruments creates the build-info, WebSocket, and
// proxy instruments, and registers collection callbacks for build info and
// the WebSocket connected-state gauge. It always returns nil.
// NOTE(review): instrument-creation errors are discarded with `_` here,
// unlike the site/tunnel/conn registrars; a failed creation leaves a nil
// instrument — confirm whether this is intentional.
func registerBuildWSProxyInstruments() error {
	// Build info gauge (value 1 with version/commit attributes)
	mBuildInfo, _ = meter.Int64ObservableGauge("newt_build_info",
		metric.WithDescription("Newt build information (value is always 1)"))
	// WebSocket
	mWSConnectLatency, _ = meter.Float64Histogram("newt_websocket_connect_latency_seconds",
		metric.WithDescription("WebSocket connect latency in seconds"),
		metric.WithUnit("s"))
	mWSMessages, _ = meter.Int64Counter("newt_websocket_messages_total",
		metric.WithDescription("WebSocket messages by direction and type"))
	mWSDisconnects, _ = meter.Int64Counter("newt_websocket_disconnects_total",
		metric.WithDescription("WebSocket disconnects by reason/result"))
	mWSKeepaliveFailure, _ = meter.Int64Counter("newt_websocket_keepalive_failures_total",
		metric.WithDescription("WebSocket keepalive (ping/pong) failures"))
	mWSSessionDuration, _ = meter.Float64Histogram("newt_websocket_session_duration_seconds",
		metric.WithDescription("Duration of established WebSocket sessions"),
		metric.WithUnit("s"))
	mWSConnected, _ = meter.Int64ObservableGauge("newt_websocket_connected",
		metric.WithDescription("WebSocket connection state (1=connected, 0=disconnected)"))
	mWSReconnects, _ = meter.Int64Counter("newt_websocket_reconnects_total",
		metric.WithDescription("WebSocket reconnect attempts by reason"))
	// Proxy
	mProxyActiveConns, _ = meter.Int64ObservableGauge("newt_proxy_active_connections",
		metric.WithDescription("Proxy active connections per tunnel and protocol"))
	mProxyBufferBytes, _ = meter.Int64ObservableGauge("newt_proxy_buffer_bytes",
		metric.WithDescription("Proxy buffer bytes (may approximate async backlog)"),
		metric.WithUnit("By"))
	mProxyAsyncBacklogByte, _ = meter.Int64ObservableGauge("newt_proxy_async_backlog_bytes",
		metric.WithDescription("Unflushed async byte backlog per tunnel and protocol"),
		metric.WithUnit("By"))
	mProxyDropsTotal, _ = meter.Int64Counter("newt_proxy_drops_total",
		metric.WithDescription("Proxy drops due to write errors"))
	mProxyAcceptsTotal, _ = meter.Int64Counter("newt_proxy_accept_total",
		metric.WithDescription("Proxy connection accepts by protocol and result"))
	mProxyConnDuration, _ = meter.Float64Histogram("newt_proxy_connection_duration_seconds",
		metric.WithDescription("Duration of completed proxy connections"),
		metric.WithUnit("s"))
	mProxyConnectionsTotal, _ = meter.Int64Counter("newt_proxy_connections_total",
		metric.WithDescription("Proxy connection lifecycle events by protocol"))
	// Register a default callback for build info if version/commit set
	reg, e := meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
		if buildVersion == "" && buildCommit == "" {
			return nil
		}
		attrs := []attribute.KeyValue{}
		if buildVersion != "" {
			attrs = append(attrs, attribute.String("version", buildVersion))
		}
		if buildCommit != "" {
			attrs = append(attrs, attribute.String("commit", buildCommit))
		}
		if ShouldIncludeSiteLabels() {
			attrs = append(attrs, siteAttrs()...)
		}
		o.ObserveInt64(mBuildInfo, 1, metric.WithAttributes(attrs...))
		return nil
	}, mBuildInfo)
	if e != nil {
		otel.Handle(e)
	} else {
		// Provide a functional stopper that unregisters the callback.
		// NOTE(review): obsStopper is also assigned by SetObservableCallback,
		// which can overwrite this handle — confirm the intended ownership.
		obsStopper = func() { _ = reg.Unregister() }
	}
	if mWSConnected != nil {
		// Observe the connected-state gauge from the atomic backing value
		// maintained by SetWSConnectionState.
		if regConn, err := meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
			val := wsConnectedState.Load()
			o.ObserveInt64(mWSConnected, val, metric.WithAttributes(attrsWithSite()...))
			return nil
		}, mWSConnected); err != nil {
			otel.Handle(err)
		} else {
			wsConnStopper = func() { _ = regConn.Unregister() }
		}
	}
	return nil
}
|
||||
|
||||
// Observable registration: Newt can register a callback to report gauges.
// Call SetObservableCallback once to start observing online status, last
// heartbeat seconds, and active sessions.

var (
	obsOnce       sync.Once // guards SetObservableCallback's one-time registration
	obsStopper    func()    // unregisters observable callbacks (site gauges and/or build info)
	proxyObsOnce  sync.Once // guards SetProxyObservableCallback
	proxyStopper  func()    // unregisters the proxy gauge callback
	wsConnStopper func()    // unregisters the WebSocket connected-state callback
)
|
||||
|
||||
// SetObservableCallback registers a single callback that will be invoked
|
||||
// on collection. Use the provided observer to emit values for the observable
|
||||
// gauges defined here.
|
||||
//
|
||||
// Example inside your code (where you have access to current state):
|
||||
//
|
||||
// telemetry.SetObservableCallback(func(ctx context.Context, o metric.Observer) error {
|
||||
// o.ObserveInt64(mSiteOnline, 1)
|
||||
// o.ObserveFloat64(mSiteLastHeartbeat, float64(lastHB.Unix()))
|
||||
// o.ObserveInt64(mTunnelSessions, int64(len(activeSessions)))
|
||||
// return nil
|
||||
// })
|
||||
func SetObservableCallback(cb func(context.Context, metric.Observer) error) {
|
||||
obsOnce.Do(func() {
|
||||
reg, e := meter.RegisterCallback(cb, mSiteOnline, mSiteLastHeartbeat, mTunnelSessions)
|
||||
if e != nil {
|
||||
otel.Handle(e)
|
||||
obsStopper = func() {
|
||||
// no-op: registration failed; keep stopper callable
|
||||
}
|
||||
return
|
||||
}
|
||||
// Provide a functional stopper mirroring proxy/build-info behavior
|
||||
obsStopper = func() { _ = reg.Unregister() }
|
||||
})
|
||||
}
|
||||
|
||||
// SetProxyObservableCallback registers a callback to observe proxy gauges.
|
||||
func SetProxyObservableCallback(cb func(context.Context, metric.Observer) error) {
|
||||
proxyObsOnce.Do(func() {
|
||||
reg, e := meter.RegisterCallback(cb, mProxyActiveConns, mProxyBufferBytes, mProxyAsyncBacklogByte)
|
||||
if e != nil {
|
||||
otel.Handle(e)
|
||||
proxyStopper = func() {
|
||||
// no-op: registration failed; keep stopper callable
|
||||
}
|
||||
return
|
||||
}
|
||||
// Provide a functional stopper to unregister later if needed
|
||||
proxyStopper = func() { _ = reg.Unregister() }
|
||||
})
|
||||
}
|
||||
|
||||
// RegisterBuildInfo stores the version/commit strings reported by the
// newt_build_info gauge callback.
// NOTE(review): these package vars are written without synchronization and
// read by the metrics collection callback — presumably intended to be called
// once at startup before collection begins; confirm.
func RegisterBuildInfo(version, commit string) {
	buildVersion = version
	buildCommit = commit
}
|
||||
|
||||
// Config reloads
|
||||
func IncConfigReload(ctx context.Context, result string) {
|
||||
mConfigReloads.Add(ctx, 1, metric.WithAttributes(attrsWithSite(
|
||||
attribute.String("result", result),
|
||||
)...))
|
||||
}
|
||||
|
||||
// Helpers for counters/histograms
|
||||
|
||||
func IncSiteRegistration(ctx context.Context, result string) {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("result", result),
|
||||
}
|
||||
mSiteRegistrations.Add(ctx, 1, metric.WithAttributes(attrsWithSite(attrs...)...))
|
||||
}
|
||||
|
||||
func AddTunnelBytes(ctx context.Context, tunnelID, direction string, n int64) {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("direction", direction),
|
||||
}
|
||||
if ShouldIncludeTunnelID() && tunnelID != "" {
|
||||
attrs = append(attrs, attribute.String("tunnel_id", tunnelID))
|
||||
}
|
||||
mTunnelBytes.Add(ctx, n, metric.WithAttributes(attrsWithSite(attrs...)...))
|
||||
}
|
||||
|
||||
// AddTunnelBytesSet adds bytes using a pre-built attribute.Set to avoid per-call
// allocations. The caller is responsible for including the direction/tunnel
// labels in attrs.
func AddTunnelBytesSet(ctx context.Context, n int64, attrs attribute.Set) {
	mTunnelBytes.Add(ctx, n, metric.WithAttributeSet(attrs))
}
|
||||
|
||||
// --- WebSocket helpers ---
|
||||
|
||||
func ObserveWSConnectLatency(ctx context.Context, seconds float64, result, errorType string) {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("transport", "websocket"),
|
||||
attribute.String("result", result),
|
||||
}
|
||||
if errorType != "" {
|
||||
attrs = append(attrs, attribute.String("error_type", errorType))
|
||||
}
|
||||
mWSConnectLatency.Record(ctx, seconds, metric.WithAttributes(attrsWithSite(attrs...)...))
|
||||
}
|
||||
|
||||
func IncWSMessage(ctx context.Context, direction, msgType string) {
|
||||
mWSMessages.Add(ctx, 1, metric.WithAttributes(attrsWithSite(
|
||||
attribute.String("direction", direction),
|
||||
attribute.String("msg_type", msgType),
|
||||
)...))
|
||||
}
|
||||
|
||||
func IncWSDisconnect(ctx context.Context, reason, result string) {
|
||||
mWSDisconnects.Add(ctx, 1, metric.WithAttributes(attrsWithSite(
|
||||
attribute.String("reason", reason),
|
||||
attribute.String("result", result),
|
||||
)...))
|
||||
}
|
||||
|
||||
func IncWSKeepaliveFailure(ctx context.Context, reason string) {
|
||||
mWSKeepaliveFailure.Add(ctx, 1, metric.WithAttributes(attrsWithSite(
|
||||
attribute.String("reason", reason),
|
||||
)...))
|
||||
}
|
||||
|
||||
// SetWSConnectionState updates the backing gauge for the WebSocket connected state.
|
||||
func SetWSConnectionState(connected bool) {
|
||||
if connected {
|
||||
wsConnectedState.Store(1)
|
||||
} else {
|
||||
wsConnectedState.Store(0)
|
||||
}
|
||||
}
|
||||
|
||||
// IncWSReconnect increments the WebSocket reconnect counter with a bounded reason label.
|
||||
func IncWSReconnect(ctx context.Context, reason string) {
|
||||
if reason == "" {
|
||||
reason = "unknown"
|
||||
}
|
||||
mWSReconnects.Add(ctx, 1, metric.WithAttributes(attrsWithSite(
|
||||
attribute.String("reason", reason),
|
||||
)...))
|
||||
}
|
||||
|
||||
func ObserveWSSessionDuration(ctx context.Context, seconds float64, result string) {
|
||||
mWSSessionDuration.Record(ctx, seconds, metric.WithAttributes(attrsWithSite(
|
||||
attribute.String("result", result),
|
||||
)...))
|
||||
}
|
||||
|
||||
// --- Proxy helpers ---
|
||||
|
||||
func ObserveProxyActiveConnsObs(o metric.Observer, value int64, attrs []attribute.KeyValue) {
|
||||
o.ObserveInt64(mProxyActiveConns, value, metric.WithAttributes(attrs...))
|
||||
}
|
||||
|
||||
func ObserveProxyBufferBytesObs(o metric.Observer, value int64, attrs []attribute.KeyValue) {
|
||||
o.ObserveInt64(mProxyBufferBytes, value, metric.WithAttributes(attrs...))
|
||||
}
|
||||
|
||||
func ObserveProxyAsyncBacklogObs(o metric.Observer, value int64, attrs []attribute.KeyValue) {
|
||||
o.ObserveInt64(mProxyAsyncBacklogByte, value, metric.WithAttributes(attrs...))
|
||||
}
|
||||
|
||||
func IncProxyDrops(ctx context.Context, tunnelID, protocol string) {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("protocol", protocol),
|
||||
}
|
||||
if ShouldIncludeTunnelID() && tunnelID != "" {
|
||||
attrs = append(attrs, attribute.String("tunnel_id", tunnelID))
|
||||
}
|
||||
mProxyDropsTotal.Add(ctx, 1, metric.WithAttributes(attrsWithSite(attrs...)...))
|
||||
}
|
||||
|
||||
func IncProxyAccept(ctx context.Context, tunnelID, protocol, result, reason string) {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("protocol", protocol),
|
||||
attribute.String("result", result),
|
||||
}
|
||||
if reason != "" {
|
||||
attrs = append(attrs, attribute.String("reason", reason))
|
||||
}
|
||||
if ShouldIncludeTunnelID() && tunnelID != "" {
|
||||
attrs = append(attrs, attribute.String("tunnel_id", tunnelID))
|
||||
}
|
||||
mProxyAcceptsTotal.Add(ctx, 1, metric.WithAttributes(attrsWithSite(attrs...)...))
|
||||
}
|
||||
|
||||
func ObserveProxyConnectionDuration(ctx context.Context, tunnelID, protocol, result string, seconds float64) {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("protocol", protocol),
|
||||
attribute.String("result", result),
|
||||
}
|
||||
if ShouldIncludeTunnelID() && tunnelID != "" {
|
||||
attrs = append(attrs, attribute.String("tunnel_id", tunnelID))
|
||||
}
|
||||
mProxyConnDuration.Record(ctx, seconds, metric.WithAttributes(attrsWithSite(attrs...)...))
|
||||
}
|
||||
|
||||
// IncProxyConnectionEvent records proxy connection lifecycle events (opened/closed).
|
||||
func IncProxyConnectionEvent(ctx context.Context, tunnelID, protocol, event string) {
|
||||
if event == "" {
|
||||
event = "unknown"
|
||||
}
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("protocol", protocol),
|
||||
attribute.String("event", event),
|
||||
}
|
||||
if ShouldIncludeTunnelID() && tunnelID != "" {
|
||||
attrs = append(attrs, attribute.String("tunnel_id", tunnelID))
|
||||
}
|
||||
mProxyConnectionsTotal.Add(ctx, 1, metric.WithAttributes(attrsWithSite(attrs...)...))
|
||||
}
|
||||
|
||||
// --- Config/PKI helpers ---
|
||||
|
||||
func ObserveConfigApply(ctx context.Context, phase, result string, seconds float64) {
|
||||
mConfigApply.Record(ctx, seconds, metric.WithAttributes(attrsWithSite(
|
||||
attribute.String("phase", phase),
|
||||
attribute.String("result", result),
|
||||
)...))
|
||||
}
|
||||
|
||||
func IncCertRotation(ctx context.Context, result string) {
|
||||
mCertRotationTotal.Add(ctx, 1, metric.WithAttributes(attrsWithSite(
|
||||
attribute.String("result", result),
|
||||
)...))
|
||||
}
|
||||
|
||||
func ObserveTunnelLatency(ctx context.Context, tunnelID, transport string, seconds float64) {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("transport", transport),
|
||||
}
|
||||
if ShouldIncludeTunnelID() && tunnelID != "" {
|
||||
attrs = append(attrs, attribute.String("tunnel_id", tunnelID))
|
||||
}
|
||||
mTunnelLatency.Record(ctx, seconds, metric.WithAttributes(attrsWithSite(attrs...)...))
|
||||
}
|
||||
|
||||
func IncReconnect(ctx context.Context, tunnelID, initiator, reason string) {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("initiator", initiator),
|
||||
attribute.String("reason", reason),
|
||||
}
|
||||
if ShouldIncludeTunnelID() && tunnelID != "" {
|
||||
attrs = append(attrs, attribute.String("tunnel_id", tunnelID))
|
||||
}
|
||||
mReconnects.Add(ctx, 1, metric.WithAttributes(attrsWithSite(attrs...)...))
|
||||
}
|
||||
|
||||
func IncConnAttempt(ctx context.Context, transport, result string) {
|
||||
mConnAttempts.Add(ctx, 1, metric.WithAttributes(attrsWithSite(
|
||||
attribute.String("transport", transport),
|
||||
attribute.String("result", result),
|
||||
)...))
|
||||
}
|
||||
|
||||
func IncConnError(ctx context.Context, transport, typ string) {
|
||||
mConnErrors.Add(ctx, 1, metric.WithAttributes(attrsWithSite(
|
||||
attribute.String("transport", transport),
|
||||
attribute.String("error_type", typ),
|
||||
)...))
|
||||
}
|
||||
59
internal/telemetry/metrics_test_helper.go
Normal file
59
internal/telemetry/metrics_test_helper.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// resetMetricsForTest restores the package's metric state to its pre-Init zero
// state so each test can run Init from scratch.
//
// It re-arms the sync.Once guards, stops and clears the WebSocket observable
// callback, nils out every instrument handle, and resets the process-start
// timestamp and the atomic feature flags. Test-only helper: it is not safe to
// call while metrics are being recorded concurrently.
func resetMetricsForTest() {
	// Re-arm one-time initialization so instrument/observable registration
	// runs again on the next Init.
	initOnce = sync.Once{}
	obsOnce = sync.Once{}
	proxyObsOnce = sync.Once{}
	obsStopper = nil
	proxyStopper = nil
	// Unregister the WebSocket connected-state callback if one was installed.
	if wsConnStopper != nil {
		wsConnStopper()
	}
	wsConnStopper = nil
	meter = nil

	// Site instruments.
	mSiteRegistrations = nil
	mSiteOnline = nil
	mSiteLastHeartbeat = nil

	// Tunnel instruments.
	mTunnelSessions = nil
	mTunnelBytes = nil
	mTunnelLatency = nil
	mReconnects = nil

	// Connection instruments.
	mConnAttempts = nil
	mConnErrors = nil

	// Config/PKI/process instruments.
	mConfigReloads = nil
	mConfigApply = nil
	mCertRotationTotal = nil
	mProcessStartTime = nil

	mBuildInfo = nil

	// WebSocket instruments.
	mWSConnectLatency = nil
	mWSMessages = nil
	mWSDisconnects = nil
	mWSKeepaliveFailure = nil
	mWSSessionDuration = nil
	mWSConnected = nil
	mWSReconnects = nil

	// Proxy instruments.
	mProxyActiveConns = nil
	mProxyBufferBytes = nil
	mProxyAsyncBacklogByte = nil
	mProxyDropsTotal = nil
	mProxyAcceptsTotal = nil
	mProxyConnDuration = nil
	mProxyConnectionsTotal = nil

	// Reset derived state: process start (unix seconds) and atomic flags.
	processStartUnix = float64(time.Now().UnixNano()) / 1e9
	wsConnectedState.Store(0)
	includeTunnelIDVal.Store(false)
	includeSiteLabelVal.Store(false)
}
|
||||
106
internal/telemetry/state_view.go
Normal file
106
internal/telemetry/state_view.go
Normal file
@@ -0,0 +1,106 @@
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
)
|
||||
|
||||
// StateView provides a read-only view for observable gauges.
// Implementations must be concurrency-safe and avoid blocking operations.
// All methods should be fast and use RLocks where applicable.
type StateView interface {
	// ListSites returns a stable, low-cardinality list of site IDs to expose.
	ListSites() []string
	// Online returns whether the site is online. ok is false when the site
	// is unknown, in which case no sample is emitted for it.
	Online(siteID string) (online bool, ok bool)
	// LastHeartbeat returns the last heartbeat time for a site.
	// ok is false when no heartbeat has been recorded.
	LastHeartbeat(siteID string) (t time.Time, ok bool)
	// ActiveSessions returns the current number of active sessions for a site (across tunnels),
	// or scoped to site if your model is site-scoped.
	ActiveSessions(siteID string) (n int64, ok bool)
}

var (
	// stateView holds the currently registered StateView (or nil). Stored in
	// an atomic.Value so the observable callback can read it lock-free.
	stateView atomic.Value // of type StateView
)
|
||||
|
||||
// RegisterStateView sets the global StateView used by the default observable callback.
|
||||
func RegisterStateView(v StateView) {
|
||||
stateView.Store(v)
|
||||
// If instruments are registered, ensure a callback exists.
|
||||
if v != nil {
|
||||
SetObservableCallback(func(ctx context.Context, o metric.Observer) error {
|
||||
if any := stateView.Load(); any != nil {
|
||||
if sv, ok := any.(StateView); ok {
|
||||
for _, siteID := range sv.ListSites() {
|
||||
observeSiteOnlineFor(o, sv, siteID)
|
||||
observeLastHeartbeatFor(o, sv, siteID)
|
||||
observeSessionsFor(o, siteID, sv)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func observeSiteOnlineFor(o metric.Observer, sv StateView, siteID string) {
|
||||
if online, ok := sv.Online(siteID); ok {
|
||||
val := int64(0)
|
||||
if online {
|
||||
val = 1
|
||||
}
|
||||
o.ObserveInt64(mSiteOnline, val, metric.WithAttributes(
|
||||
attribute.String("site_id", siteID),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
func observeLastHeartbeatFor(o metric.Observer, sv StateView, siteID string) {
|
||||
if t, ok := sv.LastHeartbeat(siteID); ok {
|
||||
ts := float64(t.UnixNano()) / 1e9
|
||||
o.ObserveFloat64(mSiteLastHeartbeat, ts, metric.WithAttributes(
|
||||
attribute.String("site_id", siteID),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
func observeSessionsFor(o metric.Observer, siteID string, any interface{}) {
|
||||
if tm, ok := any.(interface{ SessionsByTunnel() map[string]int64 }); ok {
|
||||
sessions := tm.SessionsByTunnel()
|
||||
// If tunnel_id labels are enabled, preserve existing per-tunnel observations
|
||||
if ShouldIncludeTunnelID() {
|
||||
for tid, n := range sessions {
|
||||
attrs := []attribute.KeyValue{
|
||||
attribute.String("site_id", siteID),
|
||||
}
|
||||
if tid != "" {
|
||||
attrs = append(attrs, attribute.String("tunnel_id", tid))
|
||||
}
|
||||
o.ObserveInt64(mTunnelSessions, n, metric.WithAttributes(attrs...))
|
||||
}
|
||||
return
|
||||
}
|
||||
// When tunnel_id is disabled, collapse per-tunnel counts into a single site-level value
|
||||
var total int64
|
||||
for _, n := range sessions {
|
||||
total += n
|
||||
}
|
||||
// If there are no per-tunnel entries, fall back to ActiveSessions() if available
|
||||
if total == 0 {
|
||||
if svAny := stateView.Load(); svAny != nil {
|
||||
if sv, ok := svAny.(StateView); ok {
|
||||
if n, ok2 := sv.ActiveSessions(siteID); ok2 {
|
||||
total = n
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
o.ObserveInt64(mTunnelSessions, total, metric.WithAttributes(attribute.String("site_id", siteID)))
|
||||
return
|
||||
}
|
||||
}
|
||||
384
internal/telemetry/telemetry.go
Normal file
384
internal/telemetry/telemetry.go
Normal file
@@ -0,0 +1,384 @@
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
promclient "github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"go.opentelemetry.io/contrib/instrumentation/runtime"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
|
||||
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
|
||||
"go.opentelemetry.io/otel/exporters/prometheus"
|
||||
"go.opentelemetry.io/otel/sdk/metric"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
"go.opentelemetry.io/otel/sdk/trace"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// Config controls telemetry initialization via env flags.
//
// Defaults align with the issue requirements:
//   - Prometheus exporter enabled by default (/metrics)
//   - OTLP exporter disabled by default
//   - Durations in seconds, bytes in raw bytes
//   - Admin HTTP server address configurable (for mounting /metrics)
type Config struct {
	ServiceName    string
	ServiceVersion string

	// Optional resource attributes
	SiteID string
	Region string

	// PromEnabled enables the pull-based Prometheus exporter;
	// OTLPEnabled enables push-based OTLP gRPC export (and tracing).
	PromEnabled bool
	OTLPEnabled bool

	// OTLPEndpoint is the collector address as host:port (no scheme).
	OTLPEndpoint string // host:port
	OTLPInsecure bool

	// MetricExportInterval is the OTLP periodic-reader push interval.
	MetricExportInterval time.Duration
	AdminAddr            string // e.g.: ":2112"

	// Optional build info for newt_build_info metric
	BuildVersion string
	BuildCommit  string
}
|
||||
|
||||
// FromEnv reads configuration from environment variables.
|
||||
//
|
||||
// NEWT_METRICS_PROMETHEUS_ENABLED (default: true)
|
||||
// NEWT_METRICS_OTLP_ENABLED (default: false)
|
||||
// OTEL_EXPORTER_OTLP_ENDPOINT (default: "localhost:4317")
|
||||
// OTEL_EXPORTER_OTLP_INSECURE (default: true)
|
||||
// OTEL_METRIC_EXPORT_INTERVAL (default: 15s)
|
||||
// OTEL_SERVICE_NAME (default: "newt")
|
||||
// OTEL_SERVICE_VERSION (default: "")
|
||||
// NEWT_ADMIN_ADDR (default: ":2112")
|
||||
func FromEnv() Config {
|
||||
// Prefer explicit NEWT_* env vars, then fall back to OTEL_RESOURCE_ATTRIBUTES
|
||||
site := getenv("NEWT_SITE_ID", "")
|
||||
if site == "" {
|
||||
site = getenv("NEWT_ID", "")
|
||||
}
|
||||
region := os.Getenv("NEWT_REGION")
|
||||
if site == "" || region == "" {
|
||||
if ra := os.Getenv("OTEL_RESOURCE_ATTRIBUTES"); ra != "" {
|
||||
m := parseResourceAttributes(ra)
|
||||
if site == "" {
|
||||
site = m["site_id"]
|
||||
}
|
||||
if region == "" {
|
||||
region = m["region"]
|
||||
}
|
||||
}
|
||||
}
|
||||
return Config{
|
||||
ServiceName: getenv("OTEL_SERVICE_NAME", "newt"),
|
||||
ServiceVersion: os.Getenv("OTEL_SERVICE_VERSION"),
|
||||
SiteID: site,
|
||||
Region: region,
|
||||
PromEnabled: getenv("NEWT_METRICS_PROMETHEUS_ENABLED", "true") == "true",
|
||||
OTLPEnabled: getenv("NEWT_METRICS_OTLP_ENABLED", "false") == "true",
|
||||
OTLPEndpoint: getenv("OTEL_EXPORTER_OTLP_ENDPOINT", "localhost:4317"),
|
||||
OTLPInsecure: getenv("OTEL_EXPORTER_OTLP_INSECURE", "true") == "true",
|
||||
MetricExportInterval: getdur("OTEL_METRIC_EXPORT_INTERVAL", 15*time.Second),
|
||||
AdminAddr: getenv("NEWT_ADMIN_ADDR", ":2112"),
|
||||
}
|
||||
}
|
||||
|
||||
// Setup holds initialized telemetry providers and (optionally) a /metrics handler.
// Call Shutdown when the process terminates to flush exporters.
type Setup struct {
	MeterProvider  *metric.MeterProvider
	TracerProvider *trace.TracerProvider

	PrometheusHandler http.Handler // nil if Prometheus exporter disabled

	// shutdowns are invoked in reverse order by Shutdown; populated during Init.
	shutdowns []func(context.Context) error
}
|
||||
|
||||
// Init configures OpenTelemetry metrics and (optionally) tracing.
//
// It sets a global MeterProvider and TracerProvider, registers runtime instrumentation,
// installs recommended histogram views for *_latency_seconds, and returns a Setup with
// a Shutdown method to flush exporters.
func Init(ctx context.Context, cfg Config) (*Setup, error) {
	// Configure tunnel_id label inclusion from env (default true)
	if getenv("NEWT_METRICS_INCLUDE_TUNNEL_ID", "true") == "true" {
		includeTunnelIDVal.Store(true)
	} else {
		includeTunnelIDVal.Store(false)
	}
	// Site labels default to true here (Init), while the pre-Init default of
	// ShouldIncludeSiteLabels is false.
	if getenv("NEWT_METRICS_INCLUDE_SITE_LABELS", "true") == "true" {
		includeSiteLabelVal.Store(true)
	} else {
		includeSiteLabelVal.Store(false)
	}
	res := buildResource(ctx, cfg)
	UpdateSiteInfo(cfg.SiteID, cfg.Region)

	s := &Setup{}
	readers, promHandler, shutdowns, err := setupMetricExport(ctx, cfg, res)
	if err != nil {
		return nil, err
	}
	s.PrometheusHandler = promHandler
	// Build provider
	mp := buildMeterProvider(res, readers)
	otel.SetMeterProvider(mp)
	s.MeterProvider = mp
	s.shutdowns = append(s.shutdowns, mp.Shutdown)
	// Optional tracing. setupTracing returns a nil provider on failure, in
	// which case tracing is simply skipped.
	if cfg.OTLPEnabled {
		if tp, shutdown := setupTracing(ctx, cfg, res); tp != nil {
			otel.SetTracerProvider(tp)
			s.TracerProvider = tp
			s.shutdowns = append(s.shutdowns, func(c context.Context) error {
				return errors.Join(shutdown(c), tp.Shutdown(c))
			})
		}
	}
	// Add metric exporter shutdowns (after the provider's own, so Shutdown's
	// reverse iteration flushes exporters last).
	s.shutdowns = append(s.shutdowns, shutdowns...)
	// Runtime metrics; best-effort, failure to start is deliberately ignored.
	_ = runtime.Start(runtime.WithMeterProvider(mp))
	// Instruments must be registered after the global MeterProvider is set.
	if err := registerInstruments(); err != nil {
		return nil, err
	}
	if cfg.BuildVersion != "" || cfg.BuildCommit != "" {
		RegisterBuildInfo(cfg.BuildVersion, cfg.BuildCommit)
	}
	return s, nil
}
|
||||
|
||||
func buildResource(ctx context.Context, cfg Config) *resource.Resource {
|
||||
attrs := []attribute.KeyValue{
|
||||
semconv.ServiceName(cfg.ServiceName),
|
||||
semconv.ServiceVersion(cfg.ServiceVersion),
|
||||
}
|
||||
if cfg.SiteID != "" {
|
||||
attrs = append(attrs, attribute.String("site_id", cfg.SiteID))
|
||||
}
|
||||
if cfg.Region != "" {
|
||||
attrs = append(attrs, attribute.String("region", cfg.Region))
|
||||
}
|
||||
res, _ := resource.New(ctx, resource.WithFromEnv(), resource.WithHost(), resource.WithAttributes(attrs...))
|
||||
return res
|
||||
}
|
||||
|
||||
// setupMetricExport builds the metric readers requested by cfg: an in-process
// Prometheus exporter (returned as an http.Handler for /metrics) and/or a
// periodic OTLP gRPC push reader. It returns the readers, the Prometheus
// handler (nil when disabled), and shutdown hooks for the OTLP exporter.
// The resource parameter is unused here; the resource is attached by the
// MeterProvider instead.
func setupMetricExport(ctx context.Context, cfg Config, _ *resource.Resource) ([]metric.Reader, http.Handler, []func(context.Context) error, error) {
	var readers []metric.Reader
	var shutdowns []func(context.Context) error
	var promHandler http.Handler
	if cfg.PromEnabled {
		// Dedicated registry so only newt metrics appear on this handler.
		reg := promclient.NewRegistry()
		exp, err := prometheus.New(prometheus.WithRegisterer(reg))
		if err != nil {
			return nil, nil, nil, err
		}
		readers = append(readers, exp)
		promHandler = promhttp.HandlerFor(reg, promhttp.HandlerOpts{})
	}
	if cfg.OTLPEnabled {
		mopts := []otlpmetricgrpc.Option{otlpmetricgrpc.WithEndpoint(cfg.OTLPEndpoint)}
		if hdrs := parseOTLPHeaders(os.Getenv("OTEL_EXPORTER_OTLP_HEADERS")); len(hdrs) > 0 {
			mopts = append(mopts, otlpmetricgrpc.WithHeaders(hdrs))
		}
		if cfg.OTLPInsecure {
			mopts = append(mopts, otlpmetricgrpc.WithInsecure())
		} else if certFile := os.Getenv("OTEL_EXPORTER_OTLP_CERTIFICATE"); certFile != "" {
			// NOTE(review): a TLS-credential load failure is silently ignored
			// here, falling back to default credentials — confirm intended.
			if creds, cerr := credentials.NewClientTLSFromFile(certFile, ""); cerr == nil {
				mopts = append(mopts, otlpmetricgrpc.WithTLSCredentials(creds))
			}
		}
		mexp, err := otlpmetricgrpc.New(ctx, mopts...)
		if err != nil {
			return nil, nil, nil, err
		}
		readers = append(readers, metric.NewPeriodicReader(mexp, metric.WithInterval(cfg.MetricExportInterval)))
		shutdowns = append(shutdowns, mexp.Shutdown)
	}
	return readers, promHandler, shutdowns, nil
}
|
||||
|
||||
// buildMeterProvider constructs the SDK MeterProvider with the given resource
// and readers, plus two views:
//   - newt_*_latency_seconds instruments get an explicit histogram bucket set
//     tuned for sub-second to 30s latencies;
//   - all newt_* instruments get an attribute allowlist so only the known
//     low-cardinality label keys survive export.
func buildMeterProvider(res *resource.Resource, readers []metric.Reader) *metric.MeterProvider {
	var mpOpts []metric.Option
	mpOpts = append(mpOpts, metric.WithResource(res))
	for _, r := range readers {
		mpOpts = append(mpOpts, metric.WithReader(r))
	}
	// Latency histograms: seconds, buckets from 5ms to 30s.
	mpOpts = append(mpOpts, metric.WithView(metric.NewView(
		metric.Instrument{Name: "newt_*_latency_seconds"},
		metric.Stream{Aggregation: metric.AggregationExplicitBucketHistogram{Boundaries: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30}}},
	)))
	// Attribute allowlist: any label key not in this set is dropped at export.
	mpOpts = append(mpOpts, metric.WithView(metric.NewView(
		metric.Instrument{Name: "newt_*"},
		metric.Stream{AttributeFilter: func(kv attribute.KeyValue) bool {
			k := string(kv.Key)
			switch k {
			case "tunnel_id", "transport", "direction", "protocol", "result", "reason", "initiator", "error_type", "msg_type", "phase", "version", "commit", "site_id", "region":
				return true
			default:
				return false
			}
		}},
	)))
	return metric.NewMeterProvider(mpOpts...)
}
|
||||
|
||||
// setupTracing builds an OTLP gRPC trace exporter and a batching
// TracerProvider using the same endpoint/headers/TLS settings as the metric
// exporter. On exporter construction failure it returns (nil, nil) and the
// caller skips tracing.
//
// NOTE(review): the error from otlptracegrpc.New is discarded here, so a
// misconfigured endpoint disables tracing silently — consider logging it.
func setupTracing(ctx context.Context, cfg Config, res *resource.Resource) (*trace.TracerProvider, func(context.Context) error) {
	topts := []otlptracegrpc.Option{otlptracegrpc.WithEndpoint(cfg.OTLPEndpoint)}
	if hdrs := parseOTLPHeaders(os.Getenv("OTEL_EXPORTER_OTLP_HEADERS")); len(hdrs) > 0 {
		topts = append(topts, otlptracegrpc.WithHeaders(hdrs))
	}
	if cfg.OTLPInsecure {
		topts = append(topts, otlptracegrpc.WithInsecure())
	} else if certFile := os.Getenv("OTEL_EXPORTER_OTLP_CERTIFICATE"); certFile != "" {
		// NOTE(review): TLS-credential load failure silently falls through —
		// mirrors setupMetricExport; confirm intended.
		if creds, cerr := credentials.NewClientTLSFromFile(certFile, ""); cerr == nil {
			topts = append(topts, otlptracegrpc.WithTLSCredentials(creds))
		}
	}
	exp, err := otlptracegrpc.New(ctx, topts...)
	if err != nil {
		return nil, nil
	}
	tp := trace.NewTracerProvider(trace.WithBatcher(exp), trace.WithResource(res))
	return tp, exp.Shutdown
}
|
||||
|
||||
// Shutdown flushes exporters and providers in reverse init order.
|
||||
func (s *Setup) Shutdown(ctx context.Context) error {
|
||||
var err error
|
||||
for i := len(s.shutdowns) - 1; i >= 0; i-- {
|
||||
err = errors.Join(err, s.shutdowns[i](ctx))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// parseOTLPHeaders parses OTEL_EXPORTER_OTLP_HEADERS ("k1=v1,k2=v2") into a
// map. Entries without '=' are skipped; keys and values are whitespace-trimmed.
func parseOTLPHeaders(h string) map[string]string {
	out := map[string]string{}
	for _, pair := range strings.Split(h, ",") {
		k, v, found := strings.Cut(strings.TrimSpace(pair), "=")
		if !found {
			continue
		}
		out[strings.TrimSpace(k)] = strings.TrimSpace(v)
	}
	return out
}
|
||||
|
||||
// parseResourceAttributes parses OTEL_RESOURCE_ATTRIBUTES formatted as k=v,k2=v2
// into a map; malformed entries (no '=') are skipped and whitespace is trimmed.
func parseResourceAttributes(s string) map[string]string {
	attrs := map[string]string{}
	for _, field := range strings.Split(s, ",") {
		key, val, found := strings.Cut(strings.TrimSpace(field), "=")
		if !found {
			continue
		}
		attrs[strings.TrimSpace(key)] = strings.TrimSpace(val)
	}
	return attrs
}
|
||||
|
||||
// Global site/region used to enrich metric labels.
// Stored in atomic.Values so label emission never takes a lock.
var siteIDVal atomic.Value
var regionVal atomic.Value
var (
	// Feature flags controlling which optional label families are emitted.
	includeTunnelIDVal  atomic.Value // bool; default true
	includeSiteLabelVal atomic.Value // bool; default false
)
|
||||
|
||||
// UpdateSiteInfo updates the global site_id and region used for metric labels.
|
||||
// Thread-safe via atomic.Value: subsequent metric emissions will include
|
||||
// the new labels, prior emissions remain unchanged.
|
||||
func UpdateSiteInfo(siteID, region string) {
|
||||
if siteID != "" {
|
||||
siteIDVal.Store(siteID)
|
||||
}
|
||||
if region != "" {
|
||||
regionVal.Store(region)
|
||||
}
|
||||
}
|
||||
|
||||
func getSiteID() string {
|
||||
if v, ok := siteIDVal.Load().(string); ok {
|
||||
return v
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func getRegion() string {
|
||||
if v, ok := regionVal.Load().(string); ok {
|
||||
return v
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// siteAttrs returns label KVs for site_id and region (if set).
|
||||
func siteAttrs() []attribute.KeyValue {
|
||||
var out []attribute.KeyValue
|
||||
if s := getSiteID(); s != "" {
|
||||
out = append(out, attribute.String("site_id", s))
|
||||
}
|
||||
if r := getRegion(); r != "" {
|
||||
out = append(out, attribute.String("region", r))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// SiteLabelKVs exposes site label KVs for other packages (e.g., proxy manager).
|
||||
func SiteLabelKVs() []attribute.KeyValue {
|
||||
if !ShouldIncludeSiteLabels() {
|
||||
return nil
|
||||
}
|
||||
return siteAttrs()
|
||||
}
|
||||
|
||||
// ShouldIncludeTunnelID returns whether tunnel_id labels should be emitted.
|
||||
func ShouldIncludeTunnelID() bool {
|
||||
if v, ok := includeTunnelIDVal.Load().(bool); ok {
|
||||
return v
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// ShouldIncludeSiteLabels returns whether site_id/region should be emitted as
|
||||
// metric labels in addition to resource attributes.
|
||||
func ShouldIncludeSiteLabels() bool {
|
||||
if v, ok := includeSiteLabelVal.Load().(bool); ok {
|
||||
return v
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getenv returns the value of environment variable k, or d when unset/empty.
func getenv(k, d string) string {
	v := os.Getenv(k)
	if v == "" {
		return d
	}
	return v
}

// getdur parses environment variable k as a time.Duration, returning d when
// the variable is unset, empty, or unparsable.
func getdur(k string, d time.Duration) time.Duration {
	raw := os.Getenv(k)
	if raw == "" {
		return d
	}
	parsed, err := time.ParseDuration(raw)
	if err != nil {
		return d
	}
	return parsed
}
|
||||
53
internal/telemetry/telemetry_attrfilter_test.go
Normal file
53
internal/telemetry/telemetry_attrfilter_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
)
|
||||
|
||||
// Test that disallowed attributes are filtered from the exposition.
// It records samples carrying both allowlisted (site_id) and non-allowlisted
// (forbidden, host) keys and asserts only the allowed key reaches /metrics.
func TestAttributeFilterDropsUnknownKeys(t *testing.T) {
	ctx := context.Background()
	resetMetricsForTest()
	t.Setenv("NEWT_METRICS_INCLUDE_SITE_LABELS", "true")
	cfg := Config{ServiceName: "newt", PromEnabled: true, AdminAddr: "127.0.0.1:0"}
	tel, err := Init(ctx, cfg)
	if err != nil {
		t.Fatalf("init: %v", err)
	}
	defer func() { _ = tel.Shutdown(context.Background()) }()

	if tel.PrometheusHandler == nil {
		t.Fatalf("prom handler nil")
	}
	ts := httptest.NewServer(tel.PrometheusHandler)
	defer ts.Close()

	// Add samples with disallowed attribute keys
	for _, k := range []string{"forbidden", "site_id", "host"} {
		set := attribute.NewSet(attribute.String(k, "x"))
		AddTunnelBytesSet(ctx, 123, set)
	}
	// Give the pull exporter a moment to observe the recorded samples.
	time.Sleep(50 * time.Millisecond)

	resp, err := http.Get(ts.URL)
	if err != nil {
		t.Fatalf("GET: %v", err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	body := string(b)
	// The allowlist view must drop "forbidden" but keep "site_id".
	if strings.Contains(body, "forbidden=") {
		t.Fatalf("unexpected forbidden attribute leaked into metrics: %s", body)
	}
	if !strings.Contains(body, "site_id=\"x\"") {
		t.Fatalf("expected allowed attribute site_id to be present in metrics, got: %s", body)
	}
}
|
||||
76
internal/telemetry/telemetry_golden_test.go
Normal file
76
internal/telemetry/telemetry_golden_test.go
Normal file
@@ -0,0 +1,76 @@
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Golden test that /metrics contains expected metric names.
// Each non-empty line of testdata/expected_contains.golden must appear as a
// substring of the Prometheus exposition after a few representative counters
// have been recorded.
func TestMetricsGoldenContains(t *testing.T) {
	ctx := context.Background()
	resetMetricsForTest()
	t.Setenv("NEWT_METRICS_INCLUDE_SITE_LABELS", "true")
	cfg := Config{ServiceName: "newt", PromEnabled: true, AdminAddr: "127.0.0.1:0", BuildVersion: "test"}
	tel, err := Init(ctx, cfg)
	if err != nil {
		t.Fatalf("telemetry init error: %v", err)
	}
	defer func() { _ = tel.Shutdown(context.Background()) }()

	if tel.PrometheusHandler == nil {
		t.Fatalf("prom handler nil")
	}
	ts := httptest.NewServer(tel.PrometheusHandler)
	defer ts.Close()

	// Trigger counters to ensure they appear in the scrape
	IncConnAttempt(ctx, "websocket", "success")
	IncWSReconnect(ctx, "io_error")
	IncProxyConnectionEvent(ctx, "", "tcp", ProxyConnectionOpened)
	if tel.MeterProvider != nil {
		_ = tel.MeterProvider.ForceFlush(ctx)
	}
	time.Sleep(100 * time.Millisecond)

	// Poll the endpoint a few times; collection is asynchronous.
	var body string
	for i := 0; i < 5; i++ {
		resp, err := http.Get(ts.URL)
		if err != nil {
			t.Fatalf("GET metrics failed: %v", err)
		}
		b, _ := io.ReadAll(resp.Body)
		_ = resp.Body.Close()
		body = string(b)
		if strings.Contains(body, "newt_connection_attempts_total") {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}

	// Every non-blank golden line must be present in the scraped body.
	f, err := os.Open(filepath.Join("testdata", "expected_contains.golden"))
	if err != nil {
		t.Fatalf("read golden: %v", err)
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		needle := strings.TrimSpace(s.Text())
		if needle == "" {
			continue
		}
		if !strings.Contains(body, needle) {
			t.Fatalf("expected metrics body to contain %q. body=\n%s", needle, body)
		}
	}
	if err := s.Err(); err != nil {
		t.Fatalf("scan golden: %v", err)
	}
}
|
||||
65
internal/telemetry/telemetry_smoke_test.go
Normal file
65
internal/telemetry/telemetry_smoke_test.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package telemetry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Smoke test that /metrics contains at least one newt_* metric when Prom exporter is enabled.
func TestMetricsSmoke(t *testing.T) {
	ctx := context.Background()
	resetMetricsForTest()
	t.Setenv("NEWT_METRICS_INCLUDE_SITE_LABELS", "true")
	cfg := Config{
		ServiceName:          "newt",
		PromEnabled:          true,
		OTLPEnabled:          false,
		AdminAddr:            "127.0.0.1:0",
		BuildVersion:         "test",
		BuildCommit:          "deadbeef",
		MetricExportInterval: 5 * time.Second,
	}
	tel, err := Init(ctx, cfg)
	if err != nil {
		t.Fatalf("telemetry init error: %v", err)
	}
	defer func() { _ = tel.Shutdown(context.Background()) }()

	// Serve the Prom handler on a test server
	if tel.PrometheusHandler == nil {
		t.Fatalf("Prometheus handler nil; PromEnabled should enable it")
	}
	ts := httptest.NewServer(tel.PrometheusHandler)
	defer ts.Close()

	// Record a simple metric and then fetch /metrics
	IncConnAttempt(ctx, "websocket", "success")
	if tel.MeterProvider != nil {
		_ = tel.MeterProvider.ForceFlush(ctx)
	}
	// Give the exporter a tick to collect
	time.Sleep(100 * time.Millisecond)

	// Poll a few times; the pull exporter collects asynchronously.
	var body string
	for i := 0; i < 5; i++ {
		resp, err := http.Get(ts.URL)
		if err != nil {
			t.Fatalf("GET /metrics failed: %v", err)
		}
		b, _ := io.ReadAll(resp.Body)
		_ = resp.Body.Close()
		body = string(b)
		if strings.Contains(body, "newt_connection_attempts_total") {
			break
		}
		time.Sleep(100 * time.Millisecond)
	}
	if !strings.Contains(body, "newt_connection_attempts_total") {
		t.Fatalf("expected newt_connection_attempts_total in metrics, got:\n%s", body)
	}
}
|
||||
3
internal/telemetry/testdata/expected_contains.golden
vendored
Normal file
3
internal/telemetry/testdata/expected_contains.golden
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
newt_connection_attempts_total
|
||||
newt_websocket_reconnects_total
|
||||
newt_proxy_connections_total
|
||||
@@ -2,15 +2,15 @@ package logger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Logger struct holds the logger instance
|
||||
type Logger struct {
|
||||
logger *log.Logger
|
||||
writer LogWriter
|
||||
level LogLevel
|
||||
}
|
||||
|
||||
@@ -19,17 +19,29 @@ var (
|
||||
once sync.Once
|
||||
)
|
||||
|
||||
// NewLogger creates a new logger instance
|
||||
// NewLogger creates a new logger instance with the default StandardWriter
|
||||
func NewLogger() *Logger {
|
||||
return &Logger{
|
||||
logger: log.New(os.Stdout, "", 0),
|
||||
writer: NewStandardWriter(),
|
||||
level: DEBUG,
|
||||
}
|
||||
}
|
||||
|
||||
// NewLoggerWithWriter creates a new logger instance with a custom LogWriter
|
||||
func NewLoggerWithWriter(writer LogWriter) *Logger {
|
||||
return &Logger{
|
||||
writer: writer,
|
||||
level: DEBUG,
|
||||
}
|
||||
}
|
||||
|
||||
// Init initializes the default logger
|
||||
func Init() *Logger {
|
||||
func Init(logger *Logger) *Logger {
|
||||
once.Do(func() {
|
||||
if logger != nil {
|
||||
defaultLogger = logger
|
||||
return
|
||||
}
|
||||
defaultLogger = NewLogger()
|
||||
})
|
||||
return defaultLogger
|
||||
@@ -38,7 +50,7 @@ func Init() *Logger {
|
||||
// GetLogger returns the default logger instance
|
||||
func GetLogger() *Logger {
|
||||
if defaultLogger == nil {
|
||||
Init()
|
||||
Init(nil)
|
||||
}
|
||||
return defaultLogger
|
||||
}
|
||||
@@ -48,14 +60,21 @@ func (l *Logger) SetLevel(level LogLevel) {
|
||||
l.level = level
|
||||
}
|
||||
|
||||
// SetOutput sets the output destination for the logger (only works with StandardWriter)
|
||||
func (l *Logger) SetOutput(output *os.File) {
|
||||
if sw, ok := l.writer.(*StandardWriter); ok {
|
||||
sw.SetOutput(output)
|
||||
}
|
||||
}
|
||||
|
||||
// log handles the actual logging
|
||||
func (l *Logger) log(level LogLevel, format string, args ...interface{}) {
|
||||
if level < l.level {
|
||||
return
|
||||
}
|
||||
timestamp := time.Now().Format("2006/01/02 15:04:05")
|
||||
|
||||
message := fmt.Sprintf(format, args...)
|
||||
l.logger.Printf("%s: %s %s", level.String(), timestamp, message)
|
||||
l.writer.Write(level, time.Now(), message)
|
||||
}
|
||||
|
||||
// Debug logs debug level messages
|
||||
@@ -104,3 +123,31 @@ func Error(format string, args ...interface{}) {
|
||||
func Fatal(format string, args ...interface{}) {
|
||||
GetLogger().Fatal(format, args...)
|
||||
}
|
||||
|
||||
// SetOutput sets the output destination for the default logger
|
||||
func SetOutput(output *os.File) {
|
||||
GetLogger().SetOutput(output)
|
||||
}
|
||||
|
||||
// WireGuardLogger is a wrapper type that matches WireGuard's Logger interface
|
||||
type WireGuardLogger struct {
|
||||
Verbosef func(format string, args ...any)
|
||||
Errorf func(format string, args ...any)
|
||||
}
|
||||
|
||||
// GetWireGuardLogger returns a WireGuard-compatible logger that writes to the newt logger
|
||||
// The prepend string is added as a prefix to all log messages
|
||||
func (l *Logger) GetWireGuardLogger(prepend string) *WireGuardLogger {
|
||||
return &WireGuardLogger{
|
||||
Verbosef: func(format string, args ...any) {
|
||||
// if the format string contains "Sending keepalive packet", skip debug logging to reduce noise
|
||||
if strings.Contains(format, "Sending keepalive packet") {
|
||||
return
|
||||
}
|
||||
l.Debug(prepend+format, args...)
|
||||
},
|
||||
Errorf: func(format string, args ...any) {
|
||||
l.Error(prepend+format, args...)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
54
logger/writer.go
Normal file
54
logger/writer.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package logger
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LogWriter is an interface for writing log messages
|
||||
// Implement this interface to create custom log backends (OS log, syslog, etc.)
|
||||
type LogWriter interface {
|
||||
// Write writes a log message with the given level, timestamp, and formatted message
|
||||
Write(level LogLevel, timestamp time.Time, message string)
|
||||
}
|
||||
|
||||
// StandardWriter is the default log writer that writes to an io.Writer
|
||||
type StandardWriter struct {
|
||||
output *os.File
|
||||
timezone *time.Location
|
||||
}
|
||||
|
||||
// NewStandardWriter creates a new standard writer with the default configuration
|
||||
func NewStandardWriter() *StandardWriter {
|
||||
// Get timezone from environment variable or use local timezone
|
||||
timezone := os.Getenv("LOGGER_TIMEZONE")
|
||||
var location *time.Location
|
||||
var err error
|
||||
|
||||
if timezone != "" {
|
||||
location, err = time.LoadLocation(timezone)
|
||||
if err != nil {
|
||||
// If invalid timezone, fall back to local
|
||||
location = time.Local
|
||||
}
|
||||
} else {
|
||||
location = time.Local
|
||||
}
|
||||
|
||||
return &StandardWriter{
|
||||
output: os.Stdout,
|
||||
timezone: location,
|
||||
}
|
||||
}
|
||||
|
||||
// SetOutput sets the output destination
|
||||
func (w *StandardWriter) SetOutput(output *os.File) {
|
||||
w.output = output
|
||||
}
|
||||
|
||||
// Write implements the LogWriter interface
|
||||
func (w *StandardWriter) Write(level LogLevel, timestamp time.Time, message string) {
|
||||
formattedTime := timestamp.In(w.timezone).Format("2006/01/02 15:04:05")
|
||||
fmt.Fprintf(w.output, "%s: %s %s\n", level.String(), formattedTime, message)
|
||||
}
|
||||
701
netstack2/handlers.go
Normal file
701
netstack2/handlers.go
Normal file
@@ -0,0 +1,701 @@
|
||||
/* SPDX-License-Identifier: MIT
|
||||
*
|
||||
* Copyright (C) 2017-2025 WireGuard LLC. All Rights Reserved.
|
||||
*/
|
||||
|
||||
package netstack2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/netip"
|
||||
"os/exec"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
"golang.org/x/net/icmp"
|
||||
"golang.org/x/net/ipv4"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/checksum"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/header"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
|
||||
"gvisor.dev/gvisor/pkg/waiter"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultWndSize if set to zero, the default
|
||||
// receive window buffer size is used instead.
|
||||
defaultWndSize = 0
|
||||
|
||||
// maxConnAttempts specifies the maximum number
|
||||
// of in-flight tcp connection attempts.
|
||||
maxConnAttempts = 2 << 10
|
||||
|
||||
// tcpKeepaliveCount is the maximum number of
|
||||
// TCP keep-alive probes to send before giving up
|
||||
// and killing the connection if no response is
|
||||
// obtained from the other end.
|
||||
tcpKeepaliveCount = 9
|
||||
|
||||
// tcpKeepaliveIdle specifies the time a connection
|
||||
// must remain idle before the first TCP keepalive
|
||||
// packet is sent. Once this time is reached,
|
||||
// tcpKeepaliveInterval option is used instead.
|
||||
tcpKeepaliveIdle = 60 * time.Second
|
||||
|
||||
// tcpKeepaliveInterval specifies the interval
|
||||
// time between sending TCP keepalive packets.
|
||||
tcpKeepaliveInterval = 30 * time.Second
|
||||
|
||||
// tcpConnectTimeout is the default timeout for TCP handshakes.
|
||||
tcpConnectTimeout = 5 * time.Second
|
||||
|
||||
// tcpWaitTimeout implements a TCP half-close timeout.
|
||||
tcpWaitTimeout = 60 * time.Second
|
||||
|
||||
// udpSessionTimeout is the default timeout for UDP sessions.
|
||||
udpSessionTimeout = 60 * time.Second
|
||||
|
||||
// Buffer size for copying data
|
||||
bufferSize = 32 * 1024
|
||||
|
||||
// icmpTimeout is the default timeout for ICMP ping requests.
|
||||
icmpTimeout = 5 * time.Second
|
||||
)
|
||||
|
||||
// TCPHandler handles TCP connections from netstack
|
||||
type TCPHandler struct {
|
||||
stack *stack.Stack
|
||||
proxyHandler *ProxyHandler
|
||||
}
|
||||
|
||||
// UDPHandler handles UDP connections from netstack
|
||||
type UDPHandler struct {
|
||||
stack *stack.Stack
|
||||
proxyHandler *ProxyHandler
|
||||
}
|
||||
|
||||
// ICMPHandler handles ICMP packets from netstack
|
||||
type ICMPHandler struct {
|
||||
stack *stack.Stack
|
||||
proxyHandler *ProxyHandler
|
||||
}
|
||||
|
||||
// NewTCPHandler creates a new TCP handler
|
||||
func NewTCPHandler(s *stack.Stack, ph *ProxyHandler) *TCPHandler {
|
||||
return &TCPHandler{stack: s, proxyHandler: ph}
|
||||
}
|
||||
|
||||
// NewUDPHandler creates a new UDP handler
|
||||
func NewUDPHandler(s *stack.Stack, ph *ProxyHandler) *UDPHandler {
|
||||
return &UDPHandler{stack: s, proxyHandler: ph}
|
||||
}
|
||||
|
||||
// NewICMPHandler creates a new ICMP handler
|
||||
func NewICMPHandler(s *stack.Stack, ph *ProxyHandler) *ICMPHandler {
|
||||
return &ICMPHandler{stack: s, proxyHandler: ph}
|
||||
}
|
||||
|
||||
// InstallTCPHandler installs the TCP forwarder on the stack
|
||||
func (h *TCPHandler) InstallTCPHandler() error {
|
||||
tcpForwarder := tcp.NewForwarder(h.stack, defaultWndSize, maxConnAttempts, func(r *tcp.ForwarderRequest) {
|
||||
var (
|
||||
wq waiter.Queue
|
||||
ep tcpip.Endpoint
|
||||
err tcpip.Error
|
||||
id = r.ID()
|
||||
)
|
||||
|
||||
// Perform a TCP three-way handshake
|
||||
ep, err = r.CreateEndpoint(&wq)
|
||||
if err != nil {
|
||||
// RST: prevent potential half-open TCP connection leak
|
||||
r.Complete(true)
|
||||
return
|
||||
}
|
||||
defer r.Complete(false)
|
||||
|
||||
// Set socket options
|
||||
setTCPSocketOptions(h.stack, ep)
|
||||
|
||||
// Create TCP connection from netstack endpoint
|
||||
netstackConn := gonet.NewTCPConn(&wq, ep)
|
||||
|
||||
// Handle the connection in a goroutine
|
||||
go h.handleTCPConn(netstackConn, id)
|
||||
})
|
||||
|
||||
h.stack.SetTransportProtocolHandler(tcp.ProtocolNumber, tcpForwarder.HandlePacket)
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleTCPConn handles a TCP connection by proxying it to the actual target
|
||||
func (h *TCPHandler) handleTCPConn(netstackConn *gonet.TCPConn, id stack.TransportEndpointID) {
|
||||
defer netstackConn.Close()
|
||||
|
||||
// Extract source and target address from the connection ID
|
||||
srcIP := id.RemoteAddress.String()
|
||||
srcPort := id.RemotePort
|
||||
dstIP := id.LocalAddress.String()
|
||||
dstPort := id.LocalPort
|
||||
|
||||
logger.Info("TCP Forwarder: Handling connection %s:%d -> %s:%d", srcIP, srcPort, dstIP, dstPort)
|
||||
|
||||
// Check if there's a destination rewrite for this connection (e.g., localhost targets)
|
||||
actualDstIP := dstIP
|
||||
if h.proxyHandler != nil {
|
||||
if rewrittenAddr, ok := h.proxyHandler.LookupDestinationRewrite(srcIP, dstIP, dstPort, uint8(tcp.ProtocolNumber)); ok {
|
||||
actualDstIP = rewrittenAddr.String()
|
||||
logger.Info("TCP Forwarder: Using rewritten destination %s (original: %s)", actualDstIP, dstIP)
|
||||
}
|
||||
}
|
||||
|
||||
targetAddr := fmt.Sprintf("%s:%d", actualDstIP, dstPort)
|
||||
|
||||
// Create context with timeout for connection establishment
|
||||
ctx, cancel := context.WithTimeout(context.Background(), tcpConnectTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Dial the actual target using standard net package
|
||||
var d net.Dialer
|
||||
targetConn, err := d.DialContext(ctx, "tcp", targetAddr)
|
||||
if err != nil {
|
||||
logger.Info("TCP Forwarder: Failed to connect to %s: %v", targetAddr, err)
|
||||
// Connection failed, netstack will handle RST
|
||||
return
|
||||
}
|
||||
defer targetConn.Close()
|
||||
|
||||
logger.Info("TCP Forwarder: Successfully connected to %s, starting bidirectional copy", targetAddr)
|
||||
|
||||
// Bidirectional copy between netstack and target
|
||||
pipeTCP(netstackConn, targetConn)
|
||||
}
|
||||
|
||||
// pipeTCP copies data bidirectionally between two connections
|
||||
func pipeTCP(origin, remote net.Conn) {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(2)
|
||||
|
||||
go unidirectionalStreamTCP(remote, origin, "origin->remote", &wg)
|
||||
go unidirectionalStreamTCP(origin, remote, "remote->origin", &wg)
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// unidirectionalStreamTCP copies data in one direction
|
||||
func unidirectionalStreamTCP(dst, src net.Conn, dir string, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
|
||||
buf := make([]byte, bufferSize)
|
||||
_, _ = io.CopyBuffer(dst, src, buf)
|
||||
|
||||
// Do the upload/download side TCP half-close
|
||||
if cr, ok := src.(interface{ CloseRead() error }); ok {
|
||||
cr.CloseRead()
|
||||
}
|
||||
if cw, ok := dst.(interface{ CloseWrite() error }); ok {
|
||||
cw.CloseWrite()
|
||||
}
|
||||
|
||||
// Set TCP half-close timeout
|
||||
dst.SetReadDeadline(time.Now().Add(tcpWaitTimeout))
|
||||
}
|
||||
|
||||
// setTCPSocketOptions sets TCP socket options for better performance
|
||||
func setTCPSocketOptions(s *stack.Stack, ep tcpip.Endpoint) {
|
||||
// TCP keepalive options
|
||||
ep.SocketOptions().SetKeepAlive(true)
|
||||
|
||||
idle := tcpip.KeepaliveIdleOption(tcpKeepaliveIdle)
|
||||
ep.SetSockOpt(&idle)
|
||||
|
||||
interval := tcpip.KeepaliveIntervalOption(tcpKeepaliveInterval)
|
||||
ep.SetSockOpt(&interval)
|
||||
|
||||
ep.SetSockOptInt(tcpip.KeepaliveCountOption, tcpKeepaliveCount)
|
||||
|
||||
// TCP send/recv buffer size
|
||||
var ss tcpip.TCPSendBufferSizeRangeOption
|
||||
if err := s.TransportProtocolOption(tcp.ProtocolNumber, &ss); err == nil {
|
||||
ep.SocketOptions().SetSendBufferSize(int64(ss.Default), false)
|
||||
}
|
||||
|
||||
var rs tcpip.TCPReceiveBufferSizeRangeOption
|
||||
if err := s.TransportProtocolOption(tcp.ProtocolNumber, &rs); err == nil {
|
||||
ep.SocketOptions().SetReceiveBufferSize(int64(rs.Default), false)
|
||||
}
|
||||
}
|
||||
|
||||
// InstallUDPHandler installs the UDP forwarder on the stack
|
||||
func (h *UDPHandler) InstallUDPHandler() error {
|
||||
udpForwarder := udp.NewForwarder(h.stack, func(r *udp.ForwarderRequest) {
|
||||
var (
|
||||
wq waiter.Queue
|
||||
id = r.ID()
|
||||
)
|
||||
|
||||
ep, err := r.CreateEndpoint(&wq)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Create UDP connection from netstack endpoint
|
||||
netstackConn := gonet.NewUDPConn(&wq, ep)
|
||||
|
||||
// Handle the connection in a goroutine
|
||||
go h.handleUDPConn(netstackConn, id)
|
||||
})
|
||||
|
||||
h.stack.SetTransportProtocolHandler(udp.ProtocolNumber, udpForwarder.HandlePacket)
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleUDPConn handles a UDP connection by proxying it to the actual target
|
||||
func (h *UDPHandler) handleUDPConn(netstackConn *gonet.UDPConn, id stack.TransportEndpointID) {
|
||||
defer netstackConn.Close()
|
||||
|
||||
// Extract source and target address from the connection ID
|
||||
srcIP := id.RemoteAddress.String()
|
||||
srcPort := id.RemotePort
|
||||
dstIP := id.LocalAddress.String()
|
||||
dstPort := id.LocalPort
|
||||
|
||||
logger.Info("UDP Forwarder: Handling connection %s:%d -> %s:%d", srcIP, srcPort, dstIP, dstPort)
|
||||
|
||||
// Check if there's a destination rewrite for this connection (e.g., localhost targets)
|
||||
actualDstIP := dstIP
|
||||
if h.proxyHandler != nil {
|
||||
if rewrittenAddr, ok := h.proxyHandler.LookupDestinationRewrite(srcIP, dstIP, dstPort, uint8(udp.ProtocolNumber)); ok {
|
||||
actualDstIP = rewrittenAddr.String()
|
||||
logger.Info("UDP Forwarder: Using rewritten destination %s (original: %s)", actualDstIP, dstIP)
|
||||
}
|
||||
}
|
||||
|
||||
targetAddr := fmt.Sprintf("%s:%d", actualDstIP, dstPort)
|
||||
|
||||
// Resolve target address
|
||||
remoteUDPAddr, err := net.ResolveUDPAddr("udp", targetAddr)
|
||||
if err != nil {
|
||||
logger.Info("UDP Forwarder: Failed to resolve %s: %v", targetAddr, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Resolve client address (for sending responses back)
|
||||
clientAddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", srcIP, srcPort))
|
||||
if err != nil {
|
||||
logger.Info("UDP Forwarder: Failed to resolve client %s:%d: %v", srcIP, srcPort, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Create unconnected UDP socket (so we can use WriteTo)
|
||||
targetConn, err := net.ListenUDP("udp", nil)
|
||||
if err != nil {
|
||||
logger.Info("UDP Forwarder: Failed to create UDP socket: %v", err)
|
||||
return
|
||||
}
|
||||
defer targetConn.Close()
|
||||
|
||||
logger.Info("UDP Forwarder: Successfully created UDP socket for %s, starting bidirectional copy", targetAddr)
|
||||
|
||||
// Bidirectional copy between netstack and target
|
||||
pipeUDP(netstackConn, targetConn, remoteUDPAddr, clientAddr, udpSessionTimeout)
|
||||
}
|
||||
|
||||
// pipeUDP copies UDP packets bidirectionally
|
||||
func pipeUDP(origin, remote net.PacketConn, serverAddr, clientAddr net.Addr, timeout time.Duration) {
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(2)
|
||||
|
||||
// Read from origin (netstack), write to remote (target server)
|
||||
go unidirectionalPacketStream(remote, origin, serverAddr, "origin->remote", &wg, timeout)
|
||||
// Read from remote (target server), write to origin (netstack) with client address
|
||||
go unidirectionalPacketStream(origin, remote, clientAddr, "remote->origin", &wg, timeout)
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// unidirectionalPacketStream copies packets in one direction
|
||||
func unidirectionalPacketStream(dst, src net.PacketConn, to net.Addr, dir string, wg *sync.WaitGroup, timeout time.Duration) {
|
||||
defer wg.Done()
|
||||
|
||||
logger.Info("UDP %s: Starting packet stream (to=%v)", dir, to)
|
||||
err := copyPacketData(dst, src, to, timeout)
|
||||
if err != nil {
|
||||
logger.Info("UDP %s: Stream ended with error: %v", dir, err)
|
||||
} else {
|
||||
logger.Info("UDP %s: Stream ended (timeout)", dir)
|
||||
}
|
||||
}
|
||||
|
||||
// copyPacketData copies UDP packet data with timeout
|
||||
func copyPacketData(dst, src net.PacketConn, to net.Addr, timeout time.Duration) error {
|
||||
buf := make([]byte, 65535) // Max UDP packet size
|
||||
|
||||
for {
|
||||
src.SetReadDeadline(time.Now().Add(timeout))
|
||||
n, srcAddr, err := src.ReadFrom(buf)
|
||||
if ne, ok := err.(net.Error); ok && ne.Timeout() {
|
||||
return nil // ignore I/O timeout
|
||||
} else if err == io.EOF {
|
||||
return nil // ignore EOF
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("UDP copyPacketData: Read %d bytes from %v", n, srcAddr)
|
||||
|
||||
// Determine write destination
|
||||
writeAddr := to
|
||||
if writeAddr == nil {
|
||||
// If no destination specified, use the source address from the packet
|
||||
writeAddr = srcAddr
|
||||
}
|
||||
|
||||
written, err := dst.WriteTo(buf[:n], writeAddr)
|
||||
if err != nil {
|
||||
logger.Info("UDP copyPacketData: Write error to %v: %v", writeAddr, err)
|
||||
return err
|
||||
}
|
||||
logger.Info("UDP copyPacketData: Wrote %d bytes to %v", written, writeAddr)
|
||||
|
||||
dst.SetReadDeadline(time.Now().Add(timeout))
|
||||
}
|
||||
}
|
||||
|
||||
// InstallICMPHandler installs the ICMP handler on the stack
|
||||
func (h *ICMPHandler) InstallICMPHandler() error {
|
||||
h.stack.SetTransportProtocolHandler(header.ICMPv4ProtocolNumber, h.handleICMPPacket)
|
||||
logger.Debug("ICMP Handler: Installed ICMP protocol handler")
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleICMPPacket handles incoming ICMP packets
|
||||
func (h *ICMPHandler) handleICMPPacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) bool {
|
||||
logger.Debug("ICMP Handler: Received ICMP packet from %s to %s", id.RemoteAddress, id.LocalAddress)
|
||||
|
||||
// Get the ICMP header from the packet
|
||||
icmpData := pkt.TransportHeader().Slice()
|
||||
if len(icmpData) < header.ICMPv4MinimumSize {
|
||||
logger.Debug("ICMP Handler: Packet too small for ICMP header: %d bytes", len(icmpData))
|
||||
return false
|
||||
}
|
||||
|
||||
icmpHdr := header.ICMPv4(icmpData)
|
||||
icmpType := icmpHdr.Type()
|
||||
icmpCode := icmpHdr.Code()
|
||||
|
||||
logger.Debug("ICMP Handler: Type=%d, Code=%d, Ident=%d, Seq=%d",
|
||||
icmpType, icmpCode, icmpHdr.Ident(), icmpHdr.Sequence())
|
||||
|
||||
// Only handle Echo Request (ping)
|
||||
if icmpType != header.ICMPv4Echo {
|
||||
logger.Debug("ICMP Handler: Ignoring non-echo ICMP type: %d", icmpType)
|
||||
return false
|
||||
}
|
||||
|
||||
// Extract source and destination addresses
|
||||
srcIP := id.RemoteAddress.String()
|
||||
dstIP := id.LocalAddress.String()
|
||||
|
||||
logger.Info("ICMP Handler: Echo Request from %s to %s (ident=%d, seq=%d)",
|
||||
srcIP, dstIP, icmpHdr.Ident(), icmpHdr.Sequence())
|
||||
|
||||
// Convert to netip.Addr for subnet matching
|
||||
srcAddr, err := netip.ParseAddr(srcIP)
|
||||
if err != nil {
|
||||
logger.Debug("ICMP Handler: Failed to parse source IP %s: %v", srcIP, err)
|
||||
return false
|
||||
}
|
||||
dstAddr, err := netip.ParseAddr(dstIP)
|
||||
if err != nil {
|
||||
logger.Debug("ICMP Handler: Failed to parse dest IP %s: %v", dstIP, err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Check subnet rules (use port 0 for ICMP since it doesn't have ports)
|
||||
if h.proxyHandler == nil {
|
||||
logger.Debug("ICMP Handler: No proxy handler configured")
|
||||
return false
|
||||
}
|
||||
|
||||
matchedRule := h.proxyHandler.subnetLookup.Match(srcAddr, dstAddr, 0, header.ICMPv4ProtocolNumber)
|
||||
if matchedRule == nil {
|
||||
logger.Debug("ICMP Handler: No matching subnet rule for %s -> %s", srcIP, dstIP)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Info("ICMP Handler: Matched subnet rule for %s -> %s", srcIP, dstIP)
|
||||
|
||||
// Determine actual destination (with possible rewrite)
|
||||
actualDstIP := dstIP
|
||||
if matchedRule.RewriteTo != "" {
|
||||
resolvedAddr, err := h.proxyHandler.resolveRewriteAddress(matchedRule.RewriteTo)
|
||||
if err != nil {
|
||||
logger.Info("ICMP Handler: Failed to resolve rewrite address %s: %v", matchedRule.RewriteTo, err)
|
||||
} else {
|
||||
actualDstIP = resolvedAddr.String()
|
||||
logger.Info("ICMP Handler: Using rewritten destination %s (original: %s)", actualDstIP, dstIP)
|
||||
}
|
||||
}
|
||||
|
||||
// Get the full ICMP payload (including the data after the header)
|
||||
icmpPayload := pkt.Data().AsRange().ToSlice()
|
||||
|
||||
// Handle the ping in a goroutine to avoid blocking
|
||||
go h.proxyPing(srcIP, dstIP, actualDstIP, icmpHdr.Ident(), icmpHdr.Sequence(), icmpPayload)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// proxyPing sends a ping to the actual destination and injects the reply back
|
||||
func (h *ICMPHandler) proxyPing(srcIP, originalDstIP, actualDstIP string, ident, seq uint16, payload []byte) {
|
||||
logger.Debug("ICMP Handler: Proxying ping from %s to %s (actual: %s), ident=%d, seq=%d",
|
||||
srcIP, originalDstIP, actualDstIP, ident, seq)
|
||||
|
||||
// Try three methods in order: ip4:icmp -> udp4 -> ping command
|
||||
// Track which method succeeded so we can handle identifier matching correctly
|
||||
method, success := h.tryICMPMethods(actualDstIP, ident, seq, payload)
|
||||
|
||||
if !success {
|
||||
logger.Info("ICMP Handler: All ping methods failed for %s", actualDstIP)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("ICMP Handler: Ping successful to %s using %s, injecting reply (ident=%d, seq=%d)",
|
||||
actualDstIP, method, ident, seq)
|
||||
|
||||
// Build the reply packet to inject back into the netstack
|
||||
// The reply should appear to come from the original destination (before rewrite)
|
||||
h.injectICMPReply(srcIP, originalDstIP, ident, seq, payload)
|
||||
}
|
||||
|
||||
// tryICMPMethods tries all available ICMP methods in order
|
||||
func (h *ICMPHandler) tryICMPMethods(actualDstIP string, ident, seq uint16, payload []byte) (string, bool) {
|
||||
if h.tryRawICMP(actualDstIP, ident, seq, payload, false) {
|
||||
return "raw ICMP", true
|
||||
}
|
||||
if h.tryUnprivilegedICMP(actualDstIP, ident, seq, payload) {
|
||||
return "unprivileged ICMP", true
|
||||
}
|
||||
if h.tryPingCommand(actualDstIP, ident, seq, payload) {
|
||||
return "ping command", true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// tryRawICMP attempts to ping using raw ICMP sockets (requires CAP_NET_RAW or root)
|
||||
func (h *ICMPHandler) tryRawICMP(actualDstIP string, ident, seq uint16, payload []byte, ignoreIdent bool) bool {
|
||||
conn, err := icmp.ListenPacket("ip4:icmp", "0.0.0.0")
|
||||
if err != nil {
|
||||
logger.Debug("ICMP Handler: Raw ICMP socket not available: %v", err)
|
||||
return false
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
logger.Debug("ICMP Handler: Using raw ICMP socket")
|
||||
return h.sendAndReceiveICMP(conn, actualDstIP, ident, seq, payload, false, ignoreIdent)
|
||||
}
|
||||
|
||||
// tryUnprivilegedICMP attempts to ping using unprivileged ICMP (requires ping_group_range configured)
|
||||
func (h *ICMPHandler) tryUnprivilegedICMP(actualDstIP string, ident, seq uint16, payload []byte) bool {
|
||||
conn, err := icmp.ListenPacket("udp4", "0.0.0.0")
|
||||
if err != nil {
|
||||
logger.Debug("ICMP Handler: Unprivileged ICMP socket not available: %v", err)
|
||||
return false
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
logger.Debug("ICMP Handler: Using unprivileged ICMP socket")
|
||||
// Unprivileged ICMP doesn't let us control the identifier, so we ignore it in matching
|
||||
return h.sendAndReceiveICMP(conn, actualDstIP, ident, seq, payload, true, true)
|
||||
}
|
||||
|
||||
// sendAndReceiveICMP sends an ICMP echo request and waits for the reply
|
||||
func (h *ICMPHandler) sendAndReceiveICMP(conn *icmp.PacketConn, actualDstIP string, ident, seq uint16, payload []byte, isUnprivileged bool, ignoreIdent bool) bool {
|
||||
// Build the ICMP echo request message
|
||||
echoMsg := &icmp.Message{
|
||||
Type: ipv4.ICMPTypeEcho,
|
||||
Code: 0,
|
||||
Body: &icmp.Echo{
|
||||
ID: int(ident),
|
||||
Seq: int(seq),
|
||||
Data: payload,
|
||||
},
|
||||
}
|
||||
|
||||
msgBytes, err := echoMsg.Marshal(nil)
|
||||
if err != nil {
|
||||
logger.Debug("ICMP Handler: Failed to marshal ICMP message: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Resolve destination address based on socket type
|
||||
var writeErr error
|
||||
if isUnprivileged {
|
||||
// For unprivileged ICMP, use UDP-style addressing
|
||||
udpAddr := &net.UDPAddr{IP: net.ParseIP(actualDstIP)}
|
||||
logger.Debug("ICMP Handler: Sending ping to %s (unprivileged)", udpAddr.String())
|
||||
conn.SetDeadline(time.Now().Add(icmpTimeout))
|
||||
_, writeErr = conn.WriteTo(msgBytes, udpAddr)
|
||||
} else {
|
||||
// For raw ICMP, use IP addressing
|
||||
dst, err := net.ResolveIPAddr("ip4", actualDstIP)
|
||||
if err != nil {
|
||||
logger.Debug("ICMP Handler: Failed to resolve destination %s: %v", actualDstIP, err)
|
||||
return false
|
||||
}
|
||||
logger.Debug("ICMP Handler: Sending ping to %s (raw)", dst.String())
|
||||
conn.SetDeadline(time.Now().Add(icmpTimeout))
|
||||
_, writeErr = conn.WriteTo(msgBytes, dst)
|
||||
}
|
||||
|
||||
if writeErr != nil {
|
||||
logger.Debug("ICMP Handler: Failed to send ping to %s: %v", actualDstIP, writeErr)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Debug("ICMP Handler: Ping sent to %s, waiting for reply (ident=%d, seq=%d)", actualDstIP, ident, seq)
|
||||
|
||||
// Wait for reply - loop to filter out non-matching packets
|
||||
replyBuf := make([]byte, 1500)
|
||||
|
||||
for {
|
||||
n, peer, err := conn.ReadFrom(replyBuf)
|
||||
if err != nil {
|
||||
logger.Debug("ICMP Handler: Failed to receive ping reply from %s: %v", actualDstIP, err)
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Debug("ICMP Handler: Received %d bytes from %s", n, peer.String())
|
||||
|
||||
// Parse the reply
|
||||
replyMsg, err := icmp.ParseMessage(1, replyBuf[:n])
|
||||
if err != nil {
|
||||
logger.Debug("ICMP Handler: Failed to parse ICMP message: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if it's an echo reply (type 0), not an echo request (type 8)
|
||||
if replyMsg.Type != ipv4.ICMPTypeEchoReply {
|
||||
logger.Debug("ICMP Handler: Received non-echo-reply type: %v, continuing to wait", replyMsg.Type)
|
||||
continue
|
||||
}
|
||||
|
||||
reply, ok := replyMsg.Body.(*icmp.Echo)
|
||||
if !ok {
|
||||
logger.Debug("ICMP Handler: Invalid echo reply body type, continuing to wait")
|
||||
continue
|
||||
}
|
||||
|
||||
// Verify the sequence matches what we sent
|
||||
// For unprivileged ICMP, the kernel controls the identifier, so we only check sequence
|
||||
if reply.Seq != int(seq) {
|
||||
logger.Debug("ICMP Handler: Reply seq mismatch: got seq=%d, want seq=%d", reply.Seq, seq)
|
||||
continue
|
||||
}
|
||||
|
||||
if !ignoreIdent && reply.ID != int(ident) {
|
||||
logger.Debug("ICMP Handler: Reply ident mismatch: got ident=%d, want ident=%d", reply.ID, ident)
|
||||
continue
|
||||
}
|
||||
|
||||
// Found matching reply
|
||||
logger.Debug("ICMP Handler: Received valid echo reply")
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// tryPingCommand attempts to ping using the system ping command (always works, but less control)
|
||||
func (h *ICMPHandler) tryPingCommand(actualDstIP string, ident, seq uint16, payload []byte) bool {
|
||||
logger.Debug("ICMP Handler: Attempting to use system ping command")
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), icmpTimeout)
|
||||
defer cancel()
|
||||
|
||||
// Send one ping with timeout
|
||||
// -c 1: count = 1 packet
|
||||
// -W 5: timeout = 5 seconds
|
||||
// -q: quiet output (just summary)
|
||||
cmd := exec.CommandContext(ctx, "ping", "-c", "1", "-W", "5", "-q", actualDstIP)
|
||||
output, err := cmd.CombinedOutput()
|
||||
|
||||
if err != nil {
|
||||
logger.Debug("ICMP Handler: System ping command failed: %v, output: %s", err, string(output))
|
||||
return false
|
||||
}
|
||||
|
||||
logger.Debug("ICMP Handler: System ping command succeeded")
|
||||
return true
|
||||
}
|
||||
|
||||
// injectICMPReply creates an ICMP echo reply packet and queues it to be sent back through the tunnel
|
||||
func (h *ICMPHandler) injectICMPReply(dstIP, srcIP string, ident, seq uint16, payload []byte) {
|
||||
logger.Debug("ICMP Handler: Creating reply from %s to %s (ident=%d, seq=%d)",
|
||||
srcIP, dstIP, ident, seq)
|
||||
|
||||
// Parse addresses
|
||||
srcAddr, err := netip.ParseAddr(srcIP)
|
||||
if err != nil {
|
||||
logger.Info("ICMP Handler: Failed to parse source IP for reply: %v", err)
|
||||
return
|
||||
}
|
||||
dstAddr, err := netip.ParseAddr(dstIP)
|
||||
if err != nil {
|
||||
logger.Info("ICMP Handler: Failed to parse dest IP for reply: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Calculate total packet size
|
||||
ipHeaderLen := header.IPv4MinimumSize
|
||||
icmpHeaderLen := header.ICMPv4MinimumSize
|
||||
totalLen := ipHeaderLen + icmpHeaderLen + len(payload)
|
||||
|
||||
// Create the packet buffer
|
||||
pkt := make([]byte, totalLen)
|
||||
|
||||
// Build IPv4 header
|
||||
ipHdr := header.IPv4(pkt[:ipHeaderLen])
|
||||
ipHdr.Encode(&header.IPv4Fields{
|
||||
TotalLength: uint16(totalLen),
|
||||
TTL: 64,
|
||||
Protocol: uint8(header.ICMPv4ProtocolNumber),
|
||||
SrcAddr: tcpip.AddrFrom4(srcAddr.As4()),
|
||||
DstAddr: tcpip.AddrFrom4(dstAddr.As4()),
|
||||
})
|
||||
ipHdr.SetChecksum(^ipHdr.CalculateChecksum())
|
||||
|
||||
// Build ICMP header
|
||||
icmpHdr := header.ICMPv4(pkt[ipHeaderLen : ipHeaderLen+icmpHeaderLen])
|
||||
icmpHdr.SetType(header.ICMPv4EchoReply)
|
||||
icmpHdr.SetCode(0)
|
||||
icmpHdr.SetIdent(ident)
|
||||
icmpHdr.SetSequence(seq)
|
||||
|
||||
// Copy payload
|
||||
copy(pkt[ipHeaderLen+icmpHeaderLen:], payload)
|
||||
|
||||
// Calculate ICMP checksum (covers ICMP header + payload)
|
||||
icmpHdr.SetChecksum(0)
|
||||
icmpData := pkt[ipHeaderLen:]
|
||||
icmpHdr.SetChecksum(^checksum.Checksum(icmpData, 0))
|
||||
|
||||
logger.Debug("ICMP Handler: Built reply packet, total length=%d", totalLen)
|
||||
|
||||
// Queue the packet to be sent back through the tunnel
|
||||
if h.proxyHandler != nil {
|
||||
if h.proxyHandler.QueueICMPReply(pkt) {
|
||||
logger.Info("ICMP Handler: Queued echo reply packet for transmission")
|
||||
} else {
|
||||
logger.Info("ICMP Handler: Failed to queue echo reply packet")
|
||||
}
|
||||
} else {
|
||||
logger.Info("ICMP Handler: Cannot queue reply - proxy handler not available")
|
||||
}
|
||||
}
|
||||
797
netstack2/proxy.go
Normal file
797
netstack2/proxy.go
Normal file
@@ -0,0 +1,797 @@
|
||||
package netstack2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
"gvisor.dev/gvisor/pkg/buffer"
|
||||
"gvisor.dev/gvisor/pkg/tcpip"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/checksum"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/header"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
|
||||
)
|
||||
|
||||
// PortRange represents an allowed range of ports (inclusive) with optional protocol filtering.
// Protocol can be "tcp", "udp", or "" (empty string means both protocols).
type PortRange struct {
	Min      uint16 // lowest allowed port (inclusive)
	Max      uint16 // highest allowed port (inclusive)
	Protocol string // "tcp", "udp", or "" for both
}
|
||||
|
||||
// SubnetRule represents a subnet with optional port restrictions and source address.
// When RewriteTo is set, DNAT (Destination Network Address Translation) is performed:
//   - Incoming packets: destination IP is rewritten to the resolved RewriteTo address
//   - Outgoing packets: source IP is rewritten back to the original destination
//
// RewriteTo can be either:
//   - An IP address with CIDR notation (e.g., "192.168.1.1/32")
//   - A domain name (e.g., "example.com") which will be resolved at request time
//
// This allows transparent proxying where traffic appears to come from the rewritten address.
type SubnetRule struct {
	SourcePrefix netip.Prefix // Source IP prefix (who is sending)
	DestPrefix   netip.Prefix // Destination IP prefix (where it's going)
	DisableIcmp  bool         // If true, ICMP traffic is blocked for this subnet
	RewriteTo    string       // Optional rewrite address for DNAT - can be IP/CIDR or domain name
	PortRanges   []PortRange  // empty slice means all ports allowed
}
|
||||
|
||||
// ruleKey is used as a map key for fast O(1) lookups.
// The prefixes are stored in their canonical string form (netip.Prefix.String)
// so the struct is comparable and usable as a map key.
type ruleKey struct {
	sourcePrefix string
	destPrefix   string
}
|
||||
|
||||
// SubnetLookup provides IP subnet and port matching for proxy rules.
// Insertion and removal by exact prefix pair are O(1); Match iterates all
// rules because it must test prefix containment, not exact equality.
type SubnetLookup struct {
	mu    sync.RWMutex            // guards rules for concurrent Add/Remove/Match
	rules map[ruleKey]*SubnetRule // Map for O(1) lookups by prefix combination
}
|
||||
|
||||
// NewSubnetLookup creates a new subnet lookup table
|
||||
func NewSubnetLookup() *SubnetLookup {
|
||||
return &SubnetLookup{
|
||||
rules: make(map[ruleKey]*SubnetRule),
|
||||
}
|
||||
}
|
||||
|
||||
// AddSubnet adds a subnet rule with source and destination prefixes and optional port restrictions
|
||||
// If portRanges is nil or empty, all ports are allowed for this subnet
|
||||
// rewriteTo can be either an IP/CIDR (e.g., "192.168.1.1/32") or a domain name (e.g., "example.com")
|
||||
func (sl *SubnetLookup) AddSubnet(sourcePrefix, destPrefix netip.Prefix, rewriteTo string, portRanges []PortRange, disableIcmp bool) {
|
||||
sl.mu.Lock()
|
||||
defer sl.mu.Unlock()
|
||||
|
||||
key := ruleKey{
|
||||
sourcePrefix: sourcePrefix.String(),
|
||||
destPrefix: destPrefix.String(),
|
||||
}
|
||||
|
||||
sl.rules[key] = &SubnetRule{
|
||||
SourcePrefix: sourcePrefix,
|
||||
DestPrefix: destPrefix,
|
||||
DisableIcmp: disableIcmp,
|
||||
RewriteTo: rewriteTo,
|
||||
PortRanges: portRanges,
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveSubnet removes a subnet rule from the lookup table
|
||||
func (sl *SubnetLookup) RemoveSubnet(sourcePrefix, destPrefix netip.Prefix) {
|
||||
sl.mu.Lock()
|
||||
defer sl.mu.Unlock()
|
||||
|
||||
key := ruleKey{
|
||||
sourcePrefix: sourcePrefix.String(),
|
||||
destPrefix: destPrefix.String(),
|
||||
}
|
||||
|
||||
delete(sl.rules, key)
|
||||
}
|
||||
|
||||
// Match checks if a source IP, destination IP, port, and protocol match any subnet rule
|
||||
// Returns the matched rule if ALL of these conditions are met:
|
||||
// - The source IP is in the rule's source prefix
|
||||
// - The destination IP is in the rule's destination prefix
|
||||
// - The port is in an allowed range (or no port restrictions exist)
|
||||
// - The protocol matches (or the port range allows both protocols)
|
||||
//
|
||||
// proto should be header.TCPProtocolNumber or header.UDPProtocolNumber
|
||||
// Returns nil if no rule matches
|
||||
func (sl *SubnetLookup) Match(srcIP, dstIP netip.Addr, port uint16, proto tcpip.TransportProtocolNumber) *SubnetRule {
|
||||
sl.mu.RLock()
|
||||
defer sl.mu.RUnlock()
|
||||
|
||||
// Iterate through all rules to find matching source and destination prefixes
|
||||
// This is O(n) but necessary since we need to check prefix containment, not exact match
|
||||
for _, rule := range sl.rules {
|
||||
// Check if source and destination IPs match their respective prefixes
|
||||
if !rule.SourcePrefix.Contains(srcIP) {
|
||||
continue
|
||||
}
|
||||
if !rule.DestPrefix.Contains(dstIP) {
|
||||
continue
|
||||
}
|
||||
|
||||
if rule.DisableIcmp && (proto == header.ICMPv4ProtocolNumber || proto == header.ICMPv6ProtocolNumber) {
|
||||
// ICMP is disabled for this subnet
|
||||
return nil
|
||||
}
|
||||
|
||||
// Both IPs match - now check port restrictions
|
||||
// If no port ranges specified, all ports are allowed
|
||||
if len(rule.PortRanges) == 0 {
|
||||
return rule
|
||||
}
|
||||
|
||||
// Check if port and protocol are in any of the allowed ranges
|
||||
for _, pr := range rule.PortRanges {
|
||||
if port >= pr.Min && port <= pr.Max {
|
||||
// Check protocol compatibility
|
||||
if pr.Protocol == "" {
|
||||
// Empty protocol means allow both TCP and UDP
|
||||
return rule
|
||||
}
|
||||
// Check if the packet protocol matches the port range protocol
|
||||
if (pr.Protocol == "tcp" && proto == header.TCPProtocolNumber) ||
|
||||
(pr.Protocol == "udp" && proto == header.UDPProtocolNumber) {
|
||||
return rule
|
||||
}
|
||||
// Port matches but protocol doesn't - continue checking other ranges
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// connKey uniquely identifies a connection (5-tuple) for NAT tracking.
// IPs are stored as canonical strings so the struct is a valid map key.
type connKey struct {
	srcIP   string
	srcPort uint16
	dstIP   string
	dstPort uint16
	proto   uint8
}
|
||||
|
||||
// destKey identifies a destination for handler lookups (without source port,
// since the source port may change between packets of the same logical flow).
type destKey struct {
	srcIP   string
	dstIP   string
	dstPort uint16
	proto   uint8
}
|
||||
|
||||
// natState tracks NAT translation state so replies can be reverse-translated.
type natState struct {
	originalDst netip.Addr // Original destination before DNAT
	rewrittenTo netip.Addr // The address we rewrote to
}
|
||||
|
||||
// ProxyHandler handles packet injection and extraction for promiscuous mode.
type ProxyHandler struct {
	proxyStack        *stack.Stack                // dedicated gVisor stack for proxied traffic
	proxyEp           *channel.Endpoint           // link endpoint used to inject/read raw packets
	proxyNotifyHandle *channel.NotificationHandle // handle from AddNotify; removed on Close
	tcpHandler        *TCPHandler
	udpHandler        *UDPHandler
	icmpHandler       *ICMPHandler
	subnetLookup      *SubnetLookup // rules deciding which packets are proxied
	natTable          map[connKey]*natState
	destRewriteTable  map[destKey]netip.Addr // Maps original dest to rewritten dest for handler lookups
	natMu             sync.RWMutex           // guards natTable and destRewriteTable
	enabled           bool
	icmpReplies       chan []byte          // Channel for ICMP reply packets to be sent back through the tunnel
	notifiable        channel.Notification // Notification handler for triggering reads
}
|
||||
|
||||
// ProxyHandlerOptions configures the proxy handler.
// If none of the Enable* flags is set, NewProxyHandler returns (nil, nil).
type ProxyHandlerOptions struct {
	EnableTCP  bool
	EnableUDP  bool
	EnableICMP bool
	MTU        int // MTU of the channel endpoint, in bytes
}
|
||||
|
||||
// NewProxyHandler creates a new proxy handler for promiscuous mode
|
||||
func NewProxyHandler(options ProxyHandlerOptions) (*ProxyHandler, error) {
|
||||
if !options.EnableTCP && !options.EnableUDP && !options.EnableICMP {
|
||||
return nil, nil // No proxy needed
|
||||
}
|
||||
|
||||
handler := &ProxyHandler{
|
||||
enabled: true,
|
||||
subnetLookup: NewSubnetLookup(),
|
||||
natTable: make(map[connKey]*natState),
|
||||
destRewriteTable: make(map[destKey]netip.Addr),
|
||||
icmpReplies: make(chan []byte, 256), // Buffer for ICMP reply packets
|
||||
proxyEp: channel.New(1024, uint32(options.MTU), ""),
|
||||
proxyStack: stack.New(stack.Options{
|
||||
NetworkProtocols: []stack.NetworkProtocolFactory{
|
||||
ipv4.NewProtocol,
|
||||
ipv6.NewProtocol,
|
||||
},
|
||||
TransportProtocols: []stack.TransportProtocolFactory{
|
||||
tcp.NewProtocol,
|
||||
udp.NewProtocol,
|
||||
icmp.NewProtocol4,
|
||||
icmp.NewProtocol6,
|
||||
},
|
||||
}),
|
||||
}
|
||||
|
||||
// Initialize TCP handler if enabled
|
||||
if options.EnableTCP {
|
||||
handler.tcpHandler = NewTCPHandler(handler.proxyStack, handler)
|
||||
if err := handler.tcpHandler.InstallTCPHandler(); err != nil {
|
||||
return nil, fmt.Errorf("failed to install TCP handler: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize UDP handler if enabled
|
||||
if options.EnableUDP {
|
||||
handler.udpHandler = NewUDPHandler(handler.proxyStack, handler)
|
||||
if err := handler.udpHandler.InstallUDPHandler(); err != nil {
|
||||
return nil, fmt.Errorf("failed to install UDP handler: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize ICMP handler if enabled
|
||||
if options.EnableICMP {
|
||||
handler.icmpHandler = NewICMPHandler(handler.proxyStack, handler)
|
||||
if err := handler.icmpHandler.InstallICMPHandler(); err != nil {
|
||||
return nil, fmt.Errorf("failed to install ICMP handler: %v", err)
|
||||
}
|
||||
logger.Debug("ProxyHandler: ICMP handler enabled")
|
||||
}
|
||||
|
||||
// // Example 1: Add a rule with no port restrictions (all ports allowed)
|
||||
// // This accepts all traffic FROM 10.0.0.0/24 TO 10.20.20.0/24
|
||||
// sourceSubnet := netip.MustParsePrefix("10.0.0.0/24")
|
||||
// destSubnet := netip.MustParsePrefix("10.20.20.0/24")
|
||||
// handler.AddSubnetRule(sourceSubnet, destSubnet, nil)
|
||||
|
||||
// // Example 2: Add a rule with specific port ranges
|
||||
// // This accepts traffic FROM 10.0.0.5/32 TO 10.20.21.21/32 only on ports 80, 443, and 8000-9000
|
||||
// sourceIP := netip.MustParsePrefix("10.0.0.5/32")
|
||||
// destIP := netip.MustParsePrefix("10.20.21.21/32")
|
||||
// handler.AddSubnetRule(sourceIP, destIP, []PortRange{
|
||||
// {Min: 80, Max: 80},
|
||||
// {Min: 443, Max: 443},
|
||||
// {Min: 8000, Max: 9000},
|
||||
// })
|
||||
|
||||
return handler, nil
|
||||
}
|
||||
|
||||
// AddSubnetRule adds a subnet with optional port restrictions to the proxy handler.
// sourcePrefix: The IP prefix of the peer sending the data.
// destPrefix: The IP prefix of the destination.
// rewriteTo: Optional address to rewrite destination to - can be IP/CIDR or domain name.
// If portRanges is nil or empty, all ports are allowed for this subnet.
// Safe to call on a nil or disabled handler (the call is ignored).
func (p *ProxyHandler) AddSubnetRule(sourcePrefix, destPrefix netip.Prefix, rewriteTo string, portRanges []PortRange, disableIcmp bool) {
	if p == nil || !p.enabled {
		return
	}
	p.subnetLookup.AddSubnet(sourcePrefix, destPrefix, rewriteTo, portRanges, disableIcmp)
}
|
||||
|
||||
// RemoveSubnetRule removes a subnet from the proxy handler.
// Safe to call on a nil or disabled handler (the call is ignored).
func (p *ProxyHandler) RemoveSubnetRule(sourcePrefix, destPrefix netip.Prefix) {
	if p == nil || !p.enabled {
		return
	}
	p.subnetLookup.RemoveSubnet(sourcePrefix, destPrefix)
}
|
||||
|
||||
// LookupDestinationRewrite looks up the rewritten destination for a connection
|
||||
// This is used by TCP/UDP handlers to find the actual target address
|
||||
func (p *ProxyHandler) LookupDestinationRewrite(srcIP, dstIP string, dstPort uint16, proto uint8) (netip.Addr, bool) {
|
||||
if p == nil || !p.enabled {
|
||||
return netip.Addr{}, false
|
||||
}
|
||||
|
||||
key := destKey{
|
||||
srcIP: srcIP,
|
||||
dstIP: dstIP,
|
||||
dstPort: dstPort,
|
||||
proto: proto,
|
||||
}
|
||||
|
||||
p.natMu.RLock()
|
||||
defer p.natMu.RUnlock()
|
||||
|
||||
addr, ok := p.destRewriteTable[key]
|
||||
return addr, ok
|
||||
}
|
||||
|
||||
// resolveRewriteAddress resolves a rewrite address which can be either:
|
||||
// - An IP address with CIDR notation (e.g., "192.168.1.1/32") - returns the IP directly
|
||||
// - A plain IP address (e.g., "192.168.1.1") - returns the IP directly
|
||||
// - A domain name (e.g., "example.com") - performs DNS lookup
|
||||
func (p *ProxyHandler) resolveRewriteAddress(rewriteTo string) (netip.Addr, error) {
|
||||
logger.Debug("Resolving rewrite address: %s", rewriteTo)
|
||||
|
||||
// First, try to parse as a CIDR prefix (e.g., "192.168.1.1/32")
|
||||
if prefix, err := netip.ParsePrefix(rewriteTo); err == nil {
|
||||
return prefix.Addr(), nil
|
||||
}
|
||||
|
||||
// Try to parse as a plain IP address (e.g., "192.168.1.1")
|
||||
if addr, err := netip.ParseAddr(rewriteTo); err == nil {
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
// Not an IP address, treat as domain name - perform DNS lookup
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
ips, err := net.DefaultResolver.LookupIP(ctx, "ip4", rewriteTo)
|
||||
if err != nil {
|
||||
return netip.Addr{}, fmt.Errorf("failed to resolve domain %s: %w", rewriteTo, err)
|
||||
}
|
||||
|
||||
if len(ips) == 0 {
|
||||
return netip.Addr{}, fmt.Errorf("no IP addresses found for domain %s", rewriteTo)
|
||||
}
|
||||
|
||||
// Use the first resolved IP address
|
||||
ip := ips[0]
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
addr := netip.AddrFrom4([4]byte{ip4[0], ip4[1], ip4[2], ip4[3]})
|
||||
logger.Debug("Resolved %s to %s", rewriteTo, addr)
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
return netip.Addr{}, fmt.Errorf("no IPv4 address found for domain %s", rewriteTo)
|
||||
}
|
||||
|
||||
// Initialize sets up the promiscuous NIC with the netTun's notification system.
// It wires the notification handler into the channel endpoint, creates NIC 1
// on the proxy stack, enables promiscuous mode and spoofing on it, and adds
// a default IPv4 route. A nil or disabled handler is a no-op.
func (p *ProxyHandler) Initialize(notifiable channel.Notification) error {
	if p == nil || !p.enabled {
		return nil
	}

	// Store notifiable for triggering notifications on ICMP replies
	// (QueueICMPReply calls WriteNotify on it).
	p.notifiable = notifiable

	// Add notification handler; the handle is kept so Close can remove it.
	p.proxyNotifyHandle = p.proxyEp.AddNotify(notifiable)

	// Create NIC with promiscuous mode
	tcpipErr := p.proxyStack.CreateNICWithOptions(1, p.proxyEp, stack.NICOptions{
		Disabled: false,
		QDisc:    nil,
	})
	if tcpipErr != nil {
		return fmt.Errorf("CreateNIC (proxy): %v", tcpipErr)
	}

	// Enable promiscuous mode - accepts packets for any destination IP
	if tcpipErr := p.proxyStack.SetPromiscuousMode(1, true); tcpipErr != nil {
		return fmt.Errorf("SetPromiscuousMode: %v", tcpipErr)
	}

	// Enable spoofing - allows sending packets from any source IP
	if tcpipErr := p.proxyStack.SetSpoofing(1, true); tcpipErr != nil {
		return fmt.Errorf("SetSpoofing: %v", tcpipErr)
	}

	// Add default route so all IPv4 destinations are reachable via NIC 1.
	p.proxyStack.AddRoute(tcpip.Route{
		Destination: header.IPv4EmptySubnet,
		NIC:         1,
	})

	return nil
}
|
||||
|
||||
// HandleIncomingPacket processes incoming packets and determines if they should
// be injected into the proxy stack.
//
// Returns true when the packet matched a subnet rule and was injected into the
// proxy stack (the caller should consider it consumed), false otherwise.
// Only IPv4 is handled. For TCP/UDP the destination port is extracted for rule
// matching; ICMP and other protocols use port 0, so they only match rules with
// no port restrictions. When the matched rule has RewriteTo set, DNAT state is
// created (or reused) per connection and the packet's destination is rewritten
// unless the target is loopback.
func (p *ProxyHandler) HandleIncomingPacket(packet []byte) bool {
	if p == nil || !p.enabled {
		return false
	}

	// Check minimum packet size
	if len(packet) < header.IPv4MinimumSize {
		return false
	}

	// Only handle IPv4 for now
	if packet[0]>>4 != 4 {
		return false
	}

	// Parse IPv4 header
	ipv4Header := header.IPv4(packet)
	srcIP := ipv4Header.SourceAddress()
	dstIP := ipv4Header.DestinationAddress()

	// Convert gvisor tcpip.Address to netip.Addr
	srcBytes := srcIP.As4()
	srcAddr := netip.AddrFrom4(srcBytes)
	dstBytes := dstIP.As4()
	dstAddr := netip.AddrFrom4(dstBytes)

	// Parse transport layer to get destination port
	var dstPort uint16
	protocol := ipv4Header.TransportProtocol()
	headerLen := int(ipv4Header.HeaderLength())

	// Extract port based on protocol
	switch protocol {
	case header.TCPProtocolNumber:
		if len(packet) < headerLen+header.TCPMinimumSize {
			return false
		}
		tcpHeader := header.TCP(packet[headerLen:])
		dstPort = tcpHeader.DestinationPort()
	case header.UDPProtocolNumber:
		if len(packet) < headerLen+header.UDPMinimumSize {
			return false
		}
		udpHeader := header.UDP(packet[headerLen:])
		dstPort = udpHeader.DestinationPort()
	case header.ICMPv4ProtocolNumber:
		// ICMP doesn't have ports, use port 0 (must match rules with no port restrictions)
		dstPort = 0
		logger.Debug("HandleIncomingPacket: ICMP packet from %s to %s", srcAddr, dstAddr)
	default:
		// For other protocols, use port 0 (must match rules with no port restrictions)
		dstPort = 0
		logger.Debug("HandleIncomingPacket: Unknown protocol %d from %s to %s", protocol, srcAddr, dstAddr)
	}

	// Check if the source IP, destination IP, port, and protocol match any subnet rule
	matchedRule := p.subnetLookup.Match(srcAddr, dstAddr, dstPort, protocol)
	if matchedRule != nil {
		logger.Debug("HandleIncomingPacket: Matched rule for %s -> %s (proto=%d, port=%d)",
			srcAddr, dstAddr, protocol, dstPort)
		// Check if we need to perform DNAT
		if matchedRule.RewriteTo != "" {
			// Extract the source port so the connection can be tracked by 5-tuple.
			// Non-TCP/UDP protocols leave srcPort at 0.
			var srcPort uint16
			switch protocol {
			case header.TCPProtocolNumber:
				tcpHeader := header.TCP(packet[headerLen:])
				srcPort = tcpHeader.SourcePort()
			case header.UDPProtocolNumber:
				udpHeader := header.UDP(packet[headerLen:])
				srcPort = udpHeader.SourcePort()
			}

			// Key using original destination to track the connection
			key := connKey{
				srcIP:   srcAddr.String(),
				srcPort: srcPort,
				dstIP:   dstAddr.String(),
				dstPort: dstPort,
				proto:   uint8(protocol),
			}

			// Key for handler lookups (doesn't include srcPort for flexibility)
			dKey := destKey{
				srcIP:   srcAddr.String(),
				dstIP:   dstAddr.String(),
				dstPort: dstPort,
				proto:   uint8(protocol),
			}

			// Check if we already have a NAT entry for this connection
			p.natMu.RLock()
			existingEntry, exists := p.natTable[key]
			p.natMu.RUnlock()

			var newDst netip.Addr
			if exists {
				// Use the previously resolved address for this connection,
				// so a domain rewrite target stays stable per connection.
				newDst = existingEntry.rewrittenTo
				logger.Debug("Using existing NAT entry for connection: %s -> %s", dstAddr, newDst)
			} else {
				// New connection - resolve the rewrite address
				var err error
				newDst, err = p.resolveRewriteAddress(matchedRule.RewriteTo)
				if err != nil {
					// Failed to resolve, skip DNAT but still proxy the packet
					logger.Debug("Failed to resolve rewrite address: %v", err)
					pkb := stack.NewPacketBuffer(stack.PacketBufferOptions{
						Payload: buffer.MakeWithData(packet),
					})
					p.proxyEp.InjectInbound(header.IPv4ProtocolNumber, pkb)
					return true
				}

				// Store NAT state for this connection
				p.natMu.Lock()
				p.natTable[key] = &natState{
					originalDst: dstAddr,
					rewrittenTo: newDst,
				}
				// Store destination rewrite for handler lookups
				p.destRewriteTable[dKey] = newDst
				p.natMu.Unlock()
				logger.Debug("New NAT entry for connection: %s -> %s", dstAddr, newDst)
			}

			// Check if target is loopback - if so, don't rewrite packet destination
			// as gVisor will drop martian packets. Instead, the handlers will use
			// destRewriteTable to find the actual target address.
			if !newDst.IsLoopback() {
				// Rewrite the packet only for non-loopback destinations
				packet = p.rewritePacketDestination(packet, newDst)
				if packet == nil {
					return false
				}
			} else {
				logger.Debug("Target is loopback, not rewriting packet - handlers will use rewrite table")
			}
		}

		// Inject into proxy stack
		pkb := stack.NewPacketBuffer(stack.PacketBufferOptions{
			Payload: buffer.MakeWithData(packet),
		})
		p.proxyEp.InjectInbound(header.IPv4ProtocolNumber, pkb)
		logger.Debug("HandleIncomingPacket: Injected packet into proxy stack (proto=%d)", protocol)
		return true
	}

	// logger.Debug("HandleIncomingPacket: No matching rule for %s -> %s (proto=%d, port=%d)",
	// 	srcAddr, dstAddr, protocol, dstPort)
	return false
}
|
||||
|
||||
// rewritePacketDestination rewrites the destination IP in a packet and recalculates checksums
|
||||
func (p *ProxyHandler) rewritePacketDestination(packet []byte, newDst netip.Addr) []byte {
|
||||
if len(packet) < header.IPv4MinimumSize {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Make a copy to avoid modifying the original
|
||||
pkt := make([]byte, len(packet))
|
||||
copy(pkt, packet)
|
||||
|
||||
ipv4Header := header.IPv4(pkt)
|
||||
headerLen := int(ipv4Header.HeaderLength())
|
||||
|
||||
// Rewrite destination IP
|
||||
newDstBytes := newDst.As4()
|
||||
newDstAddr := tcpip.AddrFrom4(newDstBytes)
|
||||
ipv4Header.SetDestinationAddress(newDstAddr)
|
||||
|
||||
// Recalculate IP checksum
|
||||
ipv4Header.SetChecksum(0)
|
||||
ipv4Header.SetChecksum(^ipv4Header.CalculateChecksum())
|
||||
|
||||
// Update transport layer checksum if needed
|
||||
protocol := ipv4Header.TransportProtocol()
|
||||
switch protocol {
|
||||
case header.TCPProtocolNumber:
|
||||
if len(pkt) >= headerLen+header.TCPMinimumSize {
|
||||
tcpHeader := header.TCP(pkt[headerLen:])
|
||||
tcpHeader.SetChecksum(0)
|
||||
xsum := header.PseudoHeaderChecksum(
|
||||
header.TCPProtocolNumber,
|
||||
ipv4Header.SourceAddress(),
|
||||
ipv4Header.DestinationAddress(),
|
||||
uint16(len(pkt)-headerLen),
|
||||
)
|
||||
xsum = checksum.Checksum(pkt[headerLen:], xsum)
|
||||
tcpHeader.SetChecksum(^xsum)
|
||||
}
|
||||
case header.UDPProtocolNumber:
|
||||
if len(pkt) >= headerLen+header.UDPMinimumSize {
|
||||
udpHeader := header.UDP(pkt[headerLen:])
|
||||
udpHeader.SetChecksum(0)
|
||||
xsum := header.PseudoHeaderChecksum(
|
||||
header.UDPProtocolNumber,
|
||||
ipv4Header.SourceAddress(),
|
||||
ipv4Header.DestinationAddress(),
|
||||
uint16(len(pkt)-headerLen),
|
||||
)
|
||||
xsum = checksum.Checksum(pkt[headerLen:], xsum)
|
||||
udpHeader.SetChecksum(^xsum)
|
||||
}
|
||||
}
|
||||
|
||||
return pkt
|
||||
}
|
||||
|
||||
// rewritePacketSource rewrites the source IP in a packet and recalculates checksums (for reverse NAT)
|
||||
func (p *ProxyHandler) rewritePacketSource(packet []byte, newSrc netip.Addr) []byte {
|
||||
if len(packet) < header.IPv4MinimumSize {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Make a copy to avoid modifying the original
|
||||
pkt := make([]byte, len(packet))
|
||||
copy(pkt, packet)
|
||||
|
||||
ipv4Header := header.IPv4(pkt)
|
||||
headerLen := int(ipv4Header.HeaderLength())
|
||||
|
||||
// Rewrite source IP
|
||||
newSrcBytes := newSrc.As4()
|
||||
newSrcAddr := tcpip.AddrFrom4(newSrcBytes)
|
||||
ipv4Header.SetSourceAddress(newSrcAddr)
|
||||
|
||||
// Recalculate IP checksum
|
||||
ipv4Header.SetChecksum(0)
|
||||
ipv4Header.SetChecksum(^ipv4Header.CalculateChecksum())
|
||||
|
||||
// Update transport layer checksum if needed
|
||||
protocol := ipv4Header.TransportProtocol()
|
||||
switch protocol {
|
||||
case header.TCPProtocolNumber:
|
||||
if len(pkt) >= headerLen+header.TCPMinimumSize {
|
||||
tcpHeader := header.TCP(pkt[headerLen:])
|
||||
tcpHeader.SetChecksum(0)
|
||||
xsum := header.PseudoHeaderChecksum(
|
||||
header.TCPProtocolNumber,
|
||||
ipv4Header.SourceAddress(),
|
||||
ipv4Header.DestinationAddress(),
|
||||
uint16(len(pkt)-headerLen),
|
||||
)
|
||||
xsum = checksum.Checksum(pkt[headerLen:], xsum)
|
||||
tcpHeader.SetChecksum(^xsum)
|
||||
}
|
||||
case header.UDPProtocolNumber:
|
||||
if len(pkt) >= headerLen+header.UDPMinimumSize {
|
||||
udpHeader := header.UDP(pkt[headerLen:])
|
||||
udpHeader.SetChecksum(0)
|
||||
xsum := header.PseudoHeaderChecksum(
|
||||
header.UDPProtocolNumber,
|
||||
ipv4Header.SourceAddress(),
|
||||
ipv4Header.DestinationAddress(),
|
||||
uint16(len(pkt)-headerLen),
|
||||
)
|
||||
xsum = checksum.Checksum(pkt[headerLen:], xsum)
|
||||
udpHeader.SetChecksum(^xsum)
|
||||
}
|
||||
}
|
||||
|
||||
return pkt
|
||||
}
|
||||
|
||||
// ReadOutgoingPacket reads packets from the proxy stack that need to be
// sent back through the tunnel.
//
// Queued ICMP replies take priority (checked non-blocking); otherwise a
// packet is read from the channel endpoint. IPv4 TCP/UDP packets that match
// a NAT entry get their source rewritten back to the original destination
// (reverse DNAT). Returns nil when nothing is available.
func (p *ProxyHandler) ReadOutgoingPacket() *buffer.View {
	if p == nil || !p.enabled {
		return nil
	}

	// First check for ICMP reply packets (non-blocking)
	select {
	case icmpReply := <-p.icmpReplies:
		logger.Debug("ReadOutgoingPacket: Returning ICMP reply packet (%d bytes)", len(icmpReply))
		return buffer.NewViewWithData(icmpReply)
	default:
		// No ICMP reply available, continue to check proxy endpoint
	}

	pkt := p.proxyEp.Read()
	if pkt != nil {
		view := pkt.ToView()
		pkt.DecRef()

		// Check if we need to perform reverse NAT
		packet := view.AsSlice()
		if len(packet) >= header.IPv4MinimumSize && packet[0]>>4 == 4 {
			ipv4Header := header.IPv4(packet)
			srcIP := ipv4Header.SourceAddress()
			dstIP := ipv4Header.DestinationAddress()
			protocol := ipv4Header.TransportProtocol()
			headerLen := int(ipv4Header.HeaderLength())

			// Extract ports (left at 0 if the packet is too short)
			var srcPort, dstPort uint16
			switch protocol {
			case header.TCPProtocolNumber:
				if len(packet) >= headerLen+header.TCPMinimumSize {
					tcpHeader := header.TCP(packet[headerLen:])
					srcPort = tcpHeader.SourcePort()
					dstPort = tcpHeader.DestinationPort()
				}
			case header.UDPProtocolNumber:
				if len(packet) >= headerLen+header.UDPMinimumSize {
					udpHeader := header.UDP(packet[headerLen:])
					srcPort = udpHeader.SourcePort()
					dstPort = udpHeader.DestinationPort()
				}
			case header.ICMPv4ProtocolNumber:
				// ICMP packets don't need NAT translation in our implementation
				// since we construct reply packets with the correct addresses
				logger.Debug("ReadOutgoingPacket: ICMP packet from %s to %s", srcIP, dstIP)
				return view
			}

			// Look up NAT state for reverse translation.
			// The key uses the original dst (before rewrite), so for replies we need to
			// find the entry where the rewritten address matches the current source.
			// This is an O(n) scan over the NAT table.
			p.natMu.RLock()
			var natEntry *natState
			for k, entry := range p.natTable {
				// Match: reply's dst should be original src, reply's src should be rewritten dst
				if k.srcIP == dstIP.String() && k.srcPort == dstPort &&
					entry.rewrittenTo.String() == srcIP.String() && k.dstPort == srcPort &&
					k.proto == uint8(protocol) {
					natEntry = entry
					break
				}
			}
			p.natMu.RUnlock()

			if natEntry != nil {
				// Perform reverse NAT - rewrite source to original destination
				packet = p.rewritePacketSource(packet, natEntry.originalDst)
				if packet != nil {
					return buffer.NewViewWithData(packet)
				}
			}
		}

		return view
	}

	return nil
}
|
||||
|
||||
// QueueICMPReply queues an ICMP reply packet to be sent back through the tunnel
|
||||
func (p *ProxyHandler) QueueICMPReply(packet []byte) bool {
|
||||
if p == nil || !p.enabled {
|
||||
return false
|
||||
}
|
||||
|
||||
select {
|
||||
case p.icmpReplies <- packet:
|
||||
logger.Debug("QueueICMPReply: Queued ICMP reply packet (%d bytes)", len(packet))
|
||||
// Trigger notification so WriteNotify picks up the packet
|
||||
if p.notifiable != nil {
|
||||
p.notifiable.WriteNotify()
|
||||
}
|
||||
return true
|
||||
default:
|
||||
logger.Info("QueueICMPReply: ICMP reply channel full, dropping packet")
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Close cleans up the proxy handler resources
|
||||
func (p *ProxyHandler) Close() error {
|
||||
if p == nil || !p.enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close ICMP replies channel
|
||||
if p.icmpReplies != nil {
|
||||
close(p.icmpReplies)
|
||||
}
|
||||
|
||||
if p.proxyStack != nil {
|
||||
p.proxyStack.RemoveNIC(1)
|
||||
p.proxyStack.Close()
|
||||
}
|
||||
|
||||
if p.proxyEp != nil {
|
||||
if p.proxyNotifyHandle != nil {
|
||||
p.proxyEp.RemoveNotify(p.proxyNotifyHandle)
|
||||
}
|
||||
p.proxyEp.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
1152
netstack2/tun.go
Normal file
1152
netstack2/tun.go
Normal file
File diff suppressed because it is too large
Load Diff
169
network/interface.go
Normal file
169
network/interface.go
Normal file
@@ -0,0 +1,169 @@
|
||||
package network
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
"github.com/vishvananda/netlink"
|
||||
)
|
||||
|
||||
// ConfigureInterface configures a network interface with an IP address and brings it up.
//
// tunnelIp must be in CIDR form (e.g. "10.0.0.2/24"). The address and mask are
// first pushed to the platform-neutral IPv4/MTU settings, then, when
// interfaceName is non-empty, the OS-specific configuration runs. On android
// and ios (and for unknown platforms) no OS-level configuration is performed.
//
// NOTE(review): SetIPv4Settings and SetMTU results are not checked here —
// confirm whether they can fail.
func ConfigureInterface(interfaceName string, tunnelIp string, mtu int) error {
	logger.Info("The tunnel IP is: %s", tunnelIp)

	// Parse the IP address and network
	ip, ipNet, err := net.ParseCIDR(tunnelIp)
	if err != nil {
		return fmt.Errorf("invalid IP address: %v", err)
	}

	// Convert CIDR mask to dotted decimal format (e.g., 255.255.255.0)
	mask := net.IP(ipNet.Mask).String()
	destinationAddress := ip.String()

	logger.Debug("The destination address is: %s", destinationAddress)

	// network.SetTunnelRemoteAddress() // what does this do?
	SetIPv4Settings([]string{destinationAddress}, []string{mask})
	SetMTU(mtu)

	// No interface name means only the settings above were wanted.
	if interfaceName == "" {
		return nil
	}

	switch runtime.GOOS {
	case "linux":
		return configureLinux(interfaceName, ip, ipNet)
	case "darwin":
		return configureDarwin(interfaceName, ip, ipNet)
	case "windows":
		return configureWindows(interfaceName, ip, ipNet)
	case "android":
		return nil
	case "ios":
		return nil
	}

	return nil
}
|
||||
|
||||
// waitForInterfaceUp polls the network interface until it's up or times out
|
||||
func waitForInterfaceUp(interfaceName string, expectedIP net.IP, timeout time.Duration) error {
|
||||
logger.Info("Waiting for interface %s to be up with IP %s", interfaceName, expectedIP)
|
||||
deadline := time.Now().Add(timeout)
|
||||
pollInterval := 500 * time.Millisecond
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
// Check if interface exists and is up
|
||||
iface, err := net.InterfaceByName(interfaceName)
|
||||
if err == nil {
|
||||
// Check if interface is up
|
||||
if iface.Flags&net.FlagUp != 0 {
|
||||
// Check if it has the expected IP
|
||||
addrs, err := iface.Addrs()
|
||||
if err == nil {
|
||||
for _, addr := range addrs {
|
||||
ipNet, ok := addr.(*net.IPNet)
|
||||
if ok && ipNet.IP.Equal(expectedIP) {
|
||||
logger.Info("Interface %s is up with correct IP", interfaceName)
|
||||
return nil // Interface is up with correct IP
|
||||
}
|
||||
}
|
||||
logger.Info("Interface %s is up but doesn't have expected IP yet", interfaceName)
|
||||
}
|
||||
} else {
|
||||
logger.Info("Interface %s exists but is not up yet", interfaceName)
|
||||
}
|
||||
} else {
|
||||
logger.Info("Interface %s not found yet: %v", interfaceName, err)
|
||||
}
|
||||
|
||||
// Wait before next check
|
||||
time.Sleep(pollInterval)
|
||||
}
|
||||
|
||||
return fmt.Errorf("timed out waiting for interface %s to be up with IP %s", interfaceName, expectedIP)
|
||||
}
|
||||
|
||||
// FindUnusedUTUN returns the name of the lowest-numbered utun device
// (utun0..utun255) not currently present on the system, or an error when
// interfaces cannot be listed or all 256 names are taken.
func FindUnusedUTUN() (string, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return "", fmt.Errorf("failed to list interfaces: %v", err)
	}

	// Collect the numbers of every existing utunN device.
	utunPattern := regexp.MustCompile(`^utun(\d+)$`)
	taken := make(map[int]bool)
	for _, iface := range ifaces {
		m := utunPattern.FindStringSubmatch(iface.Name)
		if len(m) != 2 {
			continue
		}
		if idx, convErr := strconv.Atoi(m[1]); convErr == nil {
			taken[idx] = true
		}
	}

	// Try utun0 up to utun255.
	for candidate := 0; candidate < 256; candidate++ {
		if !taken[candidate] {
			return fmt.Sprintf("utun%d", candidate), nil
		}
	}
	return "", fmt.Errorf("no unused utun interface found")
}
|
||||
|
||||
func configureDarwin(interfaceName string, ip net.IP, ipNet *net.IPNet) error {
|
||||
logger.Info("Configuring darwin interface: %s", interfaceName)
|
||||
|
||||
prefix, _ := ipNet.Mask.Size()
|
||||
ipStr := fmt.Sprintf("%s/%d", ip.String(), prefix)
|
||||
|
||||
cmd := exec.Command("ifconfig", interfaceName, "inet", ipStr, ip.String(), "alias")
|
||||
logger.Info("Running command: %v", cmd)
|
||||
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("ifconfig command failed: %v, output: %s", err, out)
|
||||
}
|
||||
|
||||
// Bring up the interface
|
||||
cmd = exec.Command("ifconfig", interfaceName, "up")
|
||||
logger.Info("Running command: %v", cmd)
|
||||
|
||||
out, err = cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("ifconfig up command failed: %v, output: %s", err, out)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func configureLinux(interfaceName string, ip net.IP, ipNet *net.IPNet) error {
|
||||
// Get the interface
|
||||
link, err := netlink.LinkByName(interfaceName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get interface %s: %v", interfaceName, err)
|
||||
}
|
||||
|
||||
// Create the IP address attributes
|
||||
addr := &netlink.Addr{
|
||||
IPNet: &net.IPNet{
|
||||
IP: ip,
|
||||
Mask: ipNet.Mask,
|
||||
},
|
||||
}
|
||||
|
||||
// Add the IP address to the interface
|
||||
if err := netlink.AddrAdd(link, addr); err != nil {
|
||||
return fmt.Errorf("failed to add IP address: %v", err)
|
||||
}
|
||||
|
||||
// Bring up the interface
|
||||
if err := netlink.LinkSetUp(link); err != nil {
|
||||
return fmt.Errorf("failed to bring up interface: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
12
network/interface_notwindows.go
Normal file
12
network/interface_notwindows.go
Normal file
@@ -0,0 +1,12 @@
|
||||
//go:build !windows
|
||||
|
||||
package network
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
)
|
||||
|
||||
// configureWindows is the stub for non-Windows builds; the real
// implementation lives in interface_windows.go behind the windows build tag.
// Reaching this at runtime indicates a platform-dispatch bug in the caller.
func configureWindows(interfaceName string, ip net.IP, ipNet *net.IPNet) error {
	return fmt.Errorf("configureWindows called on non-Windows platform")
}
|
||||
63
network/interface_windows.go
Normal file
63
network/interface_windows.go
Normal file
@@ -0,0 +1,63 @@
|
||||
//go:build windows
|
||||
|
||||
package network
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
"golang.zx2c4.com/wireguard/windows/tunnel/winipcfg"
|
||||
)
|
||||
|
||||
func configureWindows(interfaceName string, ip net.IP, ipNet *net.IPNet) error {
|
||||
logger.Info("Configuring Windows interface: %s", interfaceName)
|
||||
|
||||
// Get the LUID for the interface
|
||||
iface, err := net.InterfaceByName(interfaceName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get interface %s: %v", interfaceName, err)
|
||||
}
|
||||
|
||||
luid, err := winipcfg.LUIDFromIndex(uint32(iface.Index))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get LUID for interface %s: %v", interfaceName, err)
|
||||
}
|
||||
|
||||
// Create the IP address prefix
|
||||
maskBits, _ := ipNet.Mask.Size()
|
||||
|
||||
// Ensure we convert to the correct IP version (IPv4 vs IPv6)
|
||||
var addr netip.Addr
|
||||
if ip4 := ip.To4(); ip4 != nil {
|
||||
// IPv4 address
|
||||
addr, _ = netip.AddrFromSlice(ip4)
|
||||
} else {
|
||||
// IPv6 address
|
||||
addr, _ = netip.AddrFromSlice(ip)
|
||||
}
|
||||
if !addr.IsValid() {
|
||||
return fmt.Errorf("failed to convert IP address")
|
||||
}
|
||||
prefix := netip.PrefixFrom(addr, maskBits)
|
||||
|
||||
// Add the IP address to the interface
|
||||
logger.Info("Adding IP address %s to interface %s", prefix.String(), interfaceName)
|
||||
err = luid.AddIPAddress(prefix)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add IP address: %v", err)
|
||||
}
|
||||
|
||||
// This was required when we were using the subprocess "netsh" command to bring up the interface.
|
||||
// With the winipcfg library, the interface should already be up after adding the IP so we dont
|
||||
// need this step anymore as far as I can tell.
|
||||
|
||||
// // Wait for the interface to be up and have the correct IP
|
||||
// err = waitForInterfaceUp(interfaceName, ip, 30*time.Second)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("interface did not come up within timeout: %v", err)
|
||||
// }
|
||||
|
||||
return nil
|
||||
}
|
||||
286
network/route.go
Normal file
286
network/route.go
Normal file
@@ -0,0 +1,286 @@
|
||||
package network
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
"github.com/vishvananda/netlink"
|
||||
)
|
||||
|
||||
func DarwinAddRoute(destination string, gateway string, interfaceName string) error {
|
||||
if runtime.GOOS != "darwin" {
|
||||
return nil
|
||||
}
|
||||
|
||||
var cmd *exec.Cmd
|
||||
|
||||
if gateway != "" {
|
||||
// Route with specific gateway
|
||||
cmd = exec.Command("route", "-q", "-n", "add", "-inet", destination, "-gateway", gateway)
|
||||
} else if interfaceName != "" {
|
||||
// Route via interface
|
||||
cmd = exec.Command("route", "-q", "-n", "add", "-inet", destination, "-interface", interfaceName)
|
||||
} else {
|
||||
return fmt.Errorf("either gateway or interface must be specified")
|
||||
}
|
||||
|
||||
logger.Info("Running command: %v", cmd)
|
||||
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("route command failed: %v, output: %s", err, out)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func DarwinRemoveRoute(destination string) error {
|
||||
if runtime.GOOS != "darwin" {
|
||||
return nil
|
||||
}
|
||||
|
||||
cmd := exec.Command("route", "-q", "-n", "delete", "-inet", destination)
|
||||
logger.Info("Running command: %v", cmd)
|
||||
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("route delete command failed: %v, output: %s", err, out)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func LinuxAddRoute(destination string, gateway string, interfaceName string) error {
|
||||
if runtime.GOOS != "linux" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse destination CIDR
|
||||
_, ipNet, err := net.ParseCIDR(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid destination address: %v", err)
|
||||
}
|
||||
|
||||
// Create route
|
||||
route := &netlink.Route{
|
||||
Dst: ipNet,
|
||||
}
|
||||
|
||||
if gateway != "" {
|
||||
// Route with specific gateway
|
||||
gw := net.ParseIP(gateway)
|
||||
if gw == nil {
|
||||
return fmt.Errorf("invalid gateway address: %s", gateway)
|
||||
}
|
||||
route.Gw = gw
|
||||
logger.Info("Adding route to %s via gateway %s", destination, gateway)
|
||||
} else if interfaceName != "" {
|
||||
// Route via interface
|
||||
link, err := netlink.LinkByName(interfaceName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get interface %s: %v", interfaceName, err)
|
||||
}
|
||||
route.LinkIndex = link.Attrs().Index
|
||||
logger.Info("Adding route to %s via interface %s", destination, interfaceName)
|
||||
} else {
|
||||
return fmt.Errorf("either gateway or interface must be specified")
|
||||
}
|
||||
|
||||
// Add the route
|
||||
if err := netlink.RouteAdd(route); err != nil {
|
||||
return fmt.Errorf("failed to add route: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func LinuxRemoveRoute(destination string) error {
|
||||
if runtime.GOOS != "linux" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse destination CIDR
|
||||
_, ipNet, err := net.ParseCIDR(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid destination address: %v", err)
|
||||
}
|
||||
|
||||
// Create route to delete
|
||||
route := &netlink.Route{
|
||||
Dst: ipNet,
|
||||
}
|
||||
|
||||
logger.Info("Removing route to %s", destination)
|
||||
|
||||
// Delete the route
|
||||
if err := netlink.RouteDel(route); err != nil {
|
||||
return fmt.Errorf("failed to delete route: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// addRouteForServerIP adds an OS-specific route for the server IP
|
||||
func AddRouteForServerIP(serverIP, interfaceName string) error {
|
||||
if interfaceName == "" {
|
||||
return nil
|
||||
}
|
||||
// TODO: does this also need to be ios?
|
||||
if runtime.GOOS == "darwin" { // macos requires routes for each peer to be added but this messes with other platforms
|
||||
if err := AddRouteForNetworkConfig(serverIP); err != nil {
|
||||
return err
|
||||
}
|
||||
return DarwinAddRoute(serverIP, "", interfaceName)
|
||||
}
|
||||
// else if runtime.GOOS == "windows" {
|
||||
// return WindowsAddRoute(serverIP, "", interfaceName)
|
||||
// } else if runtime.GOOS == "linux" {
|
||||
// return LinuxAddRoute(serverIP, "", interfaceName)
|
||||
// }
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeRouteForServerIP removes an OS-specific route for the server IP
|
||||
func RemoveRouteForServerIP(serverIP string, interfaceName string) error {
|
||||
if interfaceName == "" {
|
||||
return nil
|
||||
}
|
||||
// TODO: does this also need to be ios?
|
||||
if runtime.GOOS == "darwin" { // macos requires routes for each peer to be added but this messes with other platforms
|
||||
if err := RemoveRouteForNetworkConfig(serverIP); err != nil {
|
||||
return err
|
||||
}
|
||||
return DarwinRemoveRoute(serverIP)
|
||||
}
|
||||
// else if runtime.GOOS == "windows" {
|
||||
// return WindowsRemoveRoute(serverIP)
|
||||
// } else if runtime.GOOS == "linux" {
|
||||
// return LinuxRemoveRoute(serverIP)
|
||||
// }
|
||||
return nil
|
||||
}
|
||||
|
||||
func AddRouteForNetworkConfig(destination string) error {
|
||||
// Parse the subnet to extract IP and mask
|
||||
_, ipNet, err := net.ParseCIDR(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse subnet %s: %v", destination, err)
|
||||
}
|
||||
|
||||
// Convert CIDR mask to dotted decimal format (e.g., 255.255.255.0)
|
||||
mask := net.IP(ipNet.Mask).String()
|
||||
destinationAddress := ipNet.IP.String()
|
||||
|
||||
AddIPv4IncludedRoute(IPv4Route{DestinationAddress: destinationAddress, SubnetMask: mask})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func RemoveRouteForNetworkConfig(destination string) error {
|
||||
// Parse the subnet to extract IP and mask
|
||||
_, ipNet, err := net.ParseCIDR(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse subnet %s: %v", destination, err)
|
||||
}
|
||||
|
||||
// Convert CIDR mask to dotted decimal format (e.g., 255.255.255.0)
|
||||
mask := net.IP(ipNet.Mask).String()
|
||||
destinationAddress := ipNet.IP.String()
|
||||
|
||||
RemoveIPv4IncludedRoute(IPv4Route{DestinationAddress: destinationAddress, SubnetMask: mask})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// addRoutes adds routes for each subnet in RemoteSubnets
|
||||
func AddRoutes(remoteSubnets []string, interfaceName string) error {
|
||||
if len(remoteSubnets) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Add routes for each subnet
|
||||
for _, subnet := range remoteSubnets {
|
||||
subnet = strings.TrimSpace(subnet)
|
||||
if subnet == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := AddRouteForNetworkConfig(subnet); err != nil {
|
||||
logger.Error("Failed to add network config for subnet %s: %v", subnet, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Add route based on operating system
|
||||
if interfaceName == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
if err := DarwinAddRoute(subnet, "", interfaceName); err != nil {
|
||||
logger.Error("Failed to add Darwin route for subnet %s: %v", subnet, err)
|
||||
}
|
||||
case "windows":
|
||||
if err := WindowsAddRoute(subnet, "", interfaceName); err != nil {
|
||||
logger.Error("Failed to add Windows route for subnet %s: %v", subnet, err)
|
||||
}
|
||||
case "linux":
|
||||
if err := LinuxAddRoute(subnet, "", interfaceName); err != nil {
|
||||
logger.Error("Failed to add Linux route for subnet %s: %v", subnet, err)
|
||||
}
|
||||
case "android", "ios":
|
||||
// Routes handled by the OS/VPN service
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Info("Added route for remote subnet: %s", subnet)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeRoutesForRemoteSubnets removes routes for each subnet in RemoteSubnets
|
||||
func RemoveRoutes(remoteSubnets []string) error {
|
||||
if len(remoteSubnets) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove routes for each subnet
|
||||
for _, subnet := range remoteSubnets {
|
||||
subnet = strings.TrimSpace(subnet)
|
||||
if subnet == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := RemoveRouteForNetworkConfig(subnet); err != nil {
|
||||
logger.Error("Failed to remove network config for subnet %s: %v", subnet, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Remove route based on operating system
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
if err := DarwinRemoveRoute(subnet); err != nil {
|
||||
logger.Error("Failed to remove Darwin route for subnet %s: %v", subnet, err)
|
||||
}
|
||||
case "windows":
|
||||
if err := WindowsRemoveRoute(subnet); err != nil {
|
||||
logger.Error("Failed to remove Windows route for subnet %s: %v", subnet, err)
|
||||
}
|
||||
case "linux":
|
||||
if err := LinuxRemoveRoute(subnet); err != nil {
|
||||
logger.Error("Failed to remove Linux route for subnet %s: %v", subnet, err)
|
||||
}
|
||||
case "android", "ios":
|
||||
// Routes handled by the OS/VPN service
|
||||
continue
|
||||
}
|
||||
|
||||
logger.Info("Removed route for remote subnet: %s", subnet)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
11
network/route_notwindows.go
Normal file
11
network/route_notwindows.go
Normal file
@@ -0,0 +1,11 @@
|
||||
//go:build !windows
|
||||
|
||||
package network
|
||||
|
||||
// WindowsAddRoute is a no-op stub for non-Windows builds; the real
// implementation lives in route_windows.go behind the windows build tag.
func WindowsAddRoute(destination string, gateway string, interfaceName string) error {
	return nil
}
|
||||
|
||||
// WindowsRemoveRoute is a no-op stub for non-Windows builds; the real
// implementation lives in route_windows.go behind the windows build tag.
func WindowsRemoveRoute(destination string) error {
	return nil
}
|
||||
148
network/route_windows.go
Normal file
148
network/route_windows.go
Normal file
@@ -0,0 +1,148 @@
|
||||
//go:build windows
|
||||
|
||||
package network
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"runtime"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
"golang.zx2c4.com/wireguard/windows/tunnel/winipcfg"
|
||||
)
|
||||
|
||||
func WindowsAddRoute(destination string, gateway string, interfaceName string) error {
|
||||
if runtime.GOOS != "windows" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Parse destination CIDR
|
||||
_, ipNet, err := net.ParseCIDR(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid destination address: %v", err)
|
||||
}
|
||||
|
||||
// Convert to netip.Prefix
|
||||
maskBits, _ := ipNet.Mask.Size()
|
||||
|
||||
// Ensure we convert to the correct IP version (IPv4 vs IPv6)
|
||||
var addr netip.Addr
|
||||
if ip4 := ipNet.IP.To4(); ip4 != nil {
|
||||
// IPv4 address
|
||||
addr, _ = netip.AddrFromSlice(ip4)
|
||||
} else {
|
||||
// IPv6 address
|
||||
addr, _ = netip.AddrFromSlice(ipNet.IP)
|
||||
}
|
||||
if !addr.IsValid() {
|
||||
return fmt.Errorf("failed to convert destination IP")
|
||||
}
|
||||
prefix := netip.PrefixFrom(addr, maskBits)
|
||||
|
||||
var luid winipcfg.LUID
|
||||
var nextHop netip.Addr
|
||||
|
||||
if interfaceName != "" {
|
||||
// Get the interface LUID - needed for both gateway and interface-only routes
|
||||
iface, err := net.InterfaceByName(interfaceName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get interface %s: %v", interfaceName, err)
|
||||
}
|
||||
|
||||
luid, err = winipcfg.LUIDFromIndex(uint32(iface.Index))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get LUID for interface %s: %v", interfaceName, err)
|
||||
}
|
||||
}
|
||||
|
||||
if gateway != "" {
|
||||
// Route with specific gateway
|
||||
gwIP := net.ParseIP(gateway)
|
||||
if gwIP == nil {
|
||||
return fmt.Errorf("invalid gateway address: %s", gateway)
|
||||
}
|
||||
// Convert to correct IP version
|
||||
if ip4 := gwIP.To4(); ip4 != nil {
|
||||
nextHop, _ = netip.AddrFromSlice(ip4)
|
||||
} else {
|
||||
nextHop, _ = netip.AddrFromSlice(gwIP)
|
||||
}
|
||||
if !nextHop.IsValid() {
|
||||
return fmt.Errorf("failed to convert gateway IP")
|
||||
}
|
||||
logger.Info("Adding route to %s via gateway %s on interface %s", destination, gateway, interfaceName)
|
||||
} else if interfaceName != "" {
|
||||
// Route via interface only
|
||||
if addr.Is4() {
|
||||
nextHop = netip.IPv4Unspecified()
|
||||
} else {
|
||||
nextHop = netip.IPv6Unspecified()
|
||||
}
|
||||
logger.Info("Adding route to %s via interface %s", destination, interfaceName)
|
||||
} else {
|
||||
return fmt.Errorf("either gateway or interface must be specified")
|
||||
}
|
||||
|
||||
// Add the route using winipcfg
|
||||
err = luid.AddRoute(prefix, nextHop, 1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add route: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func WindowsRemoveRoute(destination string) error {
|
||||
// Parse destination CIDR
|
||||
_, ipNet, err := net.ParseCIDR(destination)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid destination address: %v", err)
|
||||
}
|
||||
|
||||
// Convert to netip.Prefix
|
||||
maskBits, _ := ipNet.Mask.Size()
|
||||
|
||||
// Ensure we convert to the correct IP version (IPv4 vs IPv6)
|
||||
var addr netip.Addr
|
||||
if ip4 := ipNet.IP.To4(); ip4 != nil {
|
||||
// IPv4 address
|
||||
addr, _ = netip.AddrFromSlice(ip4)
|
||||
} else {
|
||||
// IPv6 address
|
||||
addr, _ = netip.AddrFromSlice(ipNet.IP)
|
||||
}
|
||||
if !addr.IsValid() {
|
||||
return fmt.Errorf("failed to convert destination IP")
|
||||
}
|
||||
prefix := netip.PrefixFrom(addr, maskBits)
|
||||
|
||||
// Get all routes and find the one to delete
|
||||
// We need to get the LUID from the existing route
|
||||
var family winipcfg.AddressFamily
|
||||
if addr.Is4() {
|
||||
family = 2 // AF_INET
|
||||
} else {
|
||||
family = 23 // AF_INET6
|
||||
}
|
||||
|
||||
routes, err := winipcfg.GetIPForwardTable2(family)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get route table: %v", err)
|
||||
}
|
||||
|
||||
// Find and delete matching route
|
||||
for _, route := range routes {
|
||||
routePrefix := route.DestinationPrefix.Prefix()
|
||||
if routePrefix == prefix {
|
||||
logger.Info("Removing route to %s", destination)
|
||||
err = route.Delete()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete route: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("route to %s not found", destination)
|
||||
}
|
||||
190
network/settings.go
Normal file
190
network/settings.go
Normal file
@@ -0,0 +1,190 @@
|
||||
package network
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"sync"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
)
|
||||
|
||||
// NetworkSettings represents the network configuration for the tunnel.
// It is serialized with snake_case JSON keys; empty optional fields are
// omitted from the encoded output.
type NetworkSettings struct {
	// TunnelRemoteAddress is the remote endpoint of the tunnel.
	TunnelRemoteAddress string `json:"tunnel_remote_address,omitempty"`
	// MTU is a pointer so that "not set" (nil) is distinguishable from 0.
	MTU        *int     `json:"mtu,omitempty"`
	DNSServers []string `json:"dns_servers,omitempty"`
	// IPv4Addresses and IPv4SubnetMasks are set together by SetIPv4Settings;
	// masks are dotted-decimal strings (e.g. 255.255.255.0).
	IPv4Addresses      []string    `json:"ipv4_addresses,omitempty"`
	IPv4SubnetMasks    []string    `json:"ipv4_subnet_masks,omitempty"`
	IPv4IncludedRoutes []IPv4Route `json:"ipv4_included_routes,omitempty"`
	IPv4ExcludedRoutes []IPv4Route `json:"ipv4_excluded_routes,omitempty"`
	// IPv6Addresses and IPv6NetworkPrefixes are set together by
	// SetIPv6Settings.
	IPv6Addresses       []string    `json:"ipv6_addresses,omitempty"`
	IPv6NetworkPrefixes []string    `json:"ipv6_network_prefixes,omitempty"`
	IPv6IncludedRoutes  []IPv6Route `json:"ipv6_included_routes,omitempty"`
	IPv6ExcludedRoutes  []IPv6Route `json:"ipv6_excluded_routes,omitempty"`
}

// IPv4Route represents an IPv4 route: a destination network plus a
// dotted-decimal subnet mask, and optionally a gateway.
type IPv4Route struct {
	DestinationAddress string `json:"destination_address"`
	SubnetMask         string `json:"subnet_mask,omitempty"`
	GatewayAddress     string `json:"gateway_address,omitempty"`
	IsDefault          bool   `json:"is_default,omitempty"`
}

// IPv6Route represents an IPv6 route: a destination network plus a prefix
// length, and optionally a gateway.
type IPv6Route struct {
	DestinationAddress  string `json:"destination_address"`
	NetworkPrefixLength int    `json:"network_prefix_length,omitempty"`
	GatewayAddress      string `json:"gateway_address,omitempty"`
	IsDefault           bool   `json:"is_default,omitempty"`
}

var (
	// networkSettings is the single shared settings instance for this
	// package; every access must hold networkSettingsMutex.
	networkSettings      NetworkSettings
	networkSettingsMutex sync.RWMutex
	// incrementor is bumped on every mutation so callers can cheaply detect
	// that the settings changed (see GetIncrementor).
	incrementor int
)
|
||||
|
||||
// SetTunnelRemoteAddress sets the tunnel remote address
|
||||
func SetTunnelRemoteAddress(address string) {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
networkSettings.TunnelRemoteAddress = address
|
||||
incrementor++
|
||||
logger.Info("Set tunnel remote address: %s", address)
|
||||
}
|
||||
|
||||
// SetMTU sets the MTU value
|
||||
func SetMTU(mtu int) {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
networkSettings.MTU = &mtu
|
||||
incrementor++
|
||||
logger.Info("Set MTU: %d", mtu)
|
||||
}
|
||||
|
||||
// SetDNSServers sets the DNS servers
|
||||
func SetDNSServers(servers []string) {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
networkSettings.DNSServers = servers
|
||||
incrementor++
|
||||
logger.Info("Set DNS servers: %v", servers)
|
||||
}
|
||||
|
||||
// SetIPv4Settings sets IPv4 addresses and subnet masks
|
||||
func SetIPv4Settings(addresses []string, subnetMasks []string) {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
networkSettings.IPv4Addresses = addresses
|
||||
networkSettings.IPv4SubnetMasks = subnetMasks
|
||||
incrementor++
|
||||
logger.Info("Set IPv4 addresses: %v, subnet masks: %v", addresses, subnetMasks)
|
||||
}
|
||||
|
||||
// SetIPv4IncludedRoutes sets the included IPv4 routes
|
||||
func SetIPv4IncludedRoutes(routes []IPv4Route) {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
networkSettings.IPv4IncludedRoutes = routes
|
||||
incrementor++
|
||||
logger.Info("Set IPv4 included routes: %d routes", len(routes))
|
||||
}
|
||||
|
||||
func AddIPv4IncludedRoute(route IPv4Route) {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
|
||||
// make sure it does not already exist
|
||||
for _, r := range networkSettings.IPv4IncludedRoutes {
|
||||
if r == route {
|
||||
logger.Info("IPv4 included route already exists: %+v", route)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
networkSettings.IPv4IncludedRoutes = append(networkSettings.IPv4IncludedRoutes, route)
|
||||
incrementor++
|
||||
logger.Info("Added IPv4 included route: %+v", route)
|
||||
}
|
||||
|
||||
func RemoveIPv4IncludedRoute(route IPv4Route) {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
routes := networkSettings.IPv4IncludedRoutes
|
||||
for i, r := range routes {
|
||||
if r == route {
|
||||
networkSettings.IPv4IncludedRoutes = append(routes[:i], routes[i+1:]...)
|
||||
logger.Info("Removed IPv4 included route: %+v", route)
|
||||
break
|
||||
}
|
||||
}
|
||||
incrementor++
|
||||
logger.Info("IPv4 included route not found for removal: %+v", route)
|
||||
}
|
||||
|
||||
func SetIPv4ExcludedRoutes(routes []IPv4Route) {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
networkSettings.IPv4ExcludedRoutes = routes
|
||||
incrementor++
|
||||
logger.Info("Set IPv4 excluded routes: %d routes", len(routes))
|
||||
}
|
||||
|
||||
// SetIPv6Settings sets IPv6 addresses and network prefixes
|
||||
func SetIPv6Settings(addresses []string, networkPrefixes []string) {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
networkSettings.IPv6Addresses = addresses
|
||||
networkSettings.IPv6NetworkPrefixes = networkPrefixes
|
||||
incrementor++
|
||||
logger.Info("Set IPv6 addresses: %v, network prefixes: %v", addresses, networkPrefixes)
|
||||
}
|
||||
|
||||
// SetIPv6IncludedRoutes sets the included IPv6 routes
|
||||
func SetIPv6IncludedRoutes(routes []IPv6Route) {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
networkSettings.IPv6IncludedRoutes = routes
|
||||
incrementor++
|
||||
logger.Info("Set IPv6 included routes: %d routes", len(routes))
|
||||
}
|
||||
|
||||
// SetIPv6ExcludedRoutes sets the excluded IPv6 routes
|
||||
func SetIPv6ExcludedRoutes(routes []IPv6Route) {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
networkSettings.IPv6ExcludedRoutes = routes
|
||||
incrementor++
|
||||
logger.Info("Set IPv6 excluded routes: %d routes", len(routes))
|
||||
}
|
||||
|
||||
// ClearNetworkSettings clears all network settings
|
||||
func ClearNetworkSettings() {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
networkSettings = NetworkSettings{}
|
||||
incrementor++
|
||||
logger.Info("Cleared all network settings")
|
||||
}
|
||||
|
||||
func GetJSON() (string, error) {
|
||||
networkSettingsMutex.RLock()
|
||||
defer networkSettingsMutex.RUnlock()
|
||||
data, err := json.MarshalIndent(networkSettings, "", " ")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
func GetSettings() NetworkSettings {
|
||||
networkSettingsMutex.RLock()
|
||||
defer networkSettingsMutex.RUnlock()
|
||||
return networkSettings
|
||||
}
|
||||
|
||||
func GetIncrementor() int {
|
||||
networkSettingsMutex.Lock()
|
||||
defer networkSettingsMutex.Unlock()
|
||||
return incrementor
|
||||
}
|
||||
152
newt.iss
Normal file
152
newt.iss
Normal file
@@ -0,0 +1,152 @@
|
||||
; Script generated by the Inno Setup Script Wizard.
|
||||
; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES!
|
||||
|
||||
#define MyAppName "newt"
|
||||
#define MyAppVersion "1.0.0"
|
||||
#define MyAppPublisher "Fossorial Inc."
|
||||
#define MyAppURL "https://pangolin.net"
|
||||
#define MyAppExeName "newt.exe"
|
||||
|
||||
[Setup]
|
||||
; NOTE: The value of AppId uniquely identifies this application. Do not use the same AppId value in installers for other applications.
|
||||
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
|
||||
AppId={{25A1E3C4-F273-4334-8DF3-47408E83012D}
|
||||
AppName={#MyAppName}
|
||||
AppVersion={#MyAppVersion}
|
||||
;AppVerName={#MyAppName} {#MyAppVersion}
|
||||
AppPublisher={#MyAppPublisher}
|
||||
AppPublisherURL={#MyAppURL}
|
||||
AppSupportURL={#MyAppURL}
|
||||
AppUpdatesURL={#MyAppURL}
|
||||
DefaultDirName={autopf}\{#MyAppName}
|
||||
UninstallDisplayIcon={app}\{#MyAppExeName}
|
||||
; "ArchitecturesAllowed=x64compatible" specifies that Setup cannot run
|
||||
; on anything but x64 and Windows 11 on Arm.
|
||||
ArchitecturesAllowed=x64compatible
|
||||
; "ArchitecturesInstallIn64BitMode=x64compatible" requests that the
|
||||
; install be done in "64-bit mode" on x64 or Windows 11 on Arm,
|
||||
; meaning it should use the native 64-bit Program Files directory and
|
||||
; the 64-bit view of the registry.
|
||||
ArchitecturesInstallIn64BitMode=x64compatible
|
||||
DefaultGroupName={#MyAppName}
|
||||
DisableProgramGroupPage=yes
|
||||
; Uncomment the following line to run in non administrative install mode (install for current user only).
|
||||
;PrivilegesRequired=lowest
|
||||
OutputBaseFilename=mysetup
|
||||
SolidCompression=yes
|
||||
WizardStyle=modern
|
||||
; Add this to ensure PATH changes are applied and the system is prompted for a restart if needed
|
||||
RestartIfNeededByRun=no
|
||||
ChangesEnvironment=true
|
||||
|
||||
[Languages]
|
||||
Name: "english"; MessagesFile: "compiler:Default.isl"
|
||||
|
||||
[Files]
|
||||
; The 'DestName' parameter ensures that 'newt_windows_amd64.exe' is installed as 'newt.exe'
|
||||
Source: "Z:\newt_windows_amd64.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}"; Flags: ignoreversion
|
||||
Source: "Z:\wintun.dll"; DestDir: "{app}"; Flags: ignoreversion
|
||||
; NOTE: Don't use "Flags: ignoreversion" on any shared system files
|
||||
|
||||
[Icons]
|
||||
Name: "{group}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"
|
||||
|
||||
[Registry]
|
||||
; Add the application's installation directory to the system PATH environment variable.
|
||||
; HKLM (HKEY_LOCAL_MACHINE) is used for system-wide changes.
|
||||
; The 'Path' variable is located under 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment'.
|
||||
; ValueType: expandsz allows for environment variables (like %ProgramFiles%) in the path.
|
||||
; ValueData: "{olddata};{app}" appends the current application directory to the existing PATH.
|
||||
; Note: Removal during uninstallation is handled by CurUninstallStepChanged procedure in [Code] section.
|
||||
; Check: NeedsAddPath ensures this is applied only if the path is not already present.
|
||||
[Registry]
|
||||
; Add the application's installation directory to the system PATH.
|
||||
Root: HKLM; Subkey: "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"; \
|
||||
ValueType: expandsz; ValueName: "Path"; ValueData: "{olddata};{app}"; \
|
||||
Check: NeedsAddPath(ExpandConstant('{app}'))
|
||||
|
||||
[Code]
|
||||
{ NeedsAddPath returns True when Path is not yet present in the system PATH
  (HKLM\SYSTEM\...\Session Manager\Environment), so the [Registry] entry that
  appends {app} to PATH only fires once. }
function NeedsAddPath(Path: string): boolean;
var
  OrigPath: string;
begin
  if not RegQueryStringValue(HKEY_LOCAL_MACHINE,
    'SYSTEM\CurrentControlSet\Control\Session Manager\Environment',
    'Path', OrigPath)
  then begin
    // Path variable doesn't exist at all, so we definitely need to add it.
    Result := True;
    exit;
  end;

  // Perform a case-insensitive check to see if the path is already present.
  // We add semicolons to prevent partial matches (e.g., matching C:\App in C:\App2).
  if Pos(';' + UpperCase(Path) + ';', ';' + UpperCase(OrigPath) + ';') > 0 then
    Result := False
  else
    Result := True;
end;
|
||||
|
||||
{ RemovePathEntry strips every PATH entry equal to PathToRemove
  (case-insensitive, trimmed) from the system PATH in the registry and writes
  the result back. Used at uninstall time.
  NOTE(review): TStringList.DelimitedText may re-quote entries containing the
  delimiter or quote character when reconstructing the string — confirm that
  unusual PATH entries survive the round trip. }
procedure RemovePathEntry(PathToRemove: string);
var
  OrigPath: string;
  NewPath: string;
  PathList: TStringList;
  I: Integer;
begin
  if not RegQueryStringValue(HKEY_LOCAL_MACHINE,
    'SYSTEM\CurrentControlSet\Control\Session Manager\Environment',
    'Path', OrigPath)
  then begin
    // Path variable doesn't exist, nothing to remove
    exit;
  end;

  // Create a string list to parse the PATH entries
  PathList := TStringList.Create;
  try
    // Split the PATH by semicolons; StrictDelimiter keeps spaces from
    // acting as delimiters during parsing.
    PathList.Delimiter := ';';
    PathList.StrictDelimiter := True;
    PathList.DelimitedText := OrigPath;

    // Find and remove the matching entry (case-insensitive).
    // Iterate backwards so deletions do not shift unvisited indices.
    for I := PathList.Count - 1 downto 0 do
    begin
      if CompareText(Trim(PathList[I]), Trim(PathToRemove)) = 0 then
      begin
        Log('Found and removing PATH entry: ' + PathList[I]);
        PathList.Delete(I);
      end;
    end;

    // Reconstruct the PATH
    NewPath := PathList.DelimitedText;

    // Write the new PATH back to the registry
    if RegWriteExpandStringValue(HKEY_LOCAL_MACHINE,
      'SYSTEM\CurrentControlSet\Control\Session Manager\Environment',
      'Path', NewPath)
    then
      Log('Successfully removed path entry: ' + PathToRemove)
    else
      Log('Failed to write modified PATH to registry');
  finally
    PathList.Free;
  end;
end;
||||
|
||||
{ CurUninstallStepChanged is Inno Setup's uninstall event hook. At the
  usUninstall step it strips this application's install directory from the
  machine-wide PATH so no stale entry is left behind. }
procedure CurUninstallStepChanged(CurUninstallStep: TUninstallStep);
var
  InstallDir: string;
begin
  { Only the actual uninstall step is of interest; ignore all others. }
  if CurUninstallStep <> usUninstall then
    exit;

  InstallDir := ExpandConstant('{app}');
  Log('Removing PATH entry for: ' + InstallDir);

  { Remove only our path entry from the system PATH. }
  RemovePathEntry(InstallDir);
end;
|
||||
454
proxy/manager.go
454
proxy/manager.go
@@ -1,18 +1,28 @@
|
||||
package proxy
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/internal/state"
|
||||
"github.com/fosrl/newt/internal/telemetry"
|
||||
"github.com/fosrl/newt/logger"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"golang.zx2c4.com/wireguard/tun/netstack"
|
||||
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
|
||||
)
|
||||
|
||||
const errUnsupportedProtoFmt = "unsupported protocol: %s"
|
||||
|
||||
// Target represents a proxy target with its address and port
|
||||
type Target struct {
|
||||
Address string
|
||||
@@ -28,6 +38,90 @@ type ProxyManager struct {
|
||||
udpConns []*gonet.UDPConn
|
||||
running bool
|
||||
mutex sync.RWMutex
|
||||
|
||||
// telemetry (multi-tunnel)
|
||||
currentTunnelID string
|
||||
tunnels map[string]*tunnelEntry
|
||||
asyncBytes bool
|
||||
flushStop chan struct{}
|
||||
}
|
||||
|
||||
// tunnelEntry holds per-tunnel attributes and (optional) async counters.
|
||||
type tunnelEntry struct {
|
||||
attrInTCP attribute.Set
|
||||
attrOutTCP attribute.Set
|
||||
attrInUDP attribute.Set
|
||||
attrOutUDP attribute.Set
|
||||
|
||||
bytesInTCP atomic.Uint64
|
||||
bytesOutTCP atomic.Uint64
|
||||
bytesInUDP atomic.Uint64
|
||||
bytesOutUDP atomic.Uint64
|
||||
|
||||
activeTCP atomic.Int64
|
||||
activeUDP atomic.Int64
|
||||
}
|
||||
|
||||
// countingWriter wraps an io.Writer and adds bytes to OTel counter using a pre-built attribute set.
|
||||
type countingWriter struct {
|
||||
ctx context.Context
|
||||
w io.Writer
|
||||
set attribute.Set
|
||||
pm *ProxyManager
|
||||
ent *tunnelEntry
|
||||
out bool // false=in, true=out
|
||||
proto string // "tcp" or "udp"
|
||||
}
|
||||
|
||||
func (cw *countingWriter) Write(p []byte) (int, error) {
|
||||
n, err := cw.w.Write(p)
|
||||
if n > 0 {
|
||||
if cw.pm != nil && cw.pm.asyncBytes && cw.ent != nil {
|
||||
switch cw.proto {
|
||||
case "tcp":
|
||||
if cw.out {
|
||||
cw.ent.bytesOutTCP.Add(uint64(n))
|
||||
} else {
|
||||
cw.ent.bytesInTCP.Add(uint64(n))
|
||||
}
|
||||
case "udp":
|
||||
if cw.out {
|
||||
cw.ent.bytesOutUDP.Add(uint64(n))
|
||||
} else {
|
||||
cw.ent.bytesInUDP.Add(uint64(n))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
telemetry.AddTunnelBytesSet(cw.ctx, int64(n), cw.set)
|
||||
}
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func classifyProxyError(err error) string {
|
||||
if err == nil {
|
||||
return ""
|
||||
}
|
||||
if errors.Is(err, net.ErrClosed) {
|
||||
return "closed"
|
||||
}
|
||||
if ne, ok := err.(net.Error); ok {
|
||||
if ne.Timeout() {
|
||||
return "timeout"
|
||||
}
|
||||
if ne.Temporary() {
|
||||
return "temporary"
|
||||
}
|
||||
}
|
||||
msg := strings.ToLower(err.Error())
|
||||
switch {
|
||||
case strings.Contains(msg, "refused"):
|
||||
return "refused"
|
||||
case strings.Contains(msg, "reset"):
|
||||
return "reset"
|
||||
default:
|
||||
return "io_error"
|
||||
}
|
||||
}
|
||||
|
||||
// NewProxyManager creates a new proxy manager instance
|
||||
@@ -38,9 +132,94 @@ func NewProxyManager(tnet *netstack.Net) *ProxyManager {
|
||||
udpTargets: make(map[string]map[int]string),
|
||||
listeners: make([]*gonet.TCPListener, 0),
|
||||
udpConns: make([]*gonet.UDPConn, 0),
|
||||
tunnels: make(map[string]*tunnelEntry),
|
||||
}
|
||||
}
|
||||
|
||||
// SetTunnelID sets the WireGuard peer public key used as tunnel_id label.
|
||||
func (pm *ProxyManager) SetTunnelID(id string) {
|
||||
pm.mutex.Lock()
|
||||
defer pm.mutex.Unlock()
|
||||
pm.currentTunnelID = id
|
||||
if _, ok := pm.tunnels[id]; !ok {
|
||||
pm.tunnels[id] = &tunnelEntry{}
|
||||
}
|
||||
e := pm.tunnels[id]
|
||||
// include site labels if available
|
||||
site := telemetry.SiteLabelKVs()
|
||||
build := func(base []attribute.KeyValue) attribute.Set {
|
||||
if telemetry.ShouldIncludeTunnelID() {
|
||||
base = append([]attribute.KeyValue{attribute.String("tunnel_id", id)}, base...)
|
||||
}
|
||||
base = append(site, base...)
|
||||
return attribute.NewSet(base...)
|
||||
}
|
||||
e.attrInTCP = build([]attribute.KeyValue{
|
||||
attribute.String("direction", "ingress"),
|
||||
attribute.String("protocol", "tcp"),
|
||||
})
|
||||
e.attrOutTCP = build([]attribute.KeyValue{
|
||||
attribute.String("direction", "egress"),
|
||||
attribute.String("protocol", "tcp"),
|
||||
})
|
||||
e.attrInUDP = build([]attribute.KeyValue{
|
||||
attribute.String("direction", "ingress"),
|
||||
attribute.String("protocol", "udp"),
|
||||
})
|
||||
e.attrOutUDP = build([]attribute.KeyValue{
|
||||
attribute.String("direction", "egress"),
|
||||
attribute.String("protocol", "udp"),
|
||||
})
|
||||
}
|
||||
|
||||
// ClearTunnelID clears cached attribute sets for the current tunnel.
|
||||
func (pm *ProxyManager) ClearTunnelID() {
|
||||
pm.mutex.Lock()
|
||||
defer pm.mutex.Unlock()
|
||||
id := pm.currentTunnelID
|
||||
if id == "" {
|
||||
return
|
||||
}
|
||||
if e, ok := pm.tunnels[id]; ok {
|
||||
// final flush for this tunnel
|
||||
inTCP := e.bytesInTCP.Swap(0)
|
||||
outTCP := e.bytesOutTCP.Swap(0)
|
||||
inUDP := e.bytesInUDP.Swap(0)
|
||||
outUDP := e.bytesOutUDP.Swap(0)
|
||||
if inTCP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(inTCP), e.attrInTCP)
|
||||
}
|
||||
if outTCP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(outTCP), e.attrOutTCP)
|
||||
}
|
||||
if inUDP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(inUDP), e.attrInUDP)
|
||||
}
|
||||
if outUDP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(outUDP), e.attrOutUDP)
|
||||
}
|
||||
delete(pm.tunnels, id)
|
||||
}
|
||||
pm.currentTunnelID = ""
|
||||
}
|
||||
|
||||
// init function without tnet
|
||||
func NewProxyManagerWithoutTNet() *ProxyManager {
|
||||
return &ProxyManager{
|
||||
tcpTargets: make(map[string]map[int]string),
|
||||
udpTargets: make(map[string]map[int]string),
|
||||
listeners: make([]*gonet.TCPListener, 0),
|
||||
udpConns: make([]*gonet.UDPConn, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Function to add tnet to existing ProxyManager
|
||||
func (pm *ProxyManager) SetTNet(tnet *netstack.Net) {
|
||||
pm.mutex.Lock()
|
||||
defer pm.mutex.Unlock()
|
||||
pm.tnet = tnet
|
||||
}
|
||||
|
||||
// AddTarget adds as new target for proxying
|
||||
func (pm *ProxyManager) AddTarget(proto, listenIP string, port int, targetAddr string) error {
|
||||
pm.mutex.Lock()
|
||||
@@ -58,7 +237,7 @@ func (pm *ProxyManager) AddTarget(proto, listenIP string, port int, targetAddr s
|
||||
}
|
||||
pm.udpTargets[listenIP][port] = targetAddr
|
||||
default:
|
||||
return fmt.Errorf("unsupported protocol: %s", proto)
|
||||
return fmt.Errorf(errUnsupportedProtoFmt, proto)
|
||||
}
|
||||
|
||||
if pm.running {
|
||||
@@ -107,13 +286,28 @@ func (pm *ProxyManager) RemoveTarget(proto, listenIP string, port int) error {
|
||||
return fmt.Errorf("target not found: %s:%d", listenIP, port)
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("unsupported protocol: %s", proto)
|
||||
return fmt.Errorf(errUnsupportedProtoFmt, proto)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start begins listening for all configured proxy targets
|
||||
func (pm *ProxyManager) Start() error {
|
||||
// Register proxy observables once per process
|
||||
telemetry.SetProxyObservableCallback(func(ctx context.Context, o metric.Observer) error {
|
||||
pm.mutex.RLock()
|
||||
defer pm.mutex.RUnlock()
|
||||
for _, e := range pm.tunnels {
|
||||
// active connections
|
||||
telemetry.ObserveProxyActiveConnsObs(o, e.activeTCP.Load(), e.attrOutTCP.ToSlice())
|
||||
telemetry.ObserveProxyActiveConnsObs(o, e.activeUDP.Load(), e.attrOutUDP.ToSlice())
|
||||
// backlog bytes (sum of unflushed counters)
|
||||
b := int64(e.bytesInTCP.Load() + e.bytesOutTCP.Load() + e.bytesInUDP.Load() + e.bytesOutUDP.Load())
|
||||
telemetry.ObserveProxyAsyncBacklogObs(o, b, e.attrOutTCP.ToSlice())
|
||||
telemetry.ObserveProxyBufferBytesObs(o, b, e.attrOutTCP.ToSlice())
|
||||
}
|
||||
return nil
|
||||
})
|
||||
pm.mutex.Lock()
|
||||
defer pm.mutex.Unlock()
|
||||
|
||||
@@ -143,6 +337,75 @@ func (pm *ProxyManager) Start() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pm *ProxyManager) SetAsyncBytes(b bool) {
|
||||
pm.mutex.Lock()
|
||||
defer pm.mutex.Unlock()
|
||||
pm.asyncBytes = b
|
||||
if b && pm.flushStop == nil {
|
||||
pm.flushStop = make(chan struct{})
|
||||
go pm.flushLoop()
|
||||
}
|
||||
}
|
||||
func (pm *ProxyManager) flushLoop() {
|
||||
flushInterval := 2 * time.Second
|
||||
if v := os.Getenv("OTEL_METRIC_EXPORT_INTERVAL"); v != "" {
|
||||
if d, err := time.ParseDuration(v); err == nil && d > 0 {
|
||||
if d/2 < flushInterval {
|
||||
flushInterval = d / 2
|
||||
}
|
||||
}
|
||||
}
|
||||
ticker := time.NewTicker(flushInterval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
pm.mutex.RLock()
|
||||
for _, e := range pm.tunnels {
|
||||
inTCP := e.bytesInTCP.Swap(0)
|
||||
outTCP := e.bytesOutTCP.Swap(0)
|
||||
inUDP := e.bytesInUDP.Swap(0)
|
||||
outUDP := e.bytesOutUDP.Swap(0)
|
||||
if inTCP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(inTCP), e.attrInTCP)
|
||||
}
|
||||
if outTCP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(outTCP), e.attrOutTCP)
|
||||
}
|
||||
if inUDP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(inUDP), e.attrInUDP)
|
||||
}
|
||||
if outUDP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(outUDP), e.attrOutUDP)
|
||||
}
|
||||
}
|
||||
pm.mutex.RUnlock()
|
||||
case <-pm.flushStop:
|
||||
pm.mutex.RLock()
|
||||
for _, e := range pm.tunnels {
|
||||
inTCP := e.bytesInTCP.Swap(0)
|
||||
outTCP := e.bytesOutTCP.Swap(0)
|
||||
inUDP := e.bytesInUDP.Swap(0)
|
||||
outUDP := e.bytesOutUDP.Swap(0)
|
||||
if inTCP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(inTCP), e.attrInTCP)
|
||||
}
|
||||
if outTCP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(outTCP), e.attrOutTCP)
|
||||
}
|
||||
if inUDP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(inUDP), e.attrInUDP)
|
||||
}
|
||||
if outUDP > 0 {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(outUDP), e.attrOutUDP)
|
||||
}
|
||||
}
|
||||
pm.mutex.RUnlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *ProxyManager) Stop() error {
|
||||
pm.mutex.Lock()
|
||||
defer pm.mutex.Unlock()
|
||||
@@ -174,13 +437,13 @@ func (pm *ProxyManager) Stop() error {
|
||||
pm.udpConns = append(pm.udpConns[:i], pm.udpConns[i+1:]...)
|
||||
}
|
||||
|
||||
// Clear the target maps
|
||||
for k := range pm.tcpTargets {
|
||||
delete(pm.tcpTargets, k)
|
||||
}
|
||||
for k := range pm.udpTargets {
|
||||
delete(pm.udpTargets, k)
|
||||
}
|
||||
// // Clear the target maps
|
||||
// for k := range pm.tcpTargets {
|
||||
// delete(pm.tcpTargets, k)
|
||||
// }
|
||||
// for k := range pm.udpTargets {
|
||||
// delete(pm.udpTargets, k)
|
||||
// }
|
||||
|
||||
// Give active connections a chance to close gracefully
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
@@ -210,62 +473,93 @@ func (pm *ProxyManager) startTarget(proto, listenIP string, port int, targetAddr
|
||||
go pm.handleUDPProxy(conn, targetAddr)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unsupported protocol: %s", proto)
|
||||
return fmt.Errorf(errUnsupportedProtoFmt, proto)
|
||||
}
|
||||
|
||||
logger.Info("Started %s proxy from %s:%d to %s", proto, listenIP, port, targetAddr)
|
||||
logger.Info("Started %s proxy to %s", proto, targetAddr)
|
||||
logger.Debug("Started %s proxy from %s:%d to %s", proto, listenIP, port, targetAddr)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getEntry returns per-tunnel entry or nil.
|
||||
func (pm *ProxyManager) getEntry(id string) *tunnelEntry {
|
||||
pm.mutex.RLock()
|
||||
e := pm.tunnels[id]
|
||||
pm.mutex.RUnlock()
|
||||
return e
|
||||
}
|
||||
|
||||
func (pm *ProxyManager) handleTCPProxy(listener net.Listener, targetAddr string) {
|
||||
for {
|
||||
conn, err := listener.Accept()
|
||||
if err != nil {
|
||||
// Check if we're shutting down or the listener was closed
|
||||
telemetry.IncProxyAccept(context.Background(), pm.currentTunnelID, "tcp", "failure", classifyProxyError(err))
|
||||
if !pm.running {
|
||||
return
|
||||
}
|
||||
|
||||
// Check for specific network errors that indicate the listener is closed
|
||||
if ne, ok := err.(net.Error); ok && !ne.Temporary() {
|
||||
logger.Info("TCP listener closed, stopping proxy handler for %v", listener.Addr())
|
||||
return
|
||||
}
|
||||
|
||||
logger.Error("Error accepting TCP connection: %v", err)
|
||||
// Don't hammer the CPU if we hit a temporary error
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
|
||||
go func() {
|
||||
tunnelID := pm.currentTunnelID
|
||||
telemetry.IncProxyAccept(context.Background(), tunnelID, "tcp", "success", "")
|
||||
telemetry.IncProxyConnectionEvent(context.Background(), tunnelID, "tcp", telemetry.ProxyConnectionOpened)
|
||||
if tunnelID != "" {
|
||||
state.Global().IncSessions(tunnelID)
|
||||
if e := pm.getEntry(tunnelID); e != nil {
|
||||
e.activeTCP.Add(1)
|
||||
}
|
||||
}
|
||||
|
||||
go func(tunnelID string, accepted net.Conn) {
|
||||
connStart := time.Now()
|
||||
target, err := net.Dial("tcp", targetAddr)
|
||||
if err != nil {
|
||||
logger.Error("Error connecting to target: %v", err)
|
||||
conn.Close()
|
||||
accepted.Close()
|
||||
telemetry.IncProxyAccept(context.Background(), tunnelID, "tcp", "failure", classifyProxyError(err))
|
||||
telemetry.IncProxyConnectionEvent(context.Background(), tunnelID, "tcp", telemetry.ProxyConnectionClosed)
|
||||
telemetry.ObserveProxyConnectionDuration(context.Background(), tunnelID, "tcp", "failure", time.Since(connStart).Seconds())
|
||||
return
|
||||
}
|
||||
|
||||
// Create a WaitGroup to ensure both copy operations complete
|
||||
entry := pm.getEntry(tunnelID)
|
||||
if entry == nil {
|
||||
entry = &tunnelEntry{}
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
|
||||
go func() {
|
||||
go func(ent *tunnelEntry) {
|
||||
defer wg.Done()
|
||||
io.Copy(target, conn)
|
||||
target.Close()
|
||||
}()
|
||||
cw := &countingWriter{ctx: context.Background(), w: target, set: ent.attrInTCP, pm: pm, ent: ent, out: false, proto: "tcp"}
|
||||
_, _ = io.Copy(cw, accepted)
|
||||
_ = target.Close()
|
||||
}(entry)
|
||||
|
||||
go func() {
|
||||
go func(ent *tunnelEntry) {
|
||||
defer wg.Done()
|
||||
io.Copy(conn, target)
|
||||
conn.Close()
|
||||
}()
|
||||
cw := &countingWriter{ctx: context.Background(), w: accepted, set: ent.attrOutTCP, pm: pm, ent: ent, out: true, proto: "tcp"}
|
||||
_, _ = io.Copy(cw, target)
|
||||
_ = accepted.Close()
|
||||
}(entry)
|
||||
|
||||
// Wait for both copies to complete
|
||||
wg.Wait()
|
||||
}()
|
||||
if tunnelID != "" {
|
||||
state.Global().DecSessions(tunnelID)
|
||||
if e := pm.getEntry(tunnelID); e != nil {
|
||||
e.activeTCP.Add(-1)
|
||||
}
|
||||
}
|
||||
telemetry.ObserveProxyConnectionDuration(context.Background(), tunnelID, "tcp", "success", time.Since(connStart).Seconds())
|
||||
telemetry.IncProxyConnectionEvent(context.Background(), tunnelID, "tcp", telemetry.ProxyConnectionClosed)
|
||||
}(tunnelID, conn)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -278,6 +572,13 @@ func (pm *ProxyManager) handleUDPProxy(conn *gonet.UDPConn, targetAddr string) {
|
||||
n, remoteAddr, err := conn.ReadFrom(buffer)
|
||||
if err != nil {
|
||||
if !pm.running {
|
||||
// Clean up all connections when stopping
|
||||
clientsMutex.Lock()
|
||||
for _, targetConn := range clientConns {
|
||||
targetConn.Close()
|
||||
}
|
||||
clientConns = nil
|
||||
clientsMutex.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -301,6 +602,18 @@ func (pm *ProxyManager) handleUDPProxy(conn *gonet.UDPConn, targetAddr string) {
|
||||
}
|
||||
|
||||
clientKey := remoteAddr.String()
|
||||
// bytes from client -> target (direction=in)
|
||||
if pm.currentTunnelID != "" && n > 0 {
|
||||
if pm.asyncBytes {
|
||||
if e := pm.getEntry(pm.currentTunnelID); e != nil {
|
||||
e.bytesInUDP.Add(uint64(n))
|
||||
}
|
||||
} else {
|
||||
if e := pm.getEntry(pm.currentTunnelID); e != nil {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(n), e.attrInUDP)
|
||||
}
|
||||
}
|
||||
}
|
||||
clientsMutex.RLock()
|
||||
targetConn, exists := clientConns[clientKey]
|
||||
clientsMutex.RUnlock()
|
||||
@@ -309,44 +622,117 @@ func (pm *ProxyManager) handleUDPProxy(conn *gonet.UDPConn, targetAddr string) {
|
||||
targetUDPAddr, err := net.ResolveUDPAddr("udp", targetAddr)
|
||||
if err != nil {
|
||||
logger.Error("Error resolving target address: %v", err)
|
||||
telemetry.IncProxyAccept(context.Background(), pm.currentTunnelID, "udp", "failure", "resolve")
|
||||
continue
|
||||
}
|
||||
|
||||
targetConn, err = net.DialUDP("udp", nil, targetUDPAddr)
|
||||
if err != nil {
|
||||
logger.Error("Error connecting to target: %v", err)
|
||||
telemetry.IncProxyAccept(context.Background(), pm.currentTunnelID, "udp", "failure", classifyProxyError(err))
|
||||
continue
|
||||
}
|
||||
tunnelID := pm.currentTunnelID
|
||||
telemetry.IncProxyAccept(context.Background(), tunnelID, "udp", "success", "")
|
||||
telemetry.IncProxyConnectionEvent(context.Background(), tunnelID, "udp", telemetry.ProxyConnectionOpened)
|
||||
// Only increment activeUDP after a successful DialUDP
|
||||
if e := pm.getEntry(tunnelID); e != nil {
|
||||
e.activeUDP.Add(1)
|
||||
}
|
||||
|
||||
clientsMutex.Lock()
|
||||
clientConns[clientKey] = targetConn
|
||||
clientsMutex.Unlock()
|
||||
|
||||
go func() {
|
||||
go func(clientKey string, targetConn *net.UDPConn, remoteAddr net.Addr, tunnelID string) {
|
||||
start := time.Now()
|
||||
result := "success"
|
||||
defer func() {
|
||||
// Always clean up when this goroutine exits
|
||||
clientsMutex.Lock()
|
||||
if storedConn, exists := clientConns[clientKey]; exists && storedConn == targetConn {
|
||||
delete(clientConns, clientKey)
|
||||
targetConn.Close()
|
||||
if e := pm.getEntry(tunnelID); e != nil {
|
||||
e.activeUDP.Add(-1)
|
||||
}
|
||||
}
|
||||
clientsMutex.Unlock()
|
||||
telemetry.ObserveProxyConnectionDuration(context.Background(), tunnelID, "udp", result, time.Since(start).Seconds())
|
||||
telemetry.IncProxyConnectionEvent(context.Background(), tunnelID, "udp", telemetry.ProxyConnectionClosed)
|
||||
}()
|
||||
|
||||
buffer := make([]byte, 65507)
|
||||
for {
|
||||
n, _, err := targetConn.ReadFromUDP(buffer)
|
||||
if err != nil {
|
||||
logger.Error("Error reading from target: %v", err)
|
||||
return
|
||||
result = "failure"
|
||||
return // defer will handle cleanup
|
||||
}
|
||||
|
||||
// bytes from target -> client (direction=out)
|
||||
if pm.currentTunnelID != "" && n > 0 {
|
||||
if pm.asyncBytes {
|
||||
if e := pm.getEntry(pm.currentTunnelID); e != nil {
|
||||
e.bytesOutUDP.Add(uint64(n))
|
||||
}
|
||||
} else {
|
||||
if e := pm.getEntry(pm.currentTunnelID); e != nil {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(n), e.attrOutUDP)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_, err = conn.WriteTo(buffer[:n], remoteAddr)
|
||||
if err != nil {
|
||||
logger.Error("Error writing to client: %v", err)
|
||||
return
|
||||
telemetry.IncProxyDrops(context.Background(), pm.currentTunnelID, "udp")
|
||||
result = "failure"
|
||||
return // defer will handle cleanup
|
||||
}
|
||||
}
|
||||
}()
|
||||
}(clientKey, targetConn, remoteAddr, tunnelID)
|
||||
}
|
||||
|
||||
_, err = targetConn.Write(buffer[:n])
|
||||
written, err := targetConn.Write(buffer[:n])
|
||||
if err != nil {
|
||||
logger.Error("Error writing to target: %v", err)
|
||||
telemetry.IncProxyDrops(context.Background(), pm.currentTunnelID, "udp")
|
||||
targetConn.Close()
|
||||
clientsMutex.Lock()
|
||||
delete(clientConns, clientKey)
|
||||
clientsMutex.Unlock()
|
||||
} else if pm.currentTunnelID != "" && written > 0 {
|
||||
if pm.asyncBytes {
|
||||
if e := pm.getEntry(pm.currentTunnelID); e != nil {
|
||||
e.bytesInUDP.Add(uint64(written))
|
||||
}
|
||||
} else {
|
||||
if e := pm.getEntry(pm.currentTunnelID); e != nil {
|
||||
telemetry.AddTunnelBytesSet(context.Background(), int64(written), e.attrInUDP)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// write a function to print out the current targets in the ProxyManager
|
||||
func (pm *ProxyManager) PrintTargets() {
|
||||
pm.mutex.RLock()
|
||||
defer pm.mutex.RUnlock()
|
||||
|
||||
logger.Info("Current TCP Targets:")
|
||||
for listenIP, targets := range pm.tcpTargets {
|
||||
for port, targetAddr := range targets {
|
||||
logger.Info("TCP %s:%d -> %s", listenIP, port, targetAddr)
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("Current UDP Targets:")
|
||||
for listenIP, targets := range pm.udpTargets {
|
||||
for port, targetAddr := range targets {
|
||||
logger.Info("UDP %s:%d -> %s", listenIP, port, targetAddr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 774 KiB After Width: | Height: | Size: 93 KiB |
22
scripts/append-release-notes.sh
Normal file
22
scripts/append-release-notes.sh
Normal file
@@ -0,0 +1,22 @@
|
||||
#!/usr/bin/env bash
# Appends container-image information (GHCR/Docker Hub refs and image digest)
# to an existing draft GitHub release's notes.
#
# Required env: TAG, GHCR_REF, DIGEST. Optional: DH_REF (defaults to N/A).
set -euo pipefail

: "${TAG:?}"
: "${GHCR_REF:?}"
: "${DIGEST:?}"

NOTES_FILE="$(mktemp)"
# Fix: clean up the temp file even when a gh command below fails; with
# `set -e` the trailing rm was previously skipped on any error.
trap 'rm -f "${NOTES_FILE}"' EXIT

existing_body="$(gh release view "${TAG}" --json body --jq '.body')"
cat > "${NOTES_FILE}" <<EOF
${existing_body}

## Container Images
- GHCR: \`${GHCR_REF}\`
- Docker Hub: \`${DH_REF:-N/A}\`
**Digest:** \`${DIGEST}\`
EOF

gh release edit "${TAG}" --draft --notes-file "${NOTES_FILE}"
|
||||
11
scripts/nfpm.yaml.tmpl
Normal file
11
scripts/nfpm.yaml.tmpl
Normal file
@@ -0,0 +1,11 @@
|
||||
name: __PKG_NAME__
|
||||
arch: __ARCH__
|
||||
platform: linux
|
||||
version: __VERSION__
|
||||
section: net
|
||||
priority: optional
|
||||
maintainer: fosrl
|
||||
description: Newt - userspace tunnel client and TCP/UDP proxy
|
||||
contents:
|
||||
- src: build/newt
|
||||
dst: /usr/bin/newt
|
||||
149
scripts/publish-apt.sh
Normal file
149
scripts/publish-apt.sh
Normal file
@@ -0,0 +1,149 @@
|
||||
#!/usr/bin/env bash
# Builds/refreshes the APT repository for newt releases: downloads .deb
# release assets from GitHub, merges them into the existing pool synced from
# S3, regenerates the dists metadata, signs it with GPG, uploads back to S3
# and invalidates the CloudFront distribution.
set -euo pipefail

# ---- required env ----
: "${GH_REPO:?}"
: "${S3_BUCKET:?}"
: "${AWS_REGION:?}"
: "${CLOUDFRONT_DISTRIBUTION_ID:?}"
: "${PKG_NAME:?}"
: "${SUITE:?}"
: "${COMPONENT:?}"
: "${APT_GPG_PRIVATE_KEY:?}"

# Normalize the optional S3 prefix to always end in a slash when non-empty.
S3_PREFIX="${S3_PREFIX:-}"
if [[ -n "${S3_PREFIX}" && "${S3_PREFIX}" != */ ]]; then
  S3_PREFIX="${S3_PREFIX}/"
fi

# Fix: REPO_BASE_URL is informational only and was never declared, so the
# final echo crashed under `set -u` after all the real work had succeeded.
# Default it to the S3 location of the repo.
REPO_BASE_URL="${REPO_BASE_URL:-s3://${S3_BUCKET}/${S3_PREFIX}apt/}"

WORKDIR="$(pwd)"
mkdir -p repo/apt assets build

# download_asset TAG PATTERN — fetch a release asset into assets/, retrying
# because assets may still be uploading when this workflow starts.
download_asset() {
  local tag="$1"
  local pattern="$2"
  local attempts=12

  for attempt in $(seq 1 "${attempts}"); do
    if gh release download "${tag}" -R "${GH_REPO}" -p "${pattern}" -D assets; then
      return 0
    fi
    echo "Asset ${pattern} not available yet (attempt ${attempt}/${attempts}); retrying..."
    sleep 5
  done

  echo "ERROR: Failed to download asset ${pattern} for ${tag} after ${attempts} attempts"
  return 1
}

# Import the signing key; tolerate "already imported" failures, then verify a
# secret key is actually present.
echo "${APT_GPG_PRIVATE_KEY}" | gpg --batch --import >/dev/null 2>&1 || true

KEYID="$(gpg --list-secret-keys --with-colons | awk -F: '$1=="sec"{print $5; exit}')"
if [[ -z "${KEYID}" ]]; then
  echo "ERROR: No GPG secret key available after import."
  exit 1
fi

# Determine which tags to process: all (backfill), an explicit input/event/
# push tag, or the latest release as a fallback.
TAGS=""
if [[ "${BACKFILL_ALL:-false}" == "true" ]]; then
  echo "Backfill mode: collecting all release tags..."
  TAGS="$(gh release list -R "${GH_REPO}" --limit 200 --json tagName --jq '.[].tagName')"
else
  if [[ -n "${INPUT_TAG:-}" ]]; then
    TAGS="${INPUT_TAG}"
  elif [[ -n "${EVENT_TAG:-}" ]]; then
    TAGS="${EVENT_TAG}"
  elif [[ -n "${PUSH_TAG:-}" ]]; then
    TAGS="${PUSH_TAG}"
  else
    echo "No tag provided; using latest release tag..."
    TAGS="$(gh release view -R "${GH_REPO}" --json tagName --jq '.tagName')"
  fi
fi

echo "Tags to process:"
printf '%s\n' "${TAGS}"

# Pull existing repo from S3 so we keep older versions
echo "Sync existing repo from S3..."
aws s3 sync "s3://${S3_BUCKET}/${S3_PREFIX}apt/" repo/apt/ >/dev/null 2>&1 || true

# Download each tag's .deb assets and place them into the pool.
while IFS= read -r TAG; do
  [[ -z "${TAG}" ]] && continue
  echo "=== Processing tag: ${TAG} ==="

  rm -rf assets build
  mkdir -p assets build

  deb_amd64="${PKG_NAME}_${TAG}_amd64.deb"
  deb_arm64="${PKG_NAME}_${TAG}_arm64.deb"

  download_asset "${TAG}" "${deb_amd64}"
  download_asset "${TAG}" "${deb_arm64}"

  if [[ ! -f "assets/${deb_amd64}" ]]; then
    echo "ERROR: Missing release asset: ${deb_amd64}"
    exit 1
  fi
  if [[ ! -f "assets/${deb_arm64}" ]]; then
    echo "ERROR: Missing release asset: ${deb_arm64}"
    exit 1
  fi

  # Debian pool layout: pool/<component>/<first letter>/<package>/
  mkdir -p "repo/apt/pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}/"
  cp -v assets/*.deb "repo/apt/pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}/"

done <<< "${TAGS}"

# Regenerate per-architecture Packages indexes.
cd repo/apt

for arch in amd64 arm64; do
  mkdir -p "dists/${SUITE}/${COMPONENT}/binary-${arch}"
  dpkg-scanpackages -a "${arch}" pool > "dists/${SUITE}/${COMPONENT}/binary-${arch}/Packages"
  gzip -fk "dists/${SUITE}/${COMPONENT}/binary-${arch}/Packages"
done

# Release file with hashes
cat > apt-ftparchive.conf <<EOF
APT::FTPArchive::Release::Origin "fosrl";
APT::FTPArchive::Release::Label "newt";
APT::FTPArchive::Release::Suite "${SUITE}";
APT::FTPArchive::Release::Codename "${SUITE}";
APT::FTPArchive::Release::Architectures "amd64 arm64";
APT::FTPArchive::Release::Components "${COMPONENT}";
APT::FTPArchive::Release::Description "Newt APT repository";
EOF

apt-ftparchive -c apt-ftparchive.conf release "dists/${SUITE}" > "dists/${SUITE}/Release"

# Sign Release: InRelease (clearsigned) and Release.gpg (detached).
cd "dists/${SUITE}"

gpg --batch --yes --pinentry-mode loopback \
  ${APT_GPG_PASSPHRASE:+--passphrase "${APT_GPG_PASSPHRASE}"} \
  --local-user "${KEYID}" \
  --clearsign -o InRelease Release

gpg --batch --yes --pinentry-mode loopback \
  ${APT_GPG_PASSPHRASE:+--passphrase "${APT_GPG_PASSPHRASE}"} \
  --local-user "${KEYID}" \
  -abs -o Release.gpg Release

# Export public key into apt repo root
cd ../../..
gpg --batch --yes --armor --export "${KEYID}" > "${WORKDIR}/repo/apt/public.key"

# Upload to S3
echo "Uploading to S3..."
aws s3 sync "${WORKDIR}/repo/apt" "s3://${S3_BUCKET}/${S3_PREFIX}apt/" --delete

# Invalidate metadata so clients see the new indexes immediately.
echo "CloudFront invalidation..."
aws cloudfront create-invalidation \
  --distribution-id "${CLOUDFRONT_DISTRIBUTION_ID}" \
  --paths "/${S3_PREFIX}apt/dists/*" "/${S3_PREFIX}apt/public.key"

echo "Done. Repo base: ${REPO_BASE_URL}"
|
||||
125
self-signed-certs-for-mtls.sh
Executable file
125
self-signed-certs-for-mtls.sh
Executable file
@@ -0,0 +1,125 @@
|
||||
#!/usr/bin/env bash
# Interactive helper that creates (or reuses) a self-signed CA and issues a
# password-protected client certificate for mTLS, exporting PEM and PKCS12
# bundles under ./certs/.
set -eu

echo -n "Enter username for certs (eg alice): "
read -r CERT_USERNAME
echo

echo -n "Enter domain of user (eg example.com): "
read -r DOMAIN
echo

# Prompt for password at the start
echo -n "Enter password for certificate: "
read -rs PASSWORD
echo
echo -n "Confirm password: "
read -rs PASSWORD2
echo

if [ "$PASSWORD" != "$PASSWORD2" ]; then
  echo "Passwords don't match!"
  exit 1
fi
CA_DIR="./certs/ca"
CLIENT_DIR="./certs/clients"
# Fix: replace ALL dots (global flag); the original `s/\./-/` only replaced
# the first one, so multi-label domains (sub.example.com) kept stray dots in
# generated filenames.
FILE_PREFIX=$(echo "$CERT_USERNAME-at-$DOMAIN" | sed 's/\./-/g')

mkdir -p "$CA_DIR"
mkdir -p "$CLIENT_DIR"

# Create the CA once; later runs reuse it so client certs share one root.
if [ ! -f "$CA_DIR/ca.crt" ]; then
  # Generate CA private key
  openssl genrsa -out "$CA_DIR/ca.key" 4096
  echo "CA key ✅"

  # Generate CA root certificate
  openssl req -x509 -new -nodes \
    -key "$CA_DIR/ca.key" \
    -sha256 \
    -days 3650 \
    -out "$CA_DIR/ca.crt" \
    -subj "/C=US/ST=State/L=City/O=Organization/OU=Unit/CN=ca.$DOMAIN"

  echo "CA cert ✅"
fi

# Generate client private key (AES-256 encrypted with the chosen password)
openssl genrsa -aes256 -passout pass:"$PASSWORD" -out "$CLIENT_DIR/$FILE_PREFIX.key" 2048
echo "Client key ✅"

# Generate client Certificate Signing Request (CSR)
openssl req -new \
  -key "$CLIENT_DIR/$FILE_PREFIX.key" \
  -out "$CLIENT_DIR/$FILE_PREFIX.csr" \
  -passin pass:"$PASSWORD" \
  -subj "/C=US/ST=State/L=City/O=Organization/OU=Unit/CN=$CERT_USERNAME@$DOMAIN"
echo "Client cert ✅"

echo -n "Signing client cert..."
# Create client certificate configuration file
cat > "$CLIENT_DIR/$FILE_PREFIX.ext" << EOF
authorityKeyIdentifier=keyid,issuer
basicConstraints=CA:FALSE
keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment
subjectAltName = @alt_names

[alt_names]
DNS.1 = $DOMAIN
EOF

# Generate client certificate signed by CA
openssl x509 -req \
  -in "$CLIENT_DIR/$FILE_PREFIX.csr" \
  -CA "$CA_DIR/ca.crt" \
  -CAkey "$CA_DIR/ca.key" \
  -CAcreateserial \
  -out "$CLIENT_DIR/$FILE_PREFIX.crt" \
  -days 365 \
  -sha256 \
  -extfile "$CLIENT_DIR/$FILE_PREFIX.ext"

# Verify the client certificate
openssl verify -CAfile "$CA_DIR/ca.crt" "$CLIENT_DIR/$FILE_PREFIX.crt"
echo "Signed ✅"

# Create encrypted PEM bundle
# NOTE(review): `openssl rsa -passin` DECRYPTS the key, so the "*.enc.pem"
# bundle actually contains a plaintext private key — confirm this is intended.
openssl rsa -in "$CLIENT_DIR/$FILE_PREFIX.key" -passin pass:"$PASSWORD" \
  | cat "$CLIENT_DIR/$FILE_PREFIX.crt" - > "$CLIENT_DIR/$FILE_PREFIX-bundle.enc.pem"


# Convert to PKCS12
echo "Converting to PKCS12 format..."
openssl pkcs12 -export \
  -out "$CLIENT_DIR/$FILE_PREFIX.enc.p12" \
  -inkey "$CLIENT_DIR/$FILE_PREFIX.key" \
  -in "$CLIENT_DIR/$FILE_PREFIX.crt" \
  -certfile "$CA_DIR/ca.crt" \
  -name "$CERT_USERNAME@$DOMAIN" \
  -passin pass:"$PASSWORD" \
  -passout pass:"$PASSWORD"
echo "Converted to encrypted p12 for macOS ✅"

# Convert to PKCS12 format without encryption
echo "Converting to non-encrypted PKCS12 format..."
openssl pkcs12 -export \
  -out "$CLIENT_DIR/$FILE_PREFIX.p12" \
  -inkey "$CLIENT_DIR/$FILE_PREFIX.key" \
  -in "$CLIENT_DIR/$FILE_PREFIX.crt" \
  -certfile "$CA_DIR/ca.crt" \
  -name "$CERT_USERNAME@$DOMAIN" \
  -passin pass:"$PASSWORD" \
  -passout pass:""
echo "Converted to non-encrypted p12 ✅"

# Clean up intermediate files
rm "$CLIENT_DIR/$FILE_PREFIX.csr" "$CLIENT_DIR/$FILE_PREFIX.ext" "$CA_DIR/ca.srl"
echo
echo

echo "CA certificate: $CA_DIR/ca.crt"
echo "CA private key: $CA_DIR/ca.key"
echo "Client certificate: $CLIENT_DIR/$FILE_PREFIX.crt"
echo "Client private key: $CLIENT_DIR/$FILE_PREFIX.key"
echo "Client cert bundle: $CLIENT_DIR/$FILE_PREFIX.p12"
echo "Client cert bundle (encrypted): $CLIENT_DIR/$FILE_PREFIX.enc.p12"
|
||||
59
service_unix.go
Normal file
59
service_unix.go
Normal file
@@ -0,0 +1,59 @@
|
||||
//go:build !windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Service management is implemented only on Windows (see service_windows.go).
// These stubs keep non-Windows builds compiling: predicates report "not a
// service", lifecycle hooks do nothing, and every action returns an error.

// windowsOnlyErr builds the standard "Windows only" error for feature what.
func windowsOnlyErr(what string) error {
	return fmt.Errorf("%s is only available on Windows", what)
}

// installService is unsupported off Windows.
func installService() error {
	return windowsOnlyErr("service management")
}

// removeService is unsupported off Windows.
func removeService() error {
	return windowsOnlyErr("service management")
}

// startService is unsupported off Windows; the args are ignored.
func startService(_ []string) error {
	return windowsOnlyErr("service management")
}

// stopService is unsupported off Windows.
func stopService() error {
	return windowsOnlyErr("service management")
}

// getServiceStatus is unsupported off Windows.
func getServiceStatus() (string, error) {
	return "", windowsOnlyErr("service management")
}

// debugService is unsupported off Windows; the args are ignored.
func debugService(_ []string) error {
	return windowsOnlyErr("debug service")
}

// isWindowsService always reports false on non-Windows platforms.
func isWindowsService() bool {
	return false
}

// runService does nothing on non-Windows platforms.
func runService(string, bool, []string) {}

// setupWindowsEventLog does nothing on non-Windows platforms.
func setupWindowsEventLog() {}

// watchLogFile is unsupported off Windows.
func watchLogFile(bool) error {
	return windowsOnlyErr("watching log file")
}

// showServiceConfig prints a note that configuration is Windows-only.
func showServiceConfig() {
	fmt.Println("Service configuration is only available on Windows")
}

// handleServiceCommand never consumes a CLI command off Windows; the caller
// proceeds in normal console mode.
func handleServiceCommand() bool {
	return false
}
|
||||
760
service_windows.go
Normal file
760
service_windows.go
Normal file
@@ -0,0 +1,760 @@
|
||||
//go:build windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
"golang.org/x/sys/windows/svc"
|
||||
"golang.org/x/sys/windows/svc/debug"
|
||||
"golang.org/x/sys/windows/svc/eventlog"
|
||||
"golang.org/x/sys/windows/svc/mgr"
|
||||
)
|
||||
|
||||
// Identity of the newt service as registered with the Windows SCM; used by
// install/remove/start/stop below and by the event-log source name.
const (
	serviceName        = "NewtWireguardService"
	serviceDisplayName = "Newt WireGuard Tunnel Service"
	serviceDescription = "Newt WireGuard tunnel service for secure network connectivity"
)
|
||||
|
||||
// serviceArgs is a package-level slot for service arguments.
// NOTE(review): nothing visible in this file reads or writes it — the
// "start"/"debug" cases of handleServiceCommand declare their own local
// serviceArgs that shadows it — so it appears dead; confirm before removing.
var serviceArgs []string
|
||||
|
||||
// Persistence of the service's command-line arguments.
//
// When the SCM (re)starts the service no CLI flags are available, so the
// "start"/"debug" commands persist the flags as JSON under %PROGRAMDATA%\newt
// and Execute reads them back.

// getServiceArgsPath returns the JSON file that stores the saved arguments.
func getServiceArgsPath() string {
	return filepath.Join(os.Getenv("PROGRAMDATA"), "newt", "service_args.json")
}

// saveServiceArgs writes args as JSON to getServiceArgsPath, creating the
// configuration directory first.
func saveServiceArgs(args []string) error {
	dir := filepath.Join(os.Getenv("PROGRAMDATA"), "newt")
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("failed to create config directory: %v", err)
	}

	encoded, err := json.Marshal(args)
	if err != nil {
		return fmt.Errorf("failed to marshal service args: %v", err)
	}

	if err := os.WriteFile(getServiceArgsPath(), encoded, 0644); err != nil {
		return fmt.Errorf("failed to write service args: %v", err)
	}

	return nil
}

// loadServiceArgs reads the previously saved arguments. A missing file is
// not an error: it simply yields an empty argument list.
func loadServiceArgs() ([]string, error) {
	raw, err := os.ReadFile(getServiceArgsPath())
	switch {
	case os.IsNotExist(err):
		return []string{}, nil
	case err != nil:
		return nil, fmt.Errorf("failed to read service args: %v", err)
	}

	var args []string
	if err := json.Unmarshal(raw, &args); err != nil {
		return nil, fmt.Errorf("failed to unmarshal service args: %v", err)
	}

	return args, nil
}
|
||||
|
||||
// newtService implements the svc.Handler contract (see Execute) and carries
// the state shared between the SCM control loop and the tunnel goroutine.
type newtService struct {
	elog debug.Log          // Windows event log, or console logger in debug mode
	ctx  context.Context    // created in runNewt; cancelled to stop the tunnel
	stop context.CancelFunc // cancels ctx; invoked on SCM Stop/Shutdown
	args []string           // effective CLI args resolved in Execute
}
|
||||
|
||||
func (s *newtService) Execute(args []string, r <-chan svc.ChangeRequest, changes chan<- svc.Status) (bool, uint32) {
|
||||
const cmdsAccepted = svc.AcceptStop | svc.AcceptShutdown
|
||||
changes <- svc.Status{State: svc.StartPending}
|
||||
|
||||
s.elog.Info(1, fmt.Sprintf("Service Execute called with args: %v", args))
|
||||
|
||||
// Load saved service arguments
|
||||
savedArgs, err := loadServiceArgs()
|
||||
if err != nil {
|
||||
s.elog.Error(1, fmt.Sprintf("Failed to load service args: %v", err))
|
||||
// Continue with empty args if loading fails
|
||||
savedArgs = []string{}
|
||||
}
|
||||
s.elog.Info(1, fmt.Sprintf("Loaded saved service args: %v", savedArgs))
|
||||
|
||||
// Combine service start args with saved args, giving priority to service start args
|
||||
// Note: When the service is started via SCM, args[0] is the service name
|
||||
// When started via s.Start(args...), the args passed are exactly what we provide
|
||||
finalArgs := []string{}
|
||||
|
||||
// Check if we have args passed directly to Execute (from s.Start())
|
||||
if len(args) > 0 {
|
||||
// The first arg from SCM is the service name, but when we call s.Start(args...),
|
||||
// the args we pass become args[1:] in Execute. However, if started by SCM without
|
||||
// args, args[0] will be the service name.
|
||||
// We need to check if args[0] looks like the service name or a flag
|
||||
if len(args) == 1 && args[0] == serviceName {
|
||||
// Only service name, no actual args
|
||||
s.elog.Info(1, "Only service name in args, checking saved args")
|
||||
} else if len(args) > 1 && args[0] == serviceName {
|
||||
// Service name followed by actual args
|
||||
finalArgs = append(finalArgs, args[1:]...)
|
||||
s.elog.Info(1, fmt.Sprintf("Using service start parameters (after service name): %v", finalArgs))
|
||||
} else {
|
||||
// Args don't start with service name, use them all
|
||||
// This happens when args are passed via s.Start(args...)
|
||||
finalArgs = append(finalArgs, args...)
|
||||
s.elog.Info(1, fmt.Sprintf("Using service start parameters (direct): %v", finalArgs))
|
||||
}
|
||||
}
|
||||
|
||||
// If no service start parameters, use saved args
|
||||
if len(finalArgs) == 0 && len(savedArgs) > 0 {
|
||||
finalArgs = savedArgs
|
||||
s.elog.Info(1, fmt.Sprintf("Using saved service args: %v", finalArgs))
|
||||
}
|
||||
|
||||
s.elog.Info(1, fmt.Sprintf("Final args to use: %v", finalArgs))
|
||||
s.args = finalArgs
|
||||
|
||||
// Start the main newt functionality
|
||||
newtDone := make(chan struct{})
|
||||
go func() {
|
||||
s.runNewt()
|
||||
close(newtDone)
|
||||
}()
|
||||
|
||||
changes <- svc.Status{State: svc.Running, Accepts: cmdsAccepted}
|
||||
s.elog.Info(1, "Service status set to Running")
|
||||
|
||||
for {
|
||||
select {
|
||||
case c := <-r:
|
||||
switch c.Cmd {
|
||||
case svc.Interrogate:
|
||||
changes <- c.CurrentStatus
|
||||
case svc.Stop, svc.Shutdown:
|
||||
s.elog.Info(1, "Service stopping")
|
||||
changes <- svc.Status{State: svc.StopPending}
|
||||
if s.stop != nil {
|
||||
s.stop()
|
||||
}
|
||||
// Wait for main logic to finish or timeout
|
||||
select {
|
||||
case <-newtDone:
|
||||
s.elog.Info(1, "Main logic finished gracefully")
|
||||
case <-time.After(10 * time.Second):
|
||||
s.elog.Info(1, "Timeout waiting for main logic to finish")
|
||||
}
|
||||
return false, 0
|
||||
default:
|
||||
s.elog.Error(1, fmt.Sprintf("Unexpected control request #%d", c))
|
||||
}
|
||||
case <-newtDone:
|
||||
s.elog.Info(1, "Main newt logic completed, stopping service")
|
||||
changes <- svc.Status{State: svc.StopPending}
|
||||
return false, 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// runNewt runs the main newt logic (runNewtMainWithArgs, defined elsewhere
// in this package) with the args resolved by Execute. It installs s.ctx /
// s.stop so the SCM stop path can cancel the run, recovers from any panic
// in the worker goroutine so the failure is logged rather than killing the
// process silently, and returns once the main logic completes or the
// context is cancelled.
func (s *newtService) runNewt() {
	// Create a context that can be cancelled when the service stops.
	s.ctx, s.stop = context.WithCancel(context.Background())

	// Announce startup in the service log.
	s.elog.Info(1, "Starting Newt main logic")

	// Run the main newt logic and wait for it to complete.
	done := make(chan struct{})
	go func() {
		defer func() {
			// Convert panics into an event-log entry; `done` still closes
			// so the select below cannot hang.
			if r := recover(); r != nil {
				s.elog.Error(1, fmt.Sprintf("Panic in newt main: %v", r))
			}
			close(done)
		}()

		// Call the main newt function with stored arguments.
		// Use s.ctx as the signal context since the service manages shutdown.
		runNewtMainWithArgs(s.ctx, s.args)
	}()

	// Wait for either context cancellation or main logic completion.
	select {
	case <-s.ctx.Done():
		s.elog.Info(1, "Newt service context cancelled")
	case <-done:
		s.elog.Info(1, "Newt main logic completed")
	}
}
|
||||
|
||||
func runService(name string, isDebug bool, args []string) {
|
||||
var err error
|
||||
var elog debug.Log
|
||||
|
||||
if isDebug {
|
||||
elog = debug.New(name)
|
||||
fmt.Printf("Starting %s service in debug mode\n", name)
|
||||
} else {
|
||||
elog, err = eventlog.Open(name)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to open event log: %v\n", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
defer elog.Close()
|
||||
|
||||
elog.Info(1, fmt.Sprintf("Starting %s service", name))
|
||||
run := svc.Run
|
||||
if isDebug {
|
||||
run = debug.Run
|
||||
}
|
||||
|
||||
service := &newtService{elog: elog, args: args}
|
||||
err = run(name, service)
|
||||
if err != nil {
|
||||
elog.Error(1, fmt.Sprintf("%s service failed: %v", name, err))
|
||||
if isDebug {
|
||||
fmt.Printf("Service failed: %v\n", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
elog.Info(1, fmt.Sprintf("%s service stopped", name))
|
||||
if isDebug {
|
||||
fmt.Printf("%s service stopped\n", name)
|
||||
}
|
||||
}
|
||||
|
||||
// installService registers this executable with the Windows service control
// manager under serviceName (manual start) and creates its event-log
// source. It fails if a service with that name already exists. Connecting
// to the SCM requires administrator rights.
func installService() error {
	exepath, err := os.Executable()
	if err != nil {
		return fmt.Errorf("failed to get executable path: %v", err)
	}

	m, err := mgr.Connect()
	if err != nil {
		return fmt.Errorf("failed to connect to service manager: %v", err)
	}
	defer m.Disconnect()

	// If the service can be opened it already exists; refuse to reinstall.
	s, err := m.OpenService(serviceName)
	if err == nil {
		s.Close()
		return fmt.Errorf("service %s already exists", serviceName)
	}

	config := mgr.Config{
		ServiceType:    0x10, // SERVICE_WIN32_OWN_PROCESS
		StartType:      mgr.StartManual,
		ErrorControl:   mgr.ErrorNormal,
		DisplayName:    serviceDisplayName,
		Description:    serviceDescription,
		BinaryPathName: exepath,
	}

	s, err = m.CreateService(serviceName, exepath, config)
	if err != nil {
		return fmt.Errorf("failed to create service: %v", err)
	}
	defer s.Close()

	// Register the event-log source; roll the service back on failure so
	// installation stays all-or-nothing.
	err = eventlog.InstallAsEventCreate(serviceName, eventlog.Error|eventlog.Warning|eventlog.Info)
	if err != nil {
		s.Delete()
		return fmt.Errorf("failed to install event log: %v", err)
	}

	return nil
}
|
||||
|
||||
// removeService stops the newt service if it is running (waiting up to 30
// seconds for it to reach Stopped), deletes the service registration, and
// removes its event-log source.
func removeService() error {
	m, err := mgr.Connect()
	if err != nil {
		return fmt.Errorf("failed to connect to service manager: %v", err)
	}
	defer m.Disconnect()

	s, err := m.OpenService(serviceName)
	if err != nil {
		return fmt.Errorf("service %s is not installed", serviceName)
	}
	defer s.Close()

	// Stop the service if it's running.
	status, err := s.Query()
	if err != nil {
		return fmt.Errorf("failed to query service status: %v", err)
	}

	if status.State != svc.Stopped {
		_, err = s.Control(svc.Stop)
		if err != nil {
			return fmt.Errorf("failed to stop service: %v", err)
		}

		// Poll every 300ms until stopped, with a 30-second deadline.
		timeout := time.Now().Add(30 * time.Second)
		for status.State != svc.Stopped {
			if timeout.Before(time.Now()) {
				return fmt.Errorf("timeout waiting for service to stop")
			}
			time.Sleep(300 * time.Millisecond)
			status, err = s.Query()
			if err != nil {
				return fmt.Errorf("failed to query service status: %v", err)
			}
		}
	}

	err = s.Delete()
	if err != nil {
		return fmt.Errorf("failed to delete service: %v", err)
	}

	err = eventlog.Remove(serviceName)
	if err != nil {
		return fmt.Errorf("failed to remove event log: %v", err)
	}

	return nil
}
|
||||
|
||||
// startService persists args (so an SCM-initiated restart can reuse them,
// see loadServiceArgs) and then asks the SCM to start the newt service with
// args as its start parameters; Execute receives them after the service
// name. Saving is best-effort: a save failure only prints a warning because
// the args are still passed directly to Start.
func startService(args []string) error {
	fmt.Printf("Starting service with args: %v\n", args)

	// Always save the service arguments so they can be loaded on service restart.
	err := saveServiceArgs(args)
	if err != nil {
		fmt.Printf("Warning: failed to save service args: %v\n", err)
		// Continue anyway, args will still be passed directly.
	} else {
		fmt.Printf("Saved service args to: %s\n", getServiceArgsPath())
	}

	m, err := mgr.Connect()
	if err != nil {
		return fmt.Errorf("failed to connect to service manager: %v", err)
	}
	defer m.Disconnect()

	s, err := m.OpenService(serviceName)
	if err != nil {
		return fmt.Errorf("service %s is not installed", serviceName)
	}
	defer s.Close()

	// Pass arguments directly to the service start call.
	// Note: these args will appear in Execute() after the service name.
	err = s.Start(args...)
	if err != nil {
		return fmt.Errorf("failed to start service: %v", err)
	}

	return nil
}
|
||||
|
||||
func stopService() error {
|
||||
m, err := mgr.Connect()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to connect to service manager: %v", err)
|
||||
}
|
||||
defer m.Disconnect()
|
||||
|
||||
s, err := m.OpenService(serviceName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("service %s is not installed", serviceName)
|
||||
}
|
||||
defer s.Close()
|
||||
|
||||
status, err := s.Control(svc.Stop)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to stop service: %v", err)
|
||||
}
|
||||
|
||||
timeout := time.Now().Add(30 * time.Second)
|
||||
for status.State != svc.Stopped {
|
||||
if timeout.Before(time.Now()) {
|
||||
return fmt.Errorf("timeout waiting for service to stop")
|
||||
}
|
||||
time.Sleep(300 * time.Millisecond)
|
||||
status, err = s.Query()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to query service status: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func debugService(args []string) error {
|
||||
// Save the service arguments before starting
|
||||
if len(args) > 0 {
|
||||
err := saveServiceArgs(args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save service args: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Start the service with the provided arguments
|
||||
err := startService(args)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start service: %v", err)
|
||||
}
|
||||
|
||||
// Watch the log file
|
||||
return watchLogFile(true)
|
||||
}
|
||||
|
||||
func watchLogFile(end bool) error {
|
||||
logDir := filepath.Join(os.Getenv("PROGRAMDATA"), "newt", "logs")
|
||||
logPath := filepath.Join(logDir, "newt.log")
|
||||
|
||||
// Ensure the log directory exists
|
||||
err := os.MkdirAll(logDir, 0755)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create log directory: %v", err)
|
||||
}
|
||||
|
||||
// Wait for the log file to be created if it doesn't exist
|
||||
var file *os.File
|
||||
for i := 0; i < 30; i++ { // Wait up to 15 seconds
|
||||
file, err = os.Open(logPath)
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
if i == 0 {
|
||||
fmt.Printf("Waiting for log file to be created...\n")
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open log file after waiting: %v", err)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
// Seek to the end of the file to only show new logs
|
||||
_, err = file.Seek(0, 2)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to seek to end of file: %v", err)
|
||||
}
|
||||
|
||||
// Set up signal handling for graceful exit
|
||||
sigCh := make(chan os.Signal, 1)
|
||||
signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM)
|
||||
|
||||
// Create a ticker to check for new content
|
||||
ticker := time.NewTicker(500 * time.Millisecond)
|
||||
defer ticker.Stop()
|
||||
|
||||
buffer := make([]byte, 4096)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-sigCh:
|
||||
fmt.Printf("\n\nStopping log watch...\n")
|
||||
// stop the service if needed
|
||||
if end {
|
||||
fmt.Printf("Stopping service...\n")
|
||||
stopService()
|
||||
}
|
||||
fmt.Printf("Log watch stopped.\n")
|
||||
return nil
|
||||
case <-ticker.C:
|
||||
// Read new content
|
||||
n, err := file.Read(buffer)
|
||||
if err != nil && err != io.EOF {
|
||||
// Try to reopen the file in case it was recreated
|
||||
file.Close()
|
||||
file, err = os.Open(logPath)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if n > 0 {
|
||||
// Print the new content
|
||||
fmt.Print(string(buffer[:n]))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getServiceStatus() (string, error) {
|
||||
m, err := mgr.Connect()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to connect to service manager: %v", err)
|
||||
}
|
||||
defer m.Disconnect()
|
||||
|
||||
s, err := m.OpenService(serviceName)
|
||||
if err != nil {
|
||||
return "Not Installed", nil
|
||||
}
|
||||
defer s.Close()
|
||||
|
||||
status, err := s.Query()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to query service status: %v", err)
|
||||
}
|
||||
|
||||
switch status.State {
|
||||
case svc.Stopped:
|
||||
return "Stopped", nil
|
||||
case svc.StartPending:
|
||||
return "Starting", nil
|
||||
case svc.StopPending:
|
||||
return "Stopping", nil
|
||||
case svc.Running:
|
||||
return "Running", nil
|
||||
case svc.ContinuePending:
|
||||
return "Continue Pending", nil
|
||||
case svc.PausePending:
|
||||
return "Pause Pending", nil
|
||||
case svc.Paused:
|
||||
return "Paused", nil
|
||||
default:
|
||||
return "Unknown", nil
|
||||
}
|
||||
}
|
||||
|
||||
// showServiceConfig displays current saved service configuration
|
||||
func showServiceConfig() {
|
||||
configPath := getServiceArgsPath()
|
||||
fmt.Printf("Service configuration file: %s\n", configPath)
|
||||
|
||||
args, err := loadServiceArgs()
|
||||
if err != nil {
|
||||
fmt.Printf("No saved configuration found or error loading: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
fmt.Println("No saved service arguments found")
|
||||
} else {
|
||||
fmt.Printf("Saved service arguments: %v\n", args)
|
||||
}
|
||||
}
|
||||
|
||||
func isWindowsService() bool {
|
||||
isWindowsService, err := svc.IsWindowsService()
|
||||
return err == nil && isWindowsService
|
||||
}
|
||||
|
||||
// rotateLogFile handles daily log rotation
|
||||
func rotateLogFile(logDir string, logFile string) error {
|
||||
// Get current log file info
|
||||
info, err := os.Stat(logFile)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil // No current log file to rotate
|
||||
}
|
||||
return fmt.Errorf("failed to stat log file: %v", err)
|
||||
}
|
||||
|
||||
// Check if log file is from today
|
||||
now := time.Now()
|
||||
fileTime := info.ModTime()
|
||||
|
||||
// If the log file is from today, no rotation needed
|
||||
if now.Year() == fileTime.Year() && now.YearDay() == fileTime.YearDay() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Create rotated filename with date
|
||||
rotatedName := fmt.Sprintf("newt-%s.log", fileTime.Format("2006-01-02"))
|
||||
rotatedPath := filepath.Join(logDir, rotatedName)
|
||||
|
||||
// Rename current log file to dated filename
|
||||
err = os.Rename(logFile, rotatedPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to rotate log file: %v", err)
|
||||
}
|
||||
|
||||
// Clean up old log files (keep last 30 days)
|
||||
cleanupOldLogFiles(logDir, 30)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cleanupOldLogFiles removes log files older than specified days
|
||||
func cleanupOldLogFiles(logDir string, daysToKeep int) {
|
||||
cutoff := time.Now().AddDate(0, 0, -daysToKeep)
|
||||
|
||||
files, err := os.ReadDir(logDir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && strings.HasPrefix(file.Name(), "newt-") && strings.HasSuffix(file.Name(), ".log") {
|
||||
filePath := filepath.Join(logDir, file.Name())
|
||||
info, err := file.Info()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if info.ModTime().Before(cutoff) {
|
||||
os.Remove(filePath)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// setupWindowsEventLog redirects newt's logging to a file under
// %PROGRAMDATA%\newt\logs\newt.log, rotating the previous day's log first.
// Failures are reported to stdout and logging is left unchanged. The opened
// file is intentionally never closed: it must live for the process lifetime.
func setupWindowsEventLog() {
	// Create the log directory if it doesn't exist.
	logDir := filepath.Join(os.Getenv("PROGRAMDATA"), "newt", "logs")
	err := os.MkdirAll(logDir, 0755)
	if err != nil {
		fmt.Printf("Failed to create log directory: %v\n", err)
		return
	}

	logFile := filepath.Join(logDir, "newt.log")

	// Rotate the log file if it is from a previous day.
	err = rotateLogFile(logDir, logFile)
	if err != nil {
		fmt.Printf("Failed to rotate log file: %v\n", err)
		// Continue anyway to create a new log file.
	}

	file, err := os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		fmt.Printf("Failed to open log file: %v\n", err)
		return
	}

	// Point the project logger (github.com/fosrl/newt/logger) at the file.
	logger.GetLogger().SetOutput(file)

	log.Printf("Newt service logging initialized - log file: %s", logFile)
}
|
||||
|
||||
// handleServiceCommand dispatches the service-management CLI verbs given in
// os.Args[1] (install, remove/uninstall, start, stop, status, debug, logs,
// config, service-help). It returns true when a verb was handled — the
// caller should then exit — and false when the process should continue in
// normal console mode. On any verb failure it prints the error and calls
// os.Exit(1) directly.
func handleServiceCommand() bool {
	if len(os.Args) < 2 {
		return false
	}

	command := os.Args[1]

	switch command {
	case "install":
		err := installService()
		if err != nil {
			fmt.Printf("Failed to install service: %v\n", err)
			os.Exit(1)
		}
		fmt.Println("Service installed successfully")
		return true
	case "remove", "uninstall":
		err := removeService()
		if err != nil {
			fmt.Printf("Failed to remove service: %v\n", err)
			os.Exit(1)
		}
		fmt.Println("Service removed successfully")
		return true
	case "start":
		// Pass the remaining arguments (after "start") to the service.
		// NOTE: this local shadows the package-level serviceArgs variable.
		serviceArgs := os.Args[2:]
		err := startService(serviceArgs)
		if err != nil {
			fmt.Printf("Failed to start service: %v\n", err)
			os.Exit(1)
		}
		fmt.Println("Service started successfully")
		return true
	case "stop":
		err := stopService()
		if err != nil {
			fmt.Printf("Failed to stop service: %v\n", err)
			os.Exit(1)
		}
		fmt.Println("Service stopped successfully")
		return true
	case "status":
		status, err := getServiceStatus()
		if err != nil {
			fmt.Printf("Failed to get service status: %v\n", err)
			os.Exit(1)
		}
		fmt.Printf("Service status: %s\n", status)
		return true
	case "debug":
		// Get the status, and if it is Not Installed then install it first.
		status, err := getServiceStatus()
		if err != nil {
			fmt.Printf("Failed to get service status: %v\n", err)
			os.Exit(1)
		}
		if status == "Not Installed" {
			err := installService()
			if err != nil {
				fmt.Printf("Failed to install service: %v\n", err)
				os.Exit(1)
			}
			fmt.Println("Service installed successfully, now running in debug mode")
		}

		// Pass the remaining arguments (after "debug") to the service.
		serviceArgs := os.Args[2:]
		err = debugService(serviceArgs)
		if err != nil {
			fmt.Printf("Failed to debug service: %v\n", err)
			os.Exit(1)
		}
		return true
	case "logs":
		// Tail the log without stopping the service afterwards.
		err := watchLogFile(false)
		if err != nil {
			fmt.Printf("Failed to watch log file: %v\n", err)
			os.Exit(1)
		}
		return true
	case "config":
		showServiceConfig()
		return true
	case "service-help":
		fmt.Println("Newt WireGuard Tunnel")
		fmt.Println("\nWindows Service Management:")
		fmt.Println("  install                Install the service")
		fmt.Println("  remove                 Remove the service")
		fmt.Println("  start [args]           Start the service with optional arguments")
		fmt.Println("  stop                   Stop the service")
		fmt.Println("  status                 Show service status")
		fmt.Println("  debug [args]           Run service in debug mode with optional arguments")
		fmt.Println("  logs                   Tail the service log file")
		fmt.Println("  config                 Show current service configuration")
		fmt.Println("  service-help           Show this service help")
		fmt.Println("\nExamples:")
		fmt.Println("  newt start --endpoint https://example.com --id myid --secret mysecret")
		fmt.Println("  newt debug --endpoint https://example.com --id myid --secret mysecret")
		fmt.Println("\nFor normal console mode, run with standard flags (e.g., newt --endpoint ...)")
		return true
	}

	return false
}
|
||||
39
stub.go
Normal file
39
stub.go
Normal file
@@ -0,0 +1,39 @@
|
||||
//go:build !linux
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/fosrl/newt/proxy"
|
||||
"github.com/fosrl/newt/websocket"
|
||||
)
|
||||
|
||||
func setupClientsNative(client *websocket.Client, host string) {
|
||||
_ = client
|
||||
_ = host
|
||||
// No-op for non-Linux systems
|
||||
}
|
||||
|
||||
func closeWgServiceNative() {
|
||||
// No-op for non-Linux systems
|
||||
}
|
||||
|
||||
func clientsOnConnectNative() {
|
||||
// No-op for non-Linux systems
|
||||
}
|
||||
|
||||
func clientsHandleNewtConnectionNative(publicKey, endpoint string) {
|
||||
_ = publicKey
|
||||
_ = endpoint
|
||||
// No-op for non-Linux systems
|
||||
}
|
||||
|
||||
func clientsAddProxyTargetNative(pm *proxy.ProxyManager, tunnelIp string) {
|
||||
_ = pm
|
||||
_ = tunnelIp
|
||||
// No-op for non-Linux systems
|
||||
}
|
||||
|
||||
func clientsStartDirectRelayNative(tunnelIP string) {
|
||||
_ = tunnelIP
|
||||
// No-op for non-Linux systems
|
||||
}
|
||||
49
udp_client.py
Normal file
49
udp_client.py
Normal file
@@ -0,0 +1,49 @@
|
||||
import socket
import sys

# Simple UDP echo-test client: sends one message to <HOST_IP>:<HOST_PORT>
# and prints the server's reply. Companion to udp_server.py.

# Argument parsing: require exactly an IP and a port.
if len(sys.argv) != 3:
    print("Usage: python udp_client.py <HOST_IP> <HOST_PORT>")
    # Example: python udp_client.py 127.0.0.1 12000
    sys.exit(1)

HOST = sys.argv[1]
try:
    PORT = int(sys.argv[2])
except ValueError:
    print("Error: HOST_PORT must be an integer.")
    sys.exit(1)

# The message to send to the server
MESSAGE = "Hello UDP Server! How are you?"

# Create a UDP socket
try:
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error as err:
    print(f"Failed to create socket: {err}")
    # Exit non-zero on failure (a bare sys.exit() reported success).
    sys.exit(1)

# Don't hang forever if the server never answers.
client_socket.settimeout(10)

try:
    print(f"Sending message to {HOST}:{PORT}...")

    # Send the message (data must be encoded to bytes)
    client_socket.sendto(MESSAGE.encode('utf-8'), (HOST, PORT))

    # Wait for the server's response (buffer size 1024 bytes)
    data, server_address = client_socket.recvfrom(1024)

    # Decode and print the server's response
    response = data.decode('utf-8')
    print("-" * 30)
    print(f"Received response from server {server_address[0]}:{server_address[1]}:")
    print(f"-> Data: '{response}'")

except socket.timeout:
    # Must precede socket.error: timeout is a subclass of OSError.
    print("Error: timed out waiting for a response from the server.")

except socket.error as err:
    print(f"Error during communication: {err}")

finally:
    # Close the socket
    client_socket.close()
    print("-" * 30)
    print("Client finished and socket closed.")
|
||||
58
udp_server.py
Normal file
58
udp_server.py
Normal file
@@ -0,0 +1,58 @@
|
||||
import socket
import sys

# Simple UDP echo-test server: listens on 0.0.0.0:<port> (default 12000),
# upper-cases each datagram and sends it back. Companion to udp_client.py.

# Optional positional argument: the port to listen on.
if len(sys.argv) > 1:
    try:
        PORT = int(sys.argv[1])
    except ValueError:
        print("Invalid port number. Using default port 12000.")
        PORT = 12000
else:
    PORT = 12000

# Listen on every interface.
HOST = '0.0.0.0'

# Create a UDP socket
try:
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error as err:
    print(f"Failed to create socket: {err}")
    # Exit non-zero on failure (a bare sys.exit() reported success).
    sys.exit(1)

# Bind the socket to the address
try:
    server_socket.bind((HOST, PORT))
    print(f"UDP Server listening on {HOST}:{PORT}")
except socket.error as err:
    print(f"Bind failed: {err}")
    server_socket.close()
    sys.exit(1)

# Wait for and process incoming data until interrupted.
try:
    while True:
        try:
            # Receive data and the client's address (buffer size 1024 bytes)
            data, client_address = server_socket.recvfrom(1024)

            # Decode the data and print the message
            message = data.decode('utf-8')
            print("-" * 30)
            print(f"Received message from {client_address[0]}:{client_address[1]}:")
            print(f"-> Data: '{message}'")

            # Prepare the response message
            response_message = f"Hello client! Server received: '{message.upper()}'"

            # Send the response back to the client
            server_socket.sendto(response_message.encode('utf-8'), client_address)
            print("Sent response back to client.")

        except Exception as e:
            print(f"An error occurred: {e}")
            break
except KeyboardInterrupt:
    # Ctrl-C is not an Exception subclass, so the inner handler never saw
    # it; catch it here so we clean up instead of printing a traceback.
    print("\nInterrupted.")
finally:
    # Always release the socket, however the loop ended.
    server_socket.close()
    print("Server stopped.")
|
||||
173
updates/updates.go
Normal file
173
updates/updates.go
Normal file
@@ -0,0 +1,173 @@
|
||||
package updates
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// GitHubRelease is the subset of the GitHub "latest release" API response
// that the update check needs.
type GitHubRelease struct {
	TagName string `json:"tag_name"` // release tag, e.g. "v1.2.3"
	Name    string `json:"name"`     // human-readable release title
	HTMLURL string `json:"html_url"` // web URL of the release page
}
|
||||
|
||||
// Version is a parsed semantic version (major.minor.patch).
type Version struct {
	Major int
	Minor int
	Patch int
}

// parseVersion parses a "MAJOR.MINOR.PATCH" string into a Version; an
// optional leading 'v' (e.g. "v1.2.3") is accepted. Exactly three numeric
// components are required.
func parseVersion(versionStr string) (Version, error) {
	trimmed := strings.TrimPrefix(versionStr, "v")

	parts := strings.Split(trimmed, ".")
	if len(parts) != 3 {
		return Version{}, fmt.Errorf("invalid version format: %s", trimmed)
	}

	var nums [3]int
	labels := [3]string{"major", "minor", "patch"}
	for i, p := range parts {
		n, err := strconv.Atoi(p)
		if err != nil {
			return Version{}, fmt.Errorf("invalid %s version: %s", labels[i], p)
		}
		nums[i] = n
	}

	return Version{Major: nums[0], Minor: nums[1], Patch: nums[2]}, nil
}

// isNewer reports whether v2 is strictly newer than the receiver v1,
// comparing major, then minor, then patch.
func (v1 Version) isNewer(v2 Version) bool {
	if v1.Major != v2.Major {
		return v2.Major > v1.Major
	}
	if v1.Minor != v2.Minor {
		return v2.Minor > v1.Minor
	}
	return v2.Patch > v1.Patch
}

// String renders the version as "MAJOR.MINOR.PATCH" (no 'v' prefix).
func (v Version) String() string {
	return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
}
|
||||
|
||||
// CheckForUpdate checks GitHub for a newer version and prints an update banner if found
|
||||
func CheckForUpdate(owner, repo, currentVersion string) error {
|
||||
if currentVersion == "version_replaceme" {
|
||||
return nil
|
||||
}
|
||||
|
||||
// GitHub API URL for latest release
|
||||
url := fmt.Sprintf("https://api.github.com/repos/%s/%s/releases/latest", owner, repo)
|
||||
|
||||
// Create HTTP client with timeout
|
||||
client := &http.Client{
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
// Make the request
|
||||
resp, err := client.Get(url)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch release info: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("GitHub API returned status: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Parse the JSON response
|
||||
var release GitHubRelease
|
||||
if err := json.NewDecoder(resp.Body).Decode(&release); err != nil {
|
||||
return fmt.Errorf("failed to parse release info: %w", err)
|
||||
}
|
||||
|
||||
// Parse current and latest versions
|
||||
currentVer, err := parseVersion(currentVersion)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid current version: %w", err)
|
||||
}
|
||||
|
||||
latestVer, err := parseVersion(release.TagName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid latest version: %w", err)
|
||||
}
|
||||
|
||||
// Check if update is available
|
||||
if currentVer.isNewer(latestVer) {
|
||||
printUpdateBanner(currentVer.String(), latestVer.String(), "curl -fsSL https://static.pangolin.net/get-newt.sh | bash")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// printUpdateBanner prints a colorful update notification banner showing the
// current and latest versions and how to update. The third parameter is
// rendered after "Update:"; callers may pass a URL or a shell command (the
// caller in this package passes an install one-liner).
func printUpdateBanner(currentVersion, latestVersion, releaseURL string) {
	const contentWidth = 70 // width between the border lines

	// strings.Repeat on "═" repeats the whole multi-byte rune, so each
	// border renders contentWidth characters wide despite being >70 bytes.
	borderTop := "╔" + strings.Repeat("═", contentWidth) + "╗"
	borderMid := "╠" + strings.Repeat("═", contentWidth) + "╣"
	borderBot := "╚" + strings.Repeat("═", contentWidth) + "╝"
	emptyLine := "║" + strings.Repeat(" ", contentWidth) + "║"

	// Each numeric offset below must equal the byte length of the literal
	// label between "║" and the padded value so every row is exactly
	// contentWidth columns. NOTE(review): padRight counts bytes, not runes —
	// alignment holds only for ASCII version/command strings; verify the
	// label widths (19, 56, 10) against the literals if they are edited.
	lines := []string{
		borderTop,
		"║" + centerText("UPDATE AVAILABLE", contentWidth) + "║",
		borderMid,
		emptyLine,
		"║ Current Version: " + padRight(currentVersion, contentWidth-19) + "║",
		"║ Latest Version: " + padRight(latestVersion, contentWidth-19) + "║",
		emptyLine,
		"║ A newer version is available! Please update to get the" + padRight("", contentWidth-56) + "║",
		"║ latest features, bug fixes, and security improvements." + padRight("", contentWidth-56) + "║",
		emptyLine,
		"║ Update: " + padRight(releaseURL, contentWidth-10) + "║",
		emptyLine,
		borderBot,
	}

	for _, line := range lines {
		fmt.Println(line)
	}
}
|
||||
|
||||
// padRight right-pads s with spaces to exactly width bytes, truncating
// when s is already longer. Widths are measured in bytes, not runes.
func padRight(s string, width int) string {
	if len(s) >= width {
		return s[:width]
	}
	// %-*s left-justifies s in a space-padded field of the given width.
	return fmt.Sprintf("%-*s", width, s)
}
|
||||
|
||||
// centerText centers s within a field of w spaces, truncating when s does
// not fit. When the surrounding space is odd, the extra space goes on the
// right. Widths are measured in bytes, not runes.
func centerText(s string, w int) string {
	if len(s) >= w {
		return s[:w]
	}
	total := w - len(s)
	lead := total / 2
	return strings.Repeat(" ", lead) + s + strings.Repeat(" ", total-lead)
}
|
||||
226
util/util.go
Normal file
226
util/util.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
mathrand "math/rand/v2"
|
||||
|
||||
"github.com/fosrl/newt/logger"
|
||||
"golang.zx2c4.com/wireguard/device"
|
||||
)
|
||||
|
||||
// ResolveDomain normalizes a domain string — optionally carrying an
// http/https scheme, a port, and trailing slashes — and resolves it to an
// IP address. Hosts that are already IP literals (IPv4 or bracketed IPv6)
// are returned without a DNS lookup; otherwise the first IPv4 result is
// preferred, falling back to the first result of any family. Any port in
// the input is preserved in the output via net.JoinHostPort.
func ResolveDomain(domain string) (string, error) {
	// trim whitespace
	domain = strings.TrimSpace(domain)

	// Remove any protocol prefix if present (do this first, before splitting host/port)
	domain = strings.TrimPrefix(domain, "http://")
	domain = strings.TrimPrefix(domain, "https://")

	// Remove all trailing slashes. (TrimSuffix only removed one, so
	// "example.com//" previously leaked a slash into the DNS lookup.)
	domain = strings.TrimRight(domain, "/")

	// Check if there's a port in the domain
	host, port, err := net.SplitHostPort(domain)
	if err != nil {
		// No port found, use the domain as is
		host = domain
		port = ""
	}

	// Check if host is already an IP address (IPv4 or IPv6).
	// When a port was present, SplitHostPort already stripped IPv6
	// brackets; when there was no port we may still hold "[::1]", so
	// strip brackets before parsing.
	cleanHost := strings.TrimPrefix(strings.TrimSuffix(host, "]"), "[")
	if ip := net.ParseIP(cleanHost); ip != nil {
		// It's already an IP address, no need to resolve.
		ipAddr := ip.String()
		if port != "" {
			return net.JoinHostPort(ipAddr, port), nil
		}
		return ipAddr, nil
	}

	// Lookup IP addresses
	ips, err := net.LookupIP(host)
	if err != nil {
		return "", fmt.Errorf("DNS lookup failed: %v", err)
	}

	if len(ips) == 0 {
		return "", fmt.Errorf("no IP addresses found for domain %s", host)
	}

	// Prefer the first IPv4 address if one exists.
	var ipAddr string
	for _, ip := range ips {
		if ipv4 := ip.To4(); ipv4 != nil {
			ipAddr = ipv4.String()
			break
		}
	}

	// If no IPv4 found, use the first IP (might be IPv6).
	if ipAddr == "" {
		ipAddr = ips[0].String()
	}

	// Add port back if it existed.
	if port != "" {
		ipAddr = net.JoinHostPort(ipAddr, port)
	}

	return ipAddr, nil
}
|
||||
|
||||
func ParseLogLevel(level string) logger.LogLevel {
|
||||
switch strings.ToUpper(level) {
|
||||
case "DEBUG":
|
||||
return logger.DEBUG
|
||||
case "INFO":
|
||||
return logger.INFO
|
||||
case "WARN":
|
||||
return logger.WARN
|
||||
case "ERROR":
|
||||
return logger.ERROR
|
||||
case "FATAL":
|
||||
return logger.FATAL
|
||||
default:
|
||||
return logger.INFO // default to INFO if invalid level provided
|
||||
}
|
||||
}
|
||||
|
||||
// find an available UDP port in the range [minPort, maxPort] and also the next port for the wgtester
|
||||
func FindAvailableUDPPort(minPort, maxPort uint16) (uint16, error) {
|
||||
if maxPort < minPort {
|
||||
return 0, fmt.Errorf("invalid port range: min=%d, max=%d", minPort, maxPort)
|
||||
}
|
||||
|
||||
// We need to check port+1 as well, so adjust the max port to avoid going out of range
|
||||
adjustedMaxPort := maxPort - 1
|
||||
if adjustedMaxPort < minPort {
|
||||
return 0, fmt.Errorf("insufficient port range to find consecutive ports: min=%d, max=%d", minPort, maxPort)
|
||||
}
|
||||
|
||||
// Create a slice of all ports in the range (excluding the last one)
|
||||
portRange := make([]uint16, adjustedMaxPort-minPort+1)
|
||||
for i := range portRange {
|
||||
portRange[i] = minPort + uint16(i)
|
||||
}
|
||||
|
||||
// Fisher-Yates shuffle to randomize the port order
|
||||
for i := len(portRange) - 1; i > 0; i-- {
|
||||
j := mathrand.IntN(i + 1)
|
||||
portRange[i], portRange[j] = portRange[j], portRange[i]
|
||||
}
|
||||
|
||||
// Try each port in the randomized order
|
||||
for _, port := range portRange {
|
||||
// Check if port is available
|
||||
addr1 := &net.UDPAddr{
|
||||
IP: net.ParseIP("127.0.0.1"),
|
||||
Port: int(port),
|
||||
}
|
||||
conn1, err1 := net.ListenUDP("udp", addr1)
|
||||
if err1 != nil {
|
||||
continue // Port is in use or there was an error, try next port
|
||||
}
|
||||
|
||||
conn1.Close()
|
||||
return port, nil
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("no available consecutive UDP ports found in range %d-%d", minPort, maxPort)
|
||||
}
|
||||
|
||||
func FixKey(key string) string {
|
||||
// Remove any whitespace
|
||||
key = strings.TrimSpace(key)
|
||||
|
||||
// Decode from base64
|
||||
decoded, err := base64.StdEncoding.DecodeString(key)
|
||||
if err != nil {
|
||||
logger.Fatal("Error decoding base64: %v", err)
|
||||
}
|
||||
|
||||
// Convert to hex
|
||||
return hex.EncodeToString(decoded)
|
||||
}
|
||||
|
||||
// this is the opposite of FixKey
|
||||
func UnfixKey(hexKey string) string {
|
||||
// Decode from hex
|
||||
decoded, err := hex.DecodeString(hexKey)
|
||||
if err != nil {
|
||||
logger.Fatal("Error decoding hex: %v", err)
|
||||
}
|
||||
|
||||
// Convert to base64
|
||||
return base64.StdEncoding.EncodeToString(decoded)
|
||||
}
|
||||
|
||||
func MapToWireGuardLogLevel(level logger.LogLevel) int {
|
||||
switch level {
|
||||
case logger.DEBUG:
|
||||
return device.LogLevelVerbose
|
||||
// case logger.INFO:
|
||||
// return device.LogLevel
|
||||
case logger.WARN:
|
||||
return device.LogLevelError
|
||||
case logger.ERROR, logger.FATAL:
|
||||
return device.LogLevelSilent
|
||||
default:
|
||||
return device.LogLevelSilent
|
||||
}
|
||||
}
|
||||
|
||||
// GetProtocol extracts the transport protocol number from a raw IPv4 or
// IPv6 packet without full parsing (fast path). For IPv4 it reads the
// Protocol field (byte 9); for IPv6 the Next Header field (byte 6). The
// boolean is false when the packet is too short or the version nibble is
// neither 4 nor 6. NOTE(review): for IPv6 this returns the first Next
// Header value, which may name an extension header rather than the final
// transport protocol.
func GetProtocol(packet []byte) (uint8, bool) {
	if len(packet) < 20 {
		return 0, false
	}
	switch packet[0] >> 4 {
	case 4:
		return packet[9], true
	case 6:
		if len(packet) < 40 {
			return 0, false
		}
		return packet[6], true
	default:
		return 0, false
	}
}
|
||||
|
||||
// GetDestPort extracts the destination port from the TCP/UDP header of a
// raw IPv4 or IPv6 packet without full parsing (fast path). It returns
// false for short packets or unknown IP versions. NOTE(review): the
// transport header is assumed to immediately follow the IP header — IPv6
// extension headers are not skipped, and the payload is not verified to
// actually be TCP or UDP.
func GetDestPort(packet []byte) (uint16, bool) {
	if len(packet) < 20 {
		return 0, false
	}

	var headerLen int
	switch packet[0] >> 4 {
	case 4:
		// IHL is the header length in 32-bit words.
		headerLen = int(packet[0]&0x0F) * 4
	case 6:
		headerLen = 40 // fixed IPv6 base header size
	default:
		return 0, false
	}
	if len(packet) < headerLen+4 {
		return 0, false
	}

	// Destination port occupies bytes 2-3 of the TCP/UDP header.
	return binary.BigEndian.Uint16(packet[headerLen+2 : headerLen+4]), true
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user