forked from github-starred/komodo
Compare commits
1626 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
951cb82e0c | ||
|
|
0643f96053 | ||
|
|
56d835f2d2 | ||
|
|
d8fb8f8649 | ||
|
|
7197d628e5 | ||
|
|
96083178dd | ||
|
|
9d1b705ab1 | ||
|
|
2582bc9ba3 | ||
|
|
44f34b9b40 | ||
|
|
bbb18d8280 | ||
|
|
da95b7d074 | ||
|
|
6b25309aed | ||
|
|
f8e371af31 | ||
|
|
a0f5ae8c7f | ||
|
|
2f371af288 | ||
|
|
76840efddc | ||
|
|
8f01e441a4 | ||
|
|
41a6e0a65a | ||
|
|
40027f7430 | ||
|
|
a2c69aba87 | ||
|
|
a5d3fbedc6 | ||
|
|
b311b11785 | ||
|
|
7a0b29b387 | ||
|
|
d3a87fdb5f | ||
|
|
9b7ab6d98a | ||
|
|
c302e28d86 | ||
|
|
33be989e3a | ||
|
|
c9d65300c9 | ||
|
|
e96b676366 | ||
|
|
0bff4a5e51 | ||
|
|
9b12334922 | ||
|
|
68659630fc | ||
|
|
8b33647620 | ||
|
|
871aba62d5 | ||
|
|
c649094a8a | ||
|
|
e3c11db89e | ||
|
|
c43293109d | ||
|
|
d3e4f9f638 | ||
|
|
eecf583b0e | ||
|
|
a518806d8b | ||
|
|
8a4611c380 | ||
|
|
d679fbe72f | ||
|
|
1cf02bc4b4 | ||
|
|
bf703eef35 | ||
|
|
985058afb0 | ||
|
|
eac1145958 | ||
|
|
1b408d92d9 | ||
|
|
ee95c2c76b | ||
|
|
e83124ebff | ||
|
|
e912ae050a | ||
|
|
99253d6182 | ||
|
|
ef91577ac5 | ||
|
|
b97f9b30b3 | ||
|
|
7cb11dbc5d | ||
|
|
6d815629fc | ||
|
|
f8021d8541 | ||
|
|
1f444fdbc2 | ||
|
|
af76dd1be4 | ||
|
|
5cb91c6f8d | ||
|
|
de5502aec7 | ||
|
|
ef4ae4c5f2 | ||
|
|
866eb6d81b | ||
|
|
58d6c16eea | ||
|
|
ccbf13ae84 | ||
|
|
21f6acd3d7 | ||
|
|
dce59d1383 | ||
|
|
2fb544c3b0 | ||
|
|
1ba288be79 | ||
|
|
1ff21d2986 | ||
|
|
79cc2c1bb7 | ||
|
|
17b2e6660c | ||
|
|
4ef095fe55 | ||
|
|
fb0a7352e3 | ||
|
|
9a087e5975 | ||
|
|
814e47031d | ||
|
|
1304565e40 | ||
|
|
85616d0669 | ||
|
|
feff4647e7 | ||
|
|
549e15bfe2 | ||
|
|
a08baf8432 | ||
|
|
99c47ce133 | ||
|
|
26a4691c0b | ||
|
|
addb35aa69 | ||
|
|
16bf78f9ad | ||
|
|
3ed4f91d82 | ||
|
|
653fb894a2 | ||
|
|
0f9798a5f2 | ||
|
|
6776a20ec5 | ||
|
|
fb21e8586f | ||
|
|
8b2c4d604a | ||
|
|
c0b010d5ce | ||
|
|
97de34a088 | ||
|
|
6c0b76a270 | ||
|
|
eebd44ab9b | ||
|
|
783250c5ce | ||
|
|
70ff93050f | ||
|
|
1cc1813185 | ||
|
|
b4f9b87d06 | ||
|
|
26b09a767e | ||
|
|
bba6c4d8b6 | ||
|
|
ea440235c4 | ||
|
|
f9949bf988 | ||
|
|
b978db012e | ||
|
|
bc2fbdd657 | ||
|
|
a5571bcf4d | ||
|
|
683a528dd9 | ||
|
|
4a283b6052 | ||
|
|
37224ee1ad | ||
|
|
5e7445b10d | ||
|
|
1829a7da34 | ||
|
|
4a1a653bd9 | ||
|
|
840c1a87d0 | ||
|
|
c90368e2af | ||
|
|
1f9d74fadb | ||
|
|
5b261058fe | ||
|
|
cf6632ba02 | ||
|
|
c7124bd63c | ||
|
|
ba19e45607 | ||
|
|
20282ffcbb | ||
|
|
cb8ad90838 | ||
|
|
caac3fdcc4 | ||
|
|
44da282060 | ||
|
|
a2e27b09fc | ||
|
|
c1b1f397fd | ||
|
|
1d0f239594 | ||
|
|
549bc78799 | ||
|
|
9eb9b57e36 | ||
|
|
c38849961e | ||
|
|
3acfa0c4b1 | ||
|
|
62b083c3be | ||
|
|
ee6fc4c590 | ||
|
|
4fa550b3d3 | ||
|
|
1c44ae98fb | ||
|
|
148223f995 | ||
|
|
4678f83542 | ||
|
|
340bac078f | ||
|
|
3f8e75bbd8 | ||
|
|
c4278d14a9 | ||
|
|
4909106c3c | ||
|
|
0f04a2848a | ||
|
|
ba073bf8b2 | ||
|
|
640809aa6b | ||
|
|
943bb4c61a | ||
|
|
ef43cb7920 | ||
|
|
1abe634679 | ||
|
|
18ab18b6f6 | ||
|
|
aad971a599 | ||
|
|
3d9da97d7b | ||
|
|
2d0a09f760 | ||
|
|
568c317d6d | ||
|
|
f0e1f253f4 | ||
|
|
7634b9fa1d | ||
|
|
2e1795eba6 | ||
|
|
0f6b3d6e9b | ||
|
|
a70afcc461 | ||
|
|
d78fc2b282 | ||
|
|
249610afce | ||
|
|
b93b639b40 | ||
|
|
13ceb55fe8 | ||
|
|
7ebd38d350 | ||
|
|
c12c74005e | ||
|
|
c44dc1c6f6 | ||
|
|
cd2b9ec4ed | ||
|
|
dc62442c00 | ||
|
|
81a90f7ae4 | ||
|
|
af092e0d88 | ||
|
|
0e70015fd1 | ||
|
|
a214deef86 | ||
|
|
0efb2966b0 | ||
|
|
1e8422a506 | ||
|
|
7bb386e6d0 | ||
|
|
c464ca5612 | ||
|
|
742630fdee | ||
|
|
154fd899fe | ||
|
|
aa25f9d4c9 | ||
|
|
eeed94c8fd | ||
|
|
b2f771199d | ||
|
|
4bceb97a66 | ||
|
|
8734e6fc4c | ||
|
|
effd9315cb | ||
|
|
d45f60f604 | ||
|
|
1c7582d0a2 | ||
|
|
566e1090da | ||
|
|
a9b3054de3 | ||
|
|
73d179b355 | ||
|
|
97486242a0 | ||
|
|
a0ddff0618 | ||
|
|
f91e95bf63 | ||
|
|
431ef82f3e | ||
|
|
a07bc9fbca | ||
|
|
1e13cd9261 | ||
|
|
ae0b59179d | ||
|
|
a9ef12d687 | ||
|
|
b73c13172a | ||
|
|
c9b41e7449 | ||
|
|
fccc15df4a | ||
|
|
4f7eeacebc | ||
|
|
9b172a833a | ||
|
|
fd6fc925d6 | ||
|
|
6318670b6c | ||
|
|
7fa5fd83d2 | ||
|
|
e286acb123 | ||
|
|
ace6dd5a9a | ||
|
|
de70ab5eda | ||
|
|
56ab104ac5 | ||
|
|
56da5c04f2 | ||
|
|
3d73f325fe | ||
|
|
7b778631f3 | ||
|
|
de4c657868 | ||
|
|
b6df4a08b1 | ||
|
|
923c1d6cf6 | ||
|
|
ff892afa16 | ||
|
|
3e9e0a9be4 | ||
|
|
dc796900cd | ||
|
|
b8afd43d07 | ||
|
|
ba52ce79fc | ||
|
|
9936f9f357 | ||
|
|
677e1a3830 | ||
|
|
d46ff30540 | ||
|
|
98453580c0 | ||
|
|
5683929bbe | ||
|
|
ca368340d5 | ||
|
|
e66d2fac95 | ||
|
|
1c74d388dc | ||
|
|
6e698fec05 | ||
|
|
d06b2abea4 | ||
|
|
e7a4a364c2 | ||
|
|
31bcbf36dd | ||
|
|
afbf28668b | ||
|
|
7427a6d6d1 | ||
|
|
fab4e8e534 | ||
|
|
0fc6e89ffe | ||
|
|
9509b23dc1 | ||
|
|
b5ea6d43f3 | ||
|
|
92ef0addd5 | ||
|
|
a61d50b049 | ||
|
|
9021f1beea | ||
|
|
f3ecf30b3d | ||
|
|
ea40073fcc | ||
|
|
ed7e0a38d8 | ||
|
|
0d8d41f85d | ||
|
|
45bf8ae6b0 | ||
|
|
db9c2d924c | ||
|
|
62b34ab9a5 | ||
|
|
85157ddfb9 | ||
|
|
de746096ab | ||
|
|
f272612e74 | ||
|
|
d85bd25ed4 | ||
|
|
f39c786a64 | ||
|
|
26cae20505 | ||
|
|
58a8ebee0c | ||
|
|
5c8adf031c | ||
|
|
9b168d35d6 | ||
|
|
c4ece715f9 | ||
|
|
8e02de909f | ||
|
|
176b12f18c | ||
|
|
43514acc92 | ||
|
|
4894568651 | ||
|
|
f16d079e66 | ||
|
|
4ed71812a4 | ||
|
|
577b93e2dc | ||
|
|
c676b5168a | ||
|
|
4674af2f1b | ||
|
|
2429ab050d | ||
|
|
8698c0f5be | ||
|
|
c583a5dc62 | ||
|
|
528b74d156 | ||
|
|
ebedebd761 | ||
|
|
ee2953b2e9 | ||
|
|
7a2044b395 | ||
|
|
3123d021d9 | ||
|
|
c46b2cf59d | ||
|
|
79b4bae40a | ||
|
|
cb9e0ae252 | ||
|
|
a7a7d0552b | ||
|
|
01ea85e627 | ||
|
|
e81be79cb4 | ||
|
|
fca324480f | ||
|
|
762317e08a | ||
|
|
6d2f43e40a | ||
|
|
3823df8362 | ||
|
|
17398fc932 | ||
|
|
9b0e96a59a | ||
|
|
c5fdb914ff | ||
|
|
65ae5d9465 | ||
|
|
c4548f9e7e | ||
|
|
86cfb2ebc7 | ||
|
|
3bad049682 | ||
|
|
5352afee06 | ||
|
|
a83dedbcd0 | ||
|
|
beee584cc2 | ||
|
|
ccc7852576 | ||
|
|
0bdb3ddfea | ||
|
|
0f2b23bb6c | ||
|
|
a5537a0758 | ||
|
|
a64723269f | ||
|
|
2b93aa3dca | ||
|
|
992054f943 | ||
|
|
784aa754f7 | ||
|
|
5124d3aae8 | ||
|
|
0fb746bc03 | ||
|
|
a2d301bfbc | ||
|
|
4523f3e112 | ||
|
|
012cea8fce | ||
|
|
48a62232a7 | ||
|
|
46ee857c22 | ||
|
|
7bbd22b4b2 | ||
|
|
862c5b7a7c | ||
|
|
5eb7e27732 | ||
|
|
1231193c89 | ||
|
|
719938f442 | ||
|
|
7b5f2ea69b | ||
|
|
01709deced | ||
|
|
23c87bfaa4 | ||
|
|
0ef8e1861b | ||
|
|
10b278a141 | ||
|
|
72c884f9ec | ||
|
|
ecdae0e8df | ||
|
|
2e67b16ba2 | ||
|
|
18d31235d4 | ||
|
|
4f8c150c0b | ||
|
|
f26c937747 | ||
|
|
34176c336e | ||
|
|
635f3678ef | ||
|
|
12bd8cc265 | ||
|
|
64316a8a61 | ||
|
|
b85c3b25f7 | ||
|
|
144344bcfc | ||
|
|
7736ba8999 | ||
|
|
90c7bd56bf | ||
|
|
714edd70fb | ||
|
|
9c31813a16 | ||
|
|
d6b76134a3 | ||
|
|
b7692e39c8 | ||
|
|
60b5179b3e | ||
|
|
46a1c86cb6 | ||
|
|
393363b33e | ||
|
|
e762363d96 | ||
|
|
5d2082b478 | ||
|
|
ad09fd81b4 | ||
|
|
54fdbf9fd0 | ||
|
|
8c3de939da | ||
|
|
2f3a3f8f23 | ||
|
|
daf13d693d | ||
|
|
bbd5384589 | ||
|
|
fb72bbf81e | ||
|
|
29edd6b9b5 | ||
|
|
215b35575d | ||
|
|
6394275570 | ||
|
|
eaca72991b | ||
|
|
634e895469 | ||
|
|
77c8033d22 | ||
|
|
7508ae21b8 | ||
|
|
fd90a62af1 | ||
|
|
707dd682ed | ||
|
|
7396988032 | ||
|
|
58960cdc6e | ||
|
|
31a42dce7e | ||
|
|
9b248a8fa4 | ||
|
|
f099e5c6b7 | ||
|
|
5c093c81ab | ||
|
|
e73146533a | ||
|
|
db1237184f | ||
|
|
1e5ed1d29e | ||
|
|
d68ea2c28f | ||
|
|
9700ab2cb6 | ||
|
|
d8321c873e | ||
|
|
44784487a0 | ||
|
|
76471fa694 | ||
|
|
6ec7b4305c | ||
|
|
896a344ac7 | ||
|
|
34cefcdaf6 | ||
|
|
4c8eb68611 | ||
|
|
50866659ea | ||
|
|
8262cb858e | ||
|
|
e710317cac | ||
|
|
138cf781f3 | ||
|
|
70d315b2d7 | ||
|
|
bca8ca52da | ||
|
|
42c769ed56 | ||
|
|
592af39550 | ||
|
|
79620030bc | ||
|
|
294bc8712b | ||
|
|
f75d30d8ce | ||
|
|
de81e1e790 | ||
|
|
450f5c45a1 | ||
|
|
b864b32cb2 | ||
|
|
0d13ac8f38 | ||
|
|
edd517e21c | ||
|
|
32d356600a | ||
|
|
9e290278d0 | ||
|
|
4140dcb9dc | ||
|
|
1ed3d31011 | ||
|
|
3915f921f1 | ||
|
|
050571196c | ||
|
|
3c1a129ac9 | ||
|
|
57a3561aa8 | ||
|
|
ec37bfc0c6 | ||
|
|
82387e95da | ||
|
|
36b018151d | ||
|
|
b4754b64d5 | ||
|
|
3a5530a186 | ||
|
|
ee22044b34 | ||
|
|
8f5570128d | ||
|
|
24d2e744a4 | ||
|
|
bc9877133c | ||
|
|
05886bed71 | ||
|
|
c882c12890 | ||
|
|
12647896c4 | ||
|
|
ebbec7d68c | ||
|
|
cf054395bb | ||
|
|
8c04cf3db2 | ||
|
|
516690b260 | ||
|
|
fc602054ba | ||
|
|
e2244429ce | ||
|
|
b60ac6e1be | ||
|
|
6999fb8c2a | ||
|
|
fd84b5920b | ||
|
|
d02df02bbe | ||
|
|
1f143b814e | ||
|
|
65700ad70e | ||
|
|
6a16f1344a | ||
|
|
3ecacc69a5 | ||
|
|
9db2e858d2 | ||
|
|
f8cdf6bf45 | ||
|
|
1b97d85023 | ||
|
|
74a5f429e9 | ||
|
|
7554055767 | ||
|
|
f617f91d96 | ||
|
|
7b95e6b276 | ||
|
|
1510ed2147 | ||
|
|
f8237217b0 | ||
|
|
5f95fe16fd | ||
|
|
cf8fd893fa | ||
|
|
1032629800 | ||
|
|
12576711fa | ||
|
|
233c8085b5 | ||
|
|
1b1650eba4 | ||
|
|
73c17c68c4 | ||
|
|
3492eb3126 | ||
|
|
6dcc4b30de | ||
|
|
a5e15f9c5e | ||
|
|
851c1f450c | ||
|
|
fd12921f6d | ||
|
|
1100e160de | ||
|
|
bfcdf011c4 | ||
|
|
48ff1ebefa | ||
|
|
f1c9d05abc | ||
|
|
73217c9178 | ||
|
|
7c5acb8c21 | ||
|
|
10662ec679 | ||
|
|
65aba12dd0 | ||
|
|
8d1410b181 | ||
|
|
d9fbccb644 | ||
|
|
c9334074f6 | ||
|
|
f4b77598c3 | ||
|
|
478a046074 | ||
|
|
c4cb1f9c66 | ||
|
|
f84a3a9034 | ||
|
|
fa389515cd | ||
|
|
06c480ef00 | ||
|
|
71dd07ac28 | ||
|
|
30f15cf7e8 | ||
|
|
cc99e91935 | ||
|
|
6544c1887a | ||
|
|
9636d4d347 | ||
|
|
c7ce80c5e3 | ||
|
|
5d0050535d | ||
|
|
340d013bb3 | ||
|
|
f6c99c4c20 | ||
|
|
7c6eecb0e9 | ||
|
|
59b09e580e | ||
|
|
4174eebffe | ||
|
|
200aefac54 | ||
|
|
b200088093 | ||
|
|
8669ed8c73 | ||
|
|
8a96724247 | ||
|
|
3a913fba44 | ||
|
|
05a08d8640 | ||
|
|
2bea16d003 | ||
|
|
475f0438f9 | ||
|
|
c9720c15b9 | ||
|
|
0232fc1c2c | ||
|
|
9d5550bf5f | ||
|
|
873d9c2df6 | ||
|
|
5266b01e4c | ||
|
|
12315e90de | ||
|
|
c32905cca4 | ||
|
|
71571e2625 | ||
|
|
25eb29946b | ||
|
|
bb98d3209d | ||
|
|
73f624d6a1 | ||
|
|
d1f8c130a1 | ||
|
|
93e16c4b7c | ||
|
|
ee91d1d83e | ||
|
|
46d408056c | ||
|
|
d8ff64c1d9 | ||
|
|
453600a70c | ||
|
|
597e466d84 | ||
|
|
65164bd8ef | ||
|
|
dbf589da91 | ||
|
|
0857cbfa92 | ||
|
|
225edce50a | ||
|
|
5893fdefaf | ||
|
|
2b4ebd6e10 | ||
|
|
f97e48e886 | ||
|
|
b788bc6b6a | ||
|
|
8c91b01dd9 | ||
|
|
30b6dac7dd | ||
|
|
b6ec89e4aa | ||
|
|
39a0fcb358 | ||
|
|
9d353806a3 | ||
|
|
d24f8d5c7c | ||
|
|
cd7d51a16b | ||
|
|
d438fbba49 | ||
|
|
d73c71e18b | ||
|
|
3f4d1983e1 | ||
|
|
1c87ac0546 | ||
|
|
a11aa5c751 | ||
|
|
5cae0b99c7 | ||
|
|
58d0b6b458 | ||
|
|
4815d225d7 | ||
|
|
fd487350f5 | ||
|
|
dafcf22d49 | ||
|
|
78cefe19bd | ||
|
|
bbca105077 | ||
|
|
e3e4278206 | ||
|
|
e479e62cce | ||
|
|
8f73b08fbf | ||
|
|
5d02a8874f | ||
|
|
2ec5789d71 | ||
|
|
1c7a159e40 | ||
|
|
81a4caf23c | ||
|
|
9065b6034b | ||
|
|
49104922c1 | ||
|
|
409e064452 | ||
|
|
61f7efaa85 | ||
|
|
f0baa7496f | ||
|
|
7ed115cddb | ||
|
|
cb6dc29469 | ||
|
|
44032e2a9e | ||
|
|
7390a9421d | ||
|
|
f1cbe6fba9 | ||
|
|
5925cf5cc9 | ||
|
|
219a914cb1 | ||
|
|
6b6d6337c8 | ||
|
|
c511ae09a1 | ||
|
|
e658cb3aa0 | ||
|
|
9f4cf475b6 | ||
|
|
af53cbebed | ||
|
|
1895ebcf25 | ||
|
|
02a313d70b | ||
|
|
57ed905140 | ||
|
|
d46741c5ae | ||
|
|
ba3692e085 | ||
|
|
593cad65dd | ||
|
|
8bd05144da | ||
|
|
899ebcd681 | ||
|
|
f4917762c0 | ||
|
|
88c35281bf | ||
|
|
7e1810a0a7 | ||
|
|
7eb62de74b | ||
|
|
69bb7ef1fd | ||
|
|
57f0404a02 | ||
|
|
d8dccda1c0 | ||
|
|
16f3c4190d | ||
|
|
823878e963 | ||
|
|
f4246e3e0a | ||
|
|
9a8730a832 | ||
|
|
31d01aef11 | ||
|
|
a61da39038 | ||
|
|
7b9def2aeb | ||
|
|
92b8cc9f6b | ||
|
|
d3d5f7a745 | ||
|
|
be78e4c48a | ||
|
|
be9937355b | ||
|
|
f1d36f0f4a | ||
|
|
2a3f223a2b | ||
|
|
21401c544d | ||
|
|
6ceb7404bb | ||
|
|
a60d2e7f0f | ||
|
|
4480eff46d | ||
|
|
b265bd68b3 | ||
|
|
9e47eb3470 | ||
|
|
a050f419e4 | ||
|
|
d1373aa5b3 | ||
|
|
c53704849a | ||
|
|
e27a25b147 | ||
|
|
b1a88a9c2d | ||
|
|
780fd0490e | ||
|
|
b3d0ee7080 | ||
|
|
6d2287c4e5 | ||
|
|
8ae8c43c3c | ||
|
|
2a18321225 | ||
|
|
2f67e4de94 | ||
|
|
84c6af1ee9 | ||
|
|
caae39b2de | ||
|
|
2af30856ba | ||
|
|
a85e842911 | ||
|
|
2f9805e97b | ||
|
|
2c66b9ecc5 | ||
|
|
35e5f8928a | ||
|
|
4c647db584 | ||
|
|
e523ea0cc3 | ||
|
|
404343e5e7 | ||
|
|
27f5353ee6 | ||
|
|
848fd4d4c8 | ||
|
|
2f58bf2dd1 | ||
|
|
dbe32056d0 | ||
|
|
c88e210136 | ||
|
|
c002ba2f00 | ||
|
|
a8c076f2c9 | ||
|
|
99da264f31 | ||
|
|
6c862be9b9 | ||
|
|
6c75d64b5d | ||
|
|
bf59390f68 | ||
|
|
406cfaefef | ||
|
|
1a1919f38c | ||
|
|
3d99c300a4 | ||
|
|
1a63be7746 | ||
|
|
ae7e50396e | ||
|
|
f7ffb41dea | ||
|
|
1e6987a229 | ||
|
|
d0bc5caebf | ||
|
|
59c7481fd7 | ||
|
|
70a11de316 | ||
|
|
3da88135c2 | ||
|
|
d2a706fd9d | ||
|
|
8f57af0667 | ||
|
|
cb46029f35 | ||
|
|
e35c63ff5d | ||
|
|
440d54f500 | ||
|
|
f4a78ef397 | ||
|
|
96831e5973 | ||
|
|
c5f9cc104c | ||
|
|
dc6c391798 | ||
|
|
80e5879c43 | ||
|
|
d9864f31c1 | ||
|
|
75b0d25d3e | ||
|
|
625de2e38e | ||
|
|
0f7d64350f | ||
|
|
63620f246b | ||
|
|
f416d628ae | ||
|
|
24bf642dfa | ||
|
|
8a0321bf67 | ||
|
|
5e033e1f76 | ||
|
|
ec47fa42b8 | ||
|
|
080ecf9fd9 | ||
|
|
f0deda32dc | ||
|
|
7b3298e9e3 | ||
|
|
f26661459d | ||
|
|
b4c0047d45 | ||
|
|
4e03cf9675 | ||
|
|
33eb5fc020 | ||
|
|
01d62a6a15 | ||
|
|
bae22e0104 | ||
|
|
33b49d46ce | ||
|
|
c6beda1b8f | ||
|
|
8faba6ec5b | ||
|
|
e2824da846 | ||
|
|
b4dcadbf17 | ||
|
|
e96e1ebb9c | ||
|
|
b59158d314 | ||
|
|
82588a0f83 | ||
|
|
c6c91b6b11 | ||
|
|
a8918156f6 | ||
|
|
df56123ceb | ||
|
|
76d331ef03 | ||
|
|
71524b2073 | ||
|
|
f286efb174 | ||
|
|
feea9384c1 | ||
|
|
7dfcc9a3b6 | ||
|
|
43efd8e60b | ||
|
|
28fce80476 | ||
|
|
cf4c4664e1 | ||
|
|
9a8b2a308d | ||
|
|
188a70de21 | ||
|
|
5fccd1064f | ||
|
|
9f58943dfc | ||
|
|
9111f8b8c7 | ||
|
|
89dba2dbc0 | ||
|
|
0b79f125ed | ||
|
|
59bc429ce5 | ||
|
|
46db31c695 | ||
|
|
c45a617a90 | ||
|
|
2982ac051f | ||
|
|
a6b1b385e5 | ||
|
|
3f8c62c1a4 | ||
|
|
e31443b285 | ||
|
|
10beeab4b5 | ||
|
|
a5fdfc3b67 | ||
|
|
73d0e2589b | ||
|
|
2c6d2a30d7 | ||
|
|
d0566bf336 | ||
|
|
e93e00f4f7 | ||
|
|
720252f6ae | ||
|
|
43ee9b05a3 | ||
|
|
bcfb847412 | ||
|
|
402a7a730d | ||
|
|
5e975a6acd | ||
|
|
f1639126df | ||
|
|
7aa048a512 | ||
|
|
76b958ed89 | ||
|
|
d0c8e0bb49 | ||
|
|
7f1df2d24c | ||
|
|
7331fdc083 | ||
|
|
980d675572 | ||
|
|
749df9e554 | ||
|
|
7bb97dadf4 | ||
|
|
a223357ef1 | ||
|
|
9ef1e3cd9b | ||
|
|
1459b05189 | ||
|
|
2c992a0d23 | ||
|
|
f6a712c17b | ||
|
|
97c03d97ad | ||
|
|
b80589ea63 | ||
|
|
4fd3e606fd | ||
|
|
d1ae925454 | ||
|
|
3e260e4464 | ||
|
|
b7d841be14 | ||
|
|
854ce07e8f | ||
|
|
5e572ce5ef | ||
|
|
bec8b3ce0f | ||
|
|
df302015a3 | ||
|
|
fd9c7848c2 | ||
|
|
3c6a2e2546 | ||
|
|
af7229acb0 | ||
|
|
e1fbbe7c4e | ||
|
|
cdc7827b7e | ||
|
|
f24005e02e | ||
|
|
0ff9a7e50d | ||
|
|
3ec810ed82 | ||
|
|
93fc8f7452 | ||
|
|
172b3daeab | ||
|
|
1b3e3b19cf | ||
|
|
29b31ffa1e | ||
|
|
f1aca3df4d | ||
|
|
180ac60d21 | ||
|
|
b22621e3c4 | ||
|
|
db1d61f1e3 | ||
|
|
8e0c5b3792 | ||
|
|
1b30a8edd4 | ||
|
|
fe1376a70c | ||
|
|
2dbce8f695 | ||
|
|
b1700f3d14 | ||
|
|
63e3e0b9d3 | ||
|
|
cdd55ee61d | ||
|
|
c72050d329 | ||
|
|
9a89b82f83 | ||
|
|
37becd9327 | ||
|
|
d5f2ae7871 | ||
|
|
08b74b5304 | ||
|
|
6183dbf653 | ||
|
|
939f3db719 | ||
|
|
d3e2ee2974 | ||
|
|
0b6d955672 | ||
|
|
6884453b0f | ||
|
|
0d37600cda | ||
|
|
c32211aa7a | ||
|
|
e0776952ee | ||
|
|
d92e4a014a | ||
|
|
48fc51a89a | ||
|
|
0af29e146d | ||
|
|
4467c64edd | ||
|
|
1314d7744f | ||
|
|
9a984e7e41 | ||
|
|
89e4010566 | ||
|
|
1ecfc56f19 | ||
|
|
579c6eaf77 | ||
|
|
4cecfd1611 | ||
|
|
5a9899c63d | ||
|
|
a1d3c46127 | ||
|
|
902f3f7d00 | ||
|
|
8ef32c50c2 | ||
|
|
e0d0512777 | ||
|
|
9c73ffd7da | ||
|
|
0e2ca7c0d9 | ||
|
|
2d8f5ef337 | ||
|
|
a02e9b8318 | ||
|
|
4f844f673f | ||
|
|
1a068ed82b | ||
|
|
b8df4caf75 | ||
|
|
11da11c326 | ||
|
|
3e41a5568f | ||
|
|
3f73ee6cff | ||
|
|
866137e128 | ||
|
|
66b0ac7d51 | ||
|
|
7ac7edb437 | ||
|
|
82ed74429c | ||
|
|
417dc7dac1 | ||
|
|
999ce044cb | ||
|
|
8759508c86 | ||
|
|
56a40edc66 | ||
|
|
5021c77e53 | ||
|
|
3fbe859c28 | ||
|
|
560e94a232 | ||
|
|
d982ccd4d1 | ||
|
|
45b38c23a5 | ||
|
|
442180014d | ||
|
|
867a968b5b | ||
|
|
334529ab67 | ||
|
|
c66fc2d6de | ||
|
|
5eea95d118 | ||
|
|
804a215190 | ||
|
|
da6dfc9bcc | ||
|
|
0d4ec308d6 | ||
|
|
24437e7b10 | ||
|
|
1abaff6625 | ||
|
|
0c1692866c | ||
|
|
d28e9bb51c | ||
|
|
760c5d521b | ||
|
|
6ce14ffc80 | ||
|
|
cc54725011 | ||
|
|
c1a6f2b957 | ||
|
|
5d5578f89f | ||
|
|
e7bb58397e | ||
|
|
04690f1949 | ||
|
|
4df449c724 | ||
|
|
315be1b61d | ||
|
|
4c30fb09ab | ||
|
|
2a24d1d9b7 | ||
|
|
fe771928ee | ||
|
|
768baccea7 | ||
|
|
c5a6f06c35 | ||
|
|
e57a90ca01 | ||
|
|
9d95750092 | ||
|
|
3c5a218152 | ||
|
|
cf6148eca5 | ||
|
|
df1945fb38 | ||
|
|
ed93efd1bd | ||
|
|
2d48073262 | ||
|
|
8bba27361f | ||
|
|
de54794506 | ||
|
|
0aee4d8466 | ||
|
|
1d8e0c7b1d | ||
|
|
6fbec9fbd7 | ||
|
|
6d20457f0e | ||
|
|
73dc18fb78 | ||
|
|
440284663a | ||
|
|
796793eefd | ||
|
|
260fd572bf | ||
|
|
d6c03a51b1 | ||
|
|
fe89c5835c | ||
|
|
ddcc35ec58 | ||
|
|
0f39497bee | ||
|
|
58f1346124 | ||
|
|
997366655c | ||
|
|
de057fd08f | ||
|
|
d15607048e | ||
|
|
5460d19f80 | ||
|
|
ed4ae5efb8 | ||
|
|
530da2589d | ||
|
|
05b2a16746 | ||
|
|
35cbad2177 | ||
|
|
f13ac340b1 | ||
|
|
14dc5e40fe | ||
|
|
4820313158 | ||
|
|
9d573932f5 | ||
|
|
ab88fbc1bd | ||
|
|
1a806c209b | ||
|
|
f15540e860 | ||
|
|
d1be56923f | ||
|
|
493044f4e4 | ||
|
|
d15ca72e07 | ||
|
|
9a16634c2e | ||
|
|
997940b4ab | ||
|
|
fee4585ca4 | ||
|
|
94c1df79ef | ||
|
|
b4e6626feb | ||
|
|
b8ad77085c | ||
|
|
750f1fc379 | ||
|
|
8b93b5d273 | ||
|
|
2ffbb9d9fa | ||
|
|
97180c2f04 | ||
|
|
a9b9ed2d99 | ||
|
|
097f6c3b64 | ||
|
|
a279982187 | ||
|
|
26c692978a | ||
|
|
482f8bb862 | ||
|
|
3ac7cc1a59 | ||
|
|
7c5aff44a9 | ||
|
|
a55d4e3c6f | ||
|
|
485b337c72 | ||
|
|
02f16168d3 | ||
|
|
bd42af9c3e | ||
|
|
ce5a5a6e50 | ||
|
|
bf893ca8b4 | ||
|
|
20a450d1d4 | ||
|
|
70d11c3f5d | ||
|
|
dc7690f4cf | ||
|
|
603260df5d | ||
|
|
51bdc6b221 | ||
|
|
4f6a9545e9 | ||
|
|
14edb4f913 | ||
|
|
ffe0459994 | ||
|
|
5138ce2040 | ||
|
|
db85de0722 | ||
|
|
082a76bfb7 | ||
|
|
9a3b160e62 | ||
|
|
c8b312bcb1 | ||
|
|
9cc3fa0897 | ||
|
|
49063cac26 | ||
|
|
59a06c84a1 | ||
|
|
6f5a7b97b5 | ||
|
|
0db97fd5d5 | ||
|
|
b31f74f0c5 | ||
|
|
ffb3957d4f | ||
|
|
3b46e9a6bb | ||
|
|
b67cb3bd9e | ||
|
|
91a6485c20 | ||
|
|
da24634774 | ||
|
|
88198f7696 | ||
|
|
2bdc7cd39a | ||
|
|
ec4fcbc553 | ||
|
|
30820a8d81 | ||
|
|
1c439a0392 | ||
|
|
d6fe3461f7 | ||
|
|
18a211592d | ||
|
|
8396c04b4a | ||
|
|
872254e865 | ||
|
|
2a8fb84dc3 | ||
|
|
008dab08f8 | ||
|
|
781a3958d8 | ||
|
|
ec344cafb5 | ||
|
|
ec20b288be | ||
|
|
9206e00a99 | ||
|
|
438a172e77 | ||
|
|
8aaa559fbb | ||
|
|
79b3d30c1e | ||
|
|
3c5293b60b | ||
|
|
d16da528af | ||
|
|
6407a3a06d | ||
|
|
145804c05d | ||
|
|
7b5da511fe | ||
|
|
cc65d5ac23 | ||
|
|
6c41362bfa | ||
|
|
b57688033e | ||
|
|
9c120118e9 | ||
|
|
70ce321c75 | ||
|
|
ed3df2beba | ||
|
|
613d445c99 | ||
|
|
5cc3885e32 | ||
|
|
7e66eca6ae | ||
|
|
eef1c3f41d | ||
|
|
09defb60a5 | ||
|
|
4fd931c508 | ||
|
|
9b656a13e6 | ||
|
|
e95fe8f410 | ||
|
|
91628c61ee | ||
|
|
0747bc9bc1 | ||
|
|
2df881d43f | ||
|
|
cd10eaf9db | ||
|
|
e0f69a6420 | ||
|
|
e8828ed74b | ||
|
|
ce29e0685c | ||
|
|
6c7dda2b67 | ||
|
|
7694e3f565 | ||
|
|
b8c9731081 | ||
|
|
14ec8b2bcb | ||
|
|
e9d7543c43 | ||
|
|
311e090826 | ||
|
|
1c1172e0e7 | ||
|
|
2a1b719f67 | ||
|
|
9b9cf8ad9f | ||
|
|
946f427c4b | ||
|
|
100f34a32b | ||
|
|
7a9b506958 | ||
|
|
4cd074ee2a | ||
|
|
e2a76274ef | ||
|
|
aa197bd89e | ||
|
|
fde1165408 | ||
|
|
731b91e0a9 | ||
|
|
60205ece9d | ||
|
|
6b8fc5d624 | ||
|
|
54df0af48e | ||
|
|
f1101cae5a | ||
|
|
f7a1edb7e5 | ||
|
|
4e7311bdfe | ||
|
|
818210805f | ||
|
|
0e1026a100 | ||
|
|
553a2e3dad | ||
|
|
c12eb40459 | ||
|
|
9b68283eb3 | ||
|
|
e9ee467940 | ||
|
|
91f1de5d95 | ||
|
|
6ce6b55b68 | ||
|
|
d0b5e3e241 | ||
|
|
f16176f298 | ||
|
|
2dc4d41524 | ||
|
|
2bf2901dcd | ||
|
|
99c2d6195d | ||
|
|
c5b1079be9 | ||
|
|
c25e05fde7 | ||
|
|
c6290cb7f6 | ||
|
|
b08d5090a2 | ||
|
|
3da824b4c3 | ||
|
|
9a1619c554 | ||
|
|
312f7a9003 | ||
|
|
a197948837 | ||
|
|
765d510127 | ||
|
|
b5179e443b | ||
|
|
fe6c371a9f | ||
|
|
d66c5ed618 | ||
|
|
70ba120cfb | ||
|
|
33f1d85194 | ||
|
|
e3f1b449e2 | ||
|
|
7e580efc77 | ||
|
|
b33501cce6 | ||
|
|
402259ef11 | ||
|
|
2d4c682fac | ||
|
|
2affc5f555 | ||
|
|
9ce1dded38 | ||
|
|
383b0d125c | ||
|
|
8023710702 | ||
|
|
82c0cbc8bd | ||
|
|
893a7f22a4 | ||
|
|
b903e511b5 | ||
|
|
86379d1c79 | ||
|
|
6320d6fbeb | ||
|
|
636023167d | ||
|
|
8a86825357 | ||
|
|
f5d68b4d78 | ||
|
|
7295e656eb | ||
|
|
5e51050fa3 | ||
|
|
c510a64207 | ||
|
|
7269050cf4 | ||
|
|
5730e5d2ae | ||
|
|
76ddbe1869 | ||
|
|
b5ef174312 | ||
|
|
31007f3d78 | ||
|
|
c5a66709ad | ||
|
|
aa4815b01d | ||
|
|
7d6b975592 | ||
|
|
3a57712dd2 | ||
|
|
62eb35340e | ||
|
|
e9880d00ab | ||
|
|
73c1792fea | ||
|
|
d36f399d08 | ||
|
|
c8a24dbcbc | ||
|
|
fecc5134ad | ||
|
|
8ad84a8cec | ||
|
|
51ebee2910 | ||
|
|
20d496e617 | ||
|
|
44a16dd214 | ||
|
|
4695b79b73 | ||
|
|
9c0be07ae1 | ||
|
|
ab945aadde | ||
|
|
c1c461c273 | ||
|
|
336742ee69 | ||
|
|
405dacce1c | ||
|
|
9acd45aa93 | ||
|
|
c889c2cc03 | ||
|
|
7ac91ef416 | ||
|
|
8e28669aa1 | ||
|
|
6cdb91f8b8 | ||
|
|
e892474713 | ||
|
|
abdae98816 | ||
|
|
ab4fe49f33 | ||
|
|
1ace35103b | ||
|
|
dbee729eee | ||
|
|
792576ce59 | ||
|
|
a07624e9b9 | ||
|
|
bb8054af8a | ||
|
|
7738f3e066 | ||
|
|
5dee16a100 | ||
|
|
35f3bcdf2f | ||
|
|
130ca8e1f1 | ||
|
|
ced4c21688 | ||
|
|
6ec7078024 | ||
|
|
b28d8f2506 | ||
|
|
c88a9291a0 | ||
|
|
1e82d19306 | ||
|
|
dd87e50cb2 | ||
|
|
4c8f96a30f | ||
|
|
c4f45e05f1 | ||
|
|
6aa382c7c1 | ||
|
|
ccb9f059e6 | ||
|
|
1cdcea0771 | ||
|
|
88dda0de80 | ||
|
|
30ed99e2b0 | ||
|
|
e5953b7541 | ||
|
|
1f9d01c59f | ||
|
|
cc5210a3d8 | ||
|
|
26559e2d3b | ||
|
|
7eeddb300f | ||
|
|
1e01bae16b | ||
|
|
87c03924e5 | ||
|
|
f0998b1d43 | ||
|
|
1995a04244 | ||
|
|
420fe6bcd5 | ||
|
|
d4e26c0553 | ||
|
|
5f5e7cb45e | ||
|
|
8aa0304738 | ||
|
|
8ec98c33a4 | ||
|
|
2667182ca3 | ||
|
|
1cd0018b93 | ||
|
|
359789ee29 | ||
|
|
e79c860c0f | ||
|
|
765f53f30e | ||
|
|
3c3c21d7f5 | ||
|
|
eb700cb500 | ||
|
|
b3b723a717 | ||
|
|
555c230d2e | ||
|
|
adf4b97aef | ||
|
|
32c38d796b | ||
|
|
c8829e15ed | ||
|
|
453df417d0 | ||
|
|
02a7741a9c | ||
|
|
96fc5b0ca8 | ||
|
|
b13e624a66 | ||
|
|
6a8f66f272 | ||
|
|
0c638a08fd | ||
|
|
b07f8af8e5 | ||
|
|
3bbb2a985f | ||
|
|
afdf71c545 | ||
|
|
8de8d2df9a | ||
|
|
1dffdbddc2 | ||
|
|
11fff633b0 | ||
|
|
61bc44d1f4 | ||
|
|
e8fabb8cfa | ||
|
|
7a50885847 | ||
|
|
6239da45f4 | ||
|
|
af597eb3c7 | ||
|
|
d66cda068c | ||
|
|
91fcd07c1c | ||
|
|
85aa470da1 | ||
|
|
6f0d5f37a5 | ||
|
|
1b4d604404 | ||
|
|
a7f6cbe0b9 | ||
|
|
9cf28bf123 | ||
|
|
c92e04294a | ||
|
|
36f059b455 | ||
|
|
4aac301852 | ||
|
|
b375708bbd | ||
|
|
10b6a9482b | ||
|
|
84d45c5df8 | ||
|
|
c6559814b1 | ||
|
|
c8c080183f | ||
|
|
597b67f799 | ||
|
|
ec52d5f422 | ||
|
|
34806304d6 | ||
|
|
87953d5495 | ||
|
|
b6c7c80c95 | ||
|
|
77e568d5c3 | ||
|
|
699fc51cf7 | ||
|
|
21029c90b7 | ||
|
|
6b0530eb7f | ||
|
|
f7061c7225 | ||
|
|
750f698369 | ||
|
|
ec5ef42298 | ||
|
|
46820b0044 | ||
|
|
425a6648f7 | ||
|
|
349fc297ce | ||
|
|
5ad87c03ed | ||
|
|
d16006f28f | ||
|
|
7f0452a5f5 | ||
|
|
c605b2f6fc | ||
|
|
6c2d8a8494 | ||
|
|
874691f729 | ||
|
|
cdf702e17d | ||
|
|
25fdb32627 | ||
|
|
e976ea0a3a | ||
|
|
34e6b4fc69 | ||
|
|
a2d77567b3 | ||
|
|
ecb460f9b5 | ||
|
|
63444b089c | ||
|
|
c787984b77 | ||
|
|
bf3d03e801 | ||
|
|
bc2e69b975 | ||
|
|
7b94fcf3da | ||
|
|
9cf03b8b88 | ||
|
|
a288edcf61 | ||
|
|
89cc18ad37 | ||
|
|
ffa3b671e1 | ||
|
|
f32eeb413b | ||
|
|
b5a5103cfc | ||
|
|
c5697e59f3 | ||
|
|
f030667ff4 | ||
|
|
e9fef5d97c | ||
|
|
f5818ac7ea | ||
|
|
c85ab4110d | ||
|
|
9690ea35b8 | ||
|
|
6300c8011b | ||
|
|
97f582b381 | ||
|
|
5135a9c228 | ||
|
|
b7d1212a82 | ||
|
|
7d9d0a9fc4 | ||
|
|
ed9aef4321 | ||
|
|
0aa638bdf4 | ||
|
|
0ec39d793d | ||
|
|
5579ba869c | ||
|
|
210940038c | ||
|
|
98a1a60362 | ||
|
|
86cf9116ba | ||
|
|
8b2defe0d9 | ||
|
|
50b14b3ce5 | ||
|
|
1bfb17cb5d | ||
|
|
b90acb66c7 | ||
|
|
7648b0dd10 | ||
|
|
2d69f1791a | ||
|
|
5ba887095a | ||
|
|
19b7405562 | ||
|
|
f5c5f734e1 | ||
|
|
8d1639bcaf | ||
|
|
e2446af00e | ||
|
|
1b39aaaa38 | ||
|
|
5a2a1a3d98 | ||
|
|
39eceb745b | ||
|
|
4c1ec5db33 | ||
|
|
8b68b9481e | ||
|
|
14843f83c6 | ||
|
|
e67d87e885 | ||
|
|
7d4d865d58 | ||
|
|
1e4aaff23c | ||
|
|
df3f4a5f4a | ||
|
|
1f8557300d | ||
|
|
bf17d705f0 | ||
|
|
0d24b792c6 | ||
|
|
fb61e36417 | ||
|
|
c39869d2f8 | ||
|
|
750e0274da | ||
|
|
a9d37ab667 | ||
|
|
eacb549d5e | ||
|
|
ce7cb8fe45 | ||
|
|
f9fe4e32b4 | ||
|
|
2c9fc2bad4 | ||
|
|
94949291c2 | ||
|
|
2944ba6ef9 | ||
|
|
997e68a31d | ||
|
|
bfb9d9e34d | ||
|
|
3b9219b586 | ||
|
|
7bf2a88ab1 | ||
|
|
d21ed093dc | ||
|
|
6e89671e91 | ||
|
|
ee1128a666 | ||
|
|
63b5deecd7 | ||
|
|
f4f97ce1a7 | ||
|
|
a666df099f | ||
|
|
21dd0ee072 | ||
|
|
bd2a1d4236 | ||
|
|
7acdbcfd8f | ||
|
|
58514c5c93 | ||
|
|
580e800923 | ||
|
|
29f6b19f33 | ||
|
|
e090247723 | ||
|
|
1374c26cd8 | ||
|
|
5467b40b2e | ||
|
|
165b9012da | ||
|
|
22630f665e | ||
|
|
3d867084ba | ||
|
|
171dd2d9e0 | ||
|
|
9709239f88 | ||
|
|
60d457b285 | ||
|
|
8b1d4793a7 | ||
|
|
f2166c8435 | ||
|
|
07d723a748 | ||
|
|
b36f485287 | ||
|
|
a121ae0828 | ||
|
|
e2b5a02008 | ||
|
|
575aa62625 | ||
|
|
ac88a2c4ed | ||
|
|
f1dcb71a8a | ||
|
|
30d04bc201 | ||
|
|
33a00bb1a2 | ||
|
|
ccca44ea89 | ||
|
|
ae5f36fe51 | ||
|
|
69ce1e4f36 | ||
|
|
6e444b9032 | ||
|
|
73eff72da4 | ||
|
|
698e3c214b | ||
|
|
9da77667dc | ||
|
|
c30793fb8f | ||
|
|
84fdaab24d | ||
|
|
cbd67bb609 | ||
|
|
00f58e9008 | ||
|
|
7738fab351 | ||
|
|
06e8f6589b | ||
|
|
57d9287724 | ||
|
|
2cc65595ee | ||
|
|
3dd2b97873 | ||
|
|
3c805ebbf7 | ||
|
|
a854160018 | ||
|
|
a99d9e5969 | ||
|
|
813b6c1182 | ||
|
|
2958f9589b | ||
|
|
69b4e26176 | ||
|
|
78b00f139d | ||
|
|
dc1e8de851 | ||
|
|
3187b335a3 | ||
|
|
54b5a2b420 | ||
|
|
14c6bd00a8 | ||
|
|
e9c3646450 | ||
|
|
4f20257479 | ||
|
|
65749991de | ||
|
|
237a1d802d | ||
|
|
e4336f19f3 | ||
|
|
c895e5e67f | ||
|
|
4e4e210736 | ||
|
|
09dfc8faa3 | ||
|
|
3c4f77cc78 | ||
|
|
c86880ccdb | ||
|
|
9db26a3037 | ||
|
|
711d27e15e | ||
|
|
e1d53598e6 | ||
|
|
f871bc3e03 | ||
|
|
69b359ce4a | ||
|
|
0dd914b6e4 | ||
|
|
f03d0b8930 | ||
|
|
a672c1cba3 | ||
|
|
233d9dab33 | ||
|
|
f5ac23834d | ||
|
|
03bc43b04e | ||
|
|
fdbd2a2181 | ||
|
|
ae9fe5c424 | ||
|
|
7de10b7277 | ||
|
|
8c1f4d7786 | ||
|
|
f3e84b52c6 | ||
|
|
d64c46f44d | ||
|
|
a6a58a25be | ||
|
|
37046ddbd8 | ||
|
|
6f697d292a | ||
|
|
4098c6b487 | ||
|
|
d2844b6558 | ||
|
|
59eb63bcda | ||
|
|
7842ac5c45 | ||
|
|
ba7b6db5b0 | ||
|
|
3648a6efd4 | ||
|
|
f96ee3f1e8 | ||
|
|
f0a9b4f5a6 | ||
|
|
f01c159f38 | ||
|
|
79c587c892 | ||
|
|
9f4a1625ff | ||
|
|
3b331e1df7 | ||
|
|
fb067c15ff | ||
|
|
f4bba8febc | ||
|
|
bf442f5cdf | ||
|
|
447e41c0af | ||
|
|
5901992d80 | ||
|
|
13e8ad93e2 | ||
|
|
66daa1ac17 | ||
|
|
3622394751 | ||
|
|
53ab799d9b | ||
|
|
5d0563968b | ||
|
|
6e9b692328 | ||
|
|
de7ddd2224 | ||
|
|
db82a2d44e | ||
|
|
ee46c86106 | ||
|
|
756490bc0c | ||
|
|
a483283352 | ||
|
|
399e4bfd40 | ||
|
|
d1df2818b5 | ||
|
|
5438ca87fb | ||
|
|
20a2a660a0 | ||
|
|
f9d234908b | ||
|
|
56b21f0d9e | ||
|
|
d604b28d91 | ||
|
|
d227f2c311 | ||
|
|
a1acac1479 | ||
|
|
805ebe55ce | ||
|
|
3f478fefdf | ||
|
|
809be61e9c | ||
|
|
cfa3dd537d | ||
|
|
511efdc113 | ||
|
|
34a6ac3be3 | ||
|
|
1674f066ea | ||
|
|
572560d5b2 | ||
|
|
34822d6d19 | ||
|
|
6655cb43b5 | ||
|
|
7060926854 | ||
|
|
a19c339857 | ||
|
|
2cc86a8f21 | ||
|
|
5adbba61ec | ||
|
|
d0540669b4 | ||
|
|
4fb3eb12ba | ||
|
|
f885398d7c | ||
|
|
bda5a93ea4 | ||
|
|
713d7680c1 | ||
|
|
3b574341f2 | ||
|
|
664a585bdd | ||
|
|
0a0bf55204 | ||
|
|
b3405a8ab9 | ||
|
|
7693993d18 | ||
|
|
89e16ef715 | ||
|
|
199a0e0517 | ||
|
|
ca80fb4d28 | ||
|
|
143d855ad7 | ||
|
|
aaea572c4f | ||
|
|
41d86e1f34 | ||
|
|
00bae80410 | ||
|
|
ef0522b200 | ||
|
|
f61f38be7e | ||
|
|
120877bfb5 | ||
|
|
3587818775 | ||
|
|
1f2cf5a848 | ||
|
|
d9e4e5b390 | ||
|
|
4f965cde2d | ||
|
|
662df40aaf | ||
|
|
b41486fb20 | ||
|
|
6acbc19c6a | ||
|
|
01dfdd03fe | ||
|
|
5dc335595e | ||
|
|
7c5cd1553d | ||
|
|
c78c89da37 | ||
|
|
f9f3b276c8 | ||
|
|
e982f5a6f2 | ||
|
|
ba0f76fd59 | ||
|
|
e26769a787 | ||
|
|
cffe3b1428 | ||
|
|
82f9076f3e | ||
|
|
80438e7a74 | ||
|
|
8453f9ac5f | ||
|
|
a9834a9997 | ||
|
|
772aca95f9 | ||
|
|
e1b1ee3f8d | ||
|
|
91b640cd90 | ||
|
|
47ed7d8d5f | ||
|
|
af948edbea | ||
|
|
e18cd2eebb | ||
|
|
2e5f2d11b4 | ||
|
|
2906eaf5f5 | ||
|
|
4bc8fc4b25 | ||
|
|
487bf515ba | ||
|
|
9378954551 | ||
|
|
6b366fb0e2 | ||
|
|
a5af235cef | ||
|
|
b0b991115b | ||
|
|
f2db575c70 | ||
|
|
5628d92b3c | ||
|
|
ba994e80f6 | ||
|
|
db9f633e89 | ||
|
|
7aaececa87 | ||
|
|
1c6f31cfea | ||
|
|
88f46a04f0 | ||
|
|
8b0fd2326f | ||
|
|
86af789dd3 | ||
|
|
243f54ec11 | ||
|
|
4cc1f4458e | ||
|
|
823a735ef7 | ||
|
|
7782412c9b | ||
|
|
ccd7d715b9 | ||
|
|
a192b142d4 | ||
|
|
277d5fe662 | ||
|
|
44b9eb462e | ||
|
|
680bb55321 | ||
|
|
33e0568782 | ||
|
|
2b1b88ce3c | ||
|
|
bd02b9e281 | ||
|
|
85c5b146d4 | ||
|
|
ac3d17ce82 | ||
|
|
f6f94303a5 | ||
|
|
53cf644008 | ||
|
|
3dfeb65aab | ||
|
|
c90e91d78b | ||
|
|
17a5d624d9 | ||
|
|
ef8f75b3e2 | ||
|
|
da83afc6f4 | ||
|
|
d77d07cb52 | ||
|
|
161ac34afa | ||
|
|
e9685ce71a | ||
|
|
bdf4e9d6c6 | ||
|
|
5e205efd29 | ||
|
|
a491f206c9 | ||
|
|
a6d655b42e | ||
|
|
f5fa676db6 | ||
|
|
8b16aff3bf | ||
|
|
3006aa7fa3 | ||
|
|
d5861274de | ||
|
|
a927571e29 | ||
|
|
9998debb96 | ||
|
|
956805603e | ||
|
|
4a4bf197a9 | ||
|
|
0845dc04e1 | ||
|
|
6104e5aa5a | ||
|
|
1d717dff7a | ||
|
|
c2702869c1 | ||
|
|
58a5accd83 | ||
|
|
de937707e1 | ||
|
|
0ceecee604 | ||
|
|
5d0c14201d | ||
|
|
d0d6271d8f | ||
|
|
90884904de | ||
|
|
b042494b44 | ||
|
|
80ced82131 | ||
|
|
f74faaecf8 | ||
|
|
3b6d3af7bb | ||
|
|
15662c951d | ||
|
|
de8c59bb61 | ||
|
|
9cde83de0e | ||
|
|
0a5256cbd7 | ||
|
|
99ce35ce1d | ||
|
|
4d3feb52c7 | ||
|
|
eb79e8726d | ||
|
|
3c055544cc | ||
|
|
0d4118dac8 | ||
|
|
dac9c05a89 | ||
|
|
1dbc2909e2 | ||
|
|
6fc4a0627c | ||
|
|
4a99ccc1a9 | ||
|
|
46dc663385 | ||
|
|
d2a3a6cddf | ||
|
|
0d140b420e | ||
|
|
6be463475c | ||
|
|
858cead89d | ||
|
|
ba439d6f5f | ||
|
|
9c09906bdd | ||
|
|
90173a30e7 | ||
|
|
902bf0c981 | ||
|
|
5ac3239f4f | ||
|
|
3a0fa36d19 | ||
|
|
3b0f919baa | ||
|
|
b2226efd59 | ||
|
|
a0beb27072 | ||
|
|
5cd68191fc | ||
|
|
b9978f531a | ||
|
|
11a4379fb6 | ||
|
|
090c4a6dc7 | ||
|
|
0b3bd899fe | ||
|
|
0e913ac420 | ||
|
|
3e5df637fd | ||
|
|
81e96861d7 | ||
|
|
6284a48a3a | ||
|
|
c0854c7923 | ||
|
|
b4c98820d9 | ||
|
|
1cbb7d6bfd | ||
|
|
be58cec1c9 | ||
|
|
839af15311 | ||
|
|
a985139f9c | ||
|
|
7719c17227 | ||
|
|
b92111c7c2 | ||
|
|
a3ceef26f8 | ||
|
|
e31515f6fd | ||
|
|
38be2c53e7 | ||
|
|
1d64040129 | ||
|
|
ae9a1cb89e | ||
|
|
9858a7a5ce | ||
|
|
e5dec41b49 | ||
|
|
b98f4a6ec1 | ||
|
|
ba431638b7 | ||
|
|
ffeca7658a | ||
|
|
daf06a33f8 | ||
|
|
e81b6a14de | ||
|
|
c79469af30 | ||
|
|
d761e49a64 | ||
|
|
0f865be593 | ||
|
|
4130bc1726 | ||
|
|
5559c3de42 | ||
|
|
1884693ca6 | ||
|
|
1eb707a759 | ||
|
|
91246b7078 | ||
|
|
31dd47aac8 | ||
|
|
644a3c795e | ||
|
|
521976e57e | ||
|
|
f1df4f36df | ||
|
|
e8e566ea62 | ||
|
|
b0a865ddb8 | ||
|
|
937b271686 | ||
|
|
c4a533bc4b | ||
|
|
16aa58482f | ||
|
|
1674825a94 | ||
|
|
a382b4d589 | ||
|
|
22a8845c53 | ||
|
|
34b60c755b | ||
|
|
38d7126959 | ||
|
|
00a89ccb48 | ||
|
|
4e518d90ad | ||
|
|
0d4bacd892 | ||
|
|
d876634899 | ||
|
|
8c8b4ea488 | ||
|
|
209f6ebb87 | ||
|
|
15faefff93 | ||
|
|
bf80b34e8e | ||
|
|
8c87102116 | ||
|
|
500e180241 | ||
|
|
20997cf5ac | ||
|
|
96127b52e6 | ||
|
|
192363bb64 | ||
|
|
8d7b0625b0 | ||
|
|
47717760b2 | ||
|
|
9c5d2da199 | ||
|
|
556f9d7d96 | ||
|
|
e5bd6493f5 | ||
|
|
7a4cf8be2d | ||
|
|
4a7c0008e0 | ||
|
|
d15d2ca56b | ||
|
|
a91fd11844 | ||
|
|
37d5606eb3 | ||
|
|
56269b2fcf | ||
|
|
abc1ad4356 | ||
|
|
052756140b | ||
|
|
6f03bd5827 | ||
|
|
437052b58b | ||
|
|
e35d3473c7 | ||
|
|
f48703aef7 | ||
|
|
393e202102 | ||
|
|
809732e7f3 | ||
|
|
a7abca2038 | ||
|
|
475f6774bf | ||
|
|
d4a775025f | ||
|
|
21c8fac0a4 | ||
|
|
d0051016fc | ||
|
|
b495593b59 | ||
|
|
aaa8d7e97d | ||
|
|
af126c70a6 | ||
|
|
1a4e3fa55a | ||
|
|
e740128baf | ||
|
|
fdf5626a93 | ||
|
|
40c62234bb | ||
|
|
b96a6cf7c2 | ||
|
|
6728507914 | ||
|
|
f679fa2c0c | ||
|
|
ced8aa661b | ||
|
|
b936ff3e36 | ||
|
|
841b458f05 | ||
|
|
fd13240134 | ||
|
|
c70917382f | ||
|
|
8d2ea8ae87 | ||
|
|
d4a91f7240 | ||
|
|
4ba8c0f380 | ||
|
|
cb9b482340 | ||
|
|
797f33e0eb | ||
|
|
6a4991354b | ||
|
|
336646c280 | ||
|
|
2664ba939e | ||
|
|
47c3680d8c | ||
|
|
f281f68a8f | ||
|
|
1852ee5216 |
8
.dockerignore
Normal file
8
.dockerignore
Normal file
@@ -0,0 +1,8 @@
|
||||
/target
|
||||
readme.md
|
||||
typeshare.toml
|
||||
LICENSE
|
||||
*.code-workspace
|
||||
|
||||
*/node_modules
|
||||
*/dist
|
||||
14
.gitignore
vendored
14
.gitignore
vendored
@@ -1,7 +1,9 @@
|
||||
.DS_Store
|
||||
target
|
||||
/frontend/build
|
||||
node_modules
|
||||
build
|
||||
secrets
|
||||
bundle.*
|
||||
monitor_mount
|
||||
.env*
|
||||
/lib/ts_client/build
|
||||
dist
|
||||
.env
|
||||
.env.development
|
||||
creds.toml
|
||||
core.config.toml
|
||||
60
.vscode/fastify.code-snippets
vendored
60
.vscode/fastify.code-snippets
vendored
@@ -1,60 +0,0 @@
|
||||
{
|
||||
"service": {
|
||||
"scope": "typescript",
|
||||
"prefix": "plugin",
|
||||
"body": [
|
||||
"import { FastifyInstance } from \"fastify\";",
|
||||
"import fp from \"fastify-plugin\";",
|
||||
"",
|
||||
"const ${1:$TM_FILENAME_BASE} = fp((app: FastifyInstance, _: {}, done: () => void) => {",
|
||||
"\t${0}",
|
||||
"\tdone();",
|
||||
"});",
|
||||
"",
|
||||
"export default ${1:$TM_FILENAME_BASE};"
|
||||
]
|
||||
},
|
||||
|
||||
"schema": {
|
||||
"scope": "typescript",
|
||||
"prefix": "schema",
|
||||
"body": [
|
||||
"import { FastifyInstance } from \"fastify\";",
|
||||
"import fp from \"fastify-plugin\";",
|
||||
"import { Schema } from \"mongoose\";",
|
||||
"import model from \"../../util/model\";",
|
||||
"",
|
||||
"const ${2:$TM_FILENAME_BASE} = fp((app: FastifyInstance, _: {}, done: () => void) => {",
|
||||
"\tconst schema = new Schema({",
|
||||
"\t\t${0}",
|
||||
"\t});",
|
||||
"\t",
|
||||
"\tapp.decorate(\"${2:$TM_FILENAME_BASE}\", model(app, \"${1}\", schema));",
|
||||
"\t",
|
||||
"\tdone();",
|
||||
"});",
|
||||
"",
|
||||
"export default ${2:$TM_FILENAME_BASE};"
|
||||
]
|
||||
},
|
||||
|
||||
"get-auth": {
|
||||
"scope": "typescript",
|
||||
"prefix": "get-auth",
|
||||
"body": [
|
||||
"app.get(\"/${1}\", { onRequest: [app.auth, app.userEnabled] }, async (req, res) => {",
|
||||
"\t${0}",
|
||||
"});"
|
||||
]
|
||||
},
|
||||
|
||||
"post-auth": {
|
||||
"scope": "typescript",
|
||||
"prefix": "post-auth",
|
||||
"body": [
|
||||
"app.post(\"/${1}\", { onRequest: [app.auth, app.userEnabled] }, async (req, res) => {",
|
||||
"\t${0}",
|
||||
"});"
|
||||
]
|
||||
}
|
||||
}
|
||||
21
.vscode/ink.code-snippets
vendored
21
.vscode/ink.code-snippets
vendored
@@ -1,21 +0,0 @@
|
||||
{
|
||||
"ink-comp": {
|
||||
"scope": "typescriptreact,javascriptreact",
|
||||
"prefix": "ink-comp",
|
||||
"body": [
|
||||
"import React from \"react\";",
|
||||
"import { Box } from \"ink\";",
|
||||
|
||||
"",
|
||||
"const ${1:$TM_FILENAME_BASE} = ({}: {}) => {",
|
||||
"\treturn (",
|
||||
"\t\t<Box>",
|
||||
"\t\t\t${0}",
|
||||
"\t\t</Box>",
|
||||
"\t);",
|
||||
"}",
|
||||
"",
|
||||
"export default ${1:$TM_FILENAME_BASE};"
|
||||
]
|
||||
},
|
||||
}
|
||||
25
.vscode/resolver.code-snippets
vendored
Normal file
25
.vscode/resolver.code-snippets
vendored
Normal file
@@ -0,0 +1,25 @@
|
||||
{
|
||||
"resolve": {
|
||||
"scope": "rust",
|
||||
"prefix": "resolve",
|
||||
"body": [
|
||||
"impl Resolve<${1}, User> for State {",
|
||||
"\tasync fn resolve(&self, ${1} { ${0} }: ${1}, _: User) -> anyhow::Result<${2}> {",
|
||||
"\t\ttodo!()",
|
||||
"\t}",
|
||||
"}"
|
||||
]
|
||||
},
|
||||
"static": {
|
||||
"scope": "rust",
|
||||
"prefix": "static",
|
||||
"body": [
|
||||
"fn ${1}() -> &'static ${2} {",
|
||||
"\tstatic ${3}: OnceLock<${2}> = OnceLock::new();",
|
||||
"\t${3}.get_or_init(|| {",
|
||||
"\t\t${0}",
|
||||
"\t})",
|
||||
"}"
|
||||
]
|
||||
}
|
||||
}
|
||||
3
.vscode/settings.json
vendored
3
.vscode/settings.json
vendored
@@ -1,3 +0,0 @@
|
||||
{
|
||||
"npm.exclude": "**/monitor/**"
|
||||
}
|
||||
64
.vscode/solid.code-snippets
vendored
64
.vscode/solid.code-snippets
vendored
@@ -1,64 +0,0 @@
|
||||
{
|
||||
"component": {
|
||||
"scope": "typescriptreact,javascriptreact",
|
||||
"prefix": "comp",
|
||||
"body": [
|
||||
"import { Component } from \"solid-js\";",
|
||||
"",
|
||||
"const ${1:$TM_FILENAME_BASE}: Component<{}> = (p) => {",
|
||||
"\treturn (",
|
||||
"\t\t<div>",
|
||||
"\t\t\t${0}",
|
||||
"\t\t</div>",
|
||||
"\t);",
|
||||
"}",
|
||||
"",
|
||||
"export default ${1:$TM_FILENAME_BASE};"
|
||||
]
|
||||
},
|
||||
"component-with-css": {
|
||||
"scope": "typescriptreact,javascriptreact",
|
||||
"prefix": "css-comp",
|
||||
"body": [
|
||||
"import { Component } from \"solid-js\";",
|
||||
"import s from \"./${1:$TM_FILENAME_BASE}.module.scss\";",
|
||||
"",
|
||||
"const ${2:$TM_FILENAME_BASE}: Component<{}> = (p) => {",
|
||||
"\treturn (",
|
||||
"\t\t<div class={s.${2:$TM_FILENAME_BASE}} >",
|
||||
"\t\t\t${0}",
|
||||
"\t\t</div>",
|
||||
"\t);",
|
||||
"}",
|
||||
"",
|
||||
"export default ${2:$TM_FILENAME_BASE};"
|
||||
]
|
||||
},
|
||||
"context": {
|
||||
"scope": "typescriptreact,javascriptreact",
|
||||
"prefix": "provider",
|
||||
"body": [
|
||||
"import { Component, createContext, useContext } from \"solid-js\";",
|
||||
"",
|
||||
"const value = () => {",
|
||||
"\treturn {};",
|
||||
"}",
|
||||
"",
|
||||
"export type Value = ReturnType<typeof value>;",
|
||||
"",
|
||||
"const context = createContext<Value>();",
|
||||
"",
|
||||
"export const Provider: Component<{}> = (p) => {",
|
||||
"\treturn (",
|
||||
"\t\t<context.Provider value={value()}>",
|
||||
"\t\t\t{p.children}",
|
||||
"\t\t</context.Provider>",
|
||||
"\t);",
|
||||
"}",
|
||||
"",
|
||||
"export function useValue() {",
|
||||
"\treturn useContext(context) as Value;",
|
||||
"}"
|
||||
]
|
||||
}
|
||||
}
|
||||
93
.vscode/tasks.json
vendored
Normal file
93
.vscode/tasks.json
vendored
Normal file
@@ -0,0 +1,93 @@
|
||||
{
|
||||
"version": "2.0.0",
|
||||
"tasks": [
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "build",
|
||||
"group": {
|
||||
"kind": "build",
|
||||
"isDefault": true
|
||||
},
|
||||
"label": "rust: cargo build"
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "fmt",
|
||||
"label": "rust: cargo fmt"
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "check",
|
||||
"label": "rust: cargo check"
|
||||
},
|
||||
{
|
||||
"label": "start dev",
|
||||
"dependsOn": [
|
||||
"run core",
|
||||
"start frontend"
|
||||
],
|
||||
"problemMatcher": []
|
||||
},
|
||||
{
|
||||
"type": "shell",
|
||||
"command": "yarn start",
|
||||
"label": "start frontend",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/frontend"
|
||||
},
|
||||
"presentation": {
|
||||
"group": "start"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "run",
|
||||
"label": "run core",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/bin/core"
|
||||
},
|
||||
"presentation": {
|
||||
"group": "start"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "run",
|
||||
"label": "run periphery",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/bin/periphery"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "run",
|
||||
"label": "run tests",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/bin/tests"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "publish",
|
||||
"args": ["--allow-dirty"],
|
||||
"label": "publish types",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/lib/types"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "cargo",
|
||||
"command": "publish",
|
||||
"label": "publish rs client",
|
||||
"options": {
|
||||
"cwd": "${workspaceFolder}/lib/rs_client"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "shell",
|
||||
"command": "node ./client/ts/generate_types.mjs",
|
||||
"label": "generate typescript types",
|
||||
"problemMatcher": []
|
||||
}
|
||||
]
|
||||
}
|
||||
4612
Cargo.lock
generated
Normal file
4612
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
95
Cargo.toml
Normal file
95
Cargo.toml
Normal file
@@ -0,0 +1,95 @@
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = ["bin/*", "lib/*", "client/core/rs", "client/periphery/rs"]
|
||||
|
||||
[workspace.package]
|
||||
version = "1.4.0"
|
||||
edition = "2021"
|
||||
authors = ["mbecker20 <becker.maxh@gmail.com>"]
|
||||
license = "GPL-3.0-or-later"
|
||||
repository = "https://github.com/mbecker20/monitor"
|
||||
homepage = "https://docs.monitor.mogh.tech"
|
||||
|
||||
[workspace.dependencies]
|
||||
# LOCAL
|
||||
monitor_client = { path = "client/core/rs" }
|
||||
periphery_client = { path = "client/periphery/rs" }
|
||||
logger = { path = "lib/logger" }
|
||||
|
||||
# MOGH
|
||||
run_command = { version = "0.0.6", features = ["async_tokio"] }
|
||||
serror = { version = "0.3.4", default-features = false }
|
||||
slack = { version = "0.1.0", package = "slack_client_rs" }
|
||||
derive_default_builder = "0.1.8"
|
||||
derive_empty_traits = "0.1.0"
|
||||
merge_config_files = "0.1.5"
|
||||
termination_signal = "0.1.3"
|
||||
async_timing_util = "0.1.14"
|
||||
partial_derive2 = "0.4.2"
|
||||
derive_variants = "0.1.3"
|
||||
mongo_indexed = "0.3.0"
|
||||
resolver_api = "1.1.0"
|
||||
parse_csl = "0.1.0"
|
||||
mungos = "0.5.6"
|
||||
svi = "1.0.1"
|
||||
|
||||
# ASYNC
|
||||
tokio = { version = "1.37.0", features = ["full"] }
|
||||
reqwest = { version = "0.12.4", features = ["json"] }
|
||||
tokio-util = "0.7.11"
|
||||
futures = "0.3.30"
|
||||
futures-util = "0.3.30"
|
||||
|
||||
# SERVER
|
||||
axum = { version = "0.7.5", features = ["ws", "json"] }
|
||||
axum-extra = { version = "0.9.3", features = ["typed-header"] }
|
||||
tower = { version = "0.4.13", features = ["timeout"] }
|
||||
tower-http = { version = "0.5.2", features = ["fs", "cors"] }
|
||||
tokio-tungstenite = "0.21.0"
|
||||
|
||||
# SER/DE
|
||||
serde = { version = "1.0.201", features = ["derive"] }
|
||||
strum = { version = "0.26.2", features = ["derive"] }
|
||||
serde_json = "1.0.116"
|
||||
toml = "0.8.12"
|
||||
|
||||
# ERROR
|
||||
anyhow = "1.0.83"
|
||||
thiserror = "1.0.60"
|
||||
|
||||
# LOGGING
|
||||
opentelemetry_sdk = { version = "0.22.1", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.18", features = ["json"] }
|
||||
tracing-opentelemetry = "0.23.0"
|
||||
opentelemetry-otlp = "0.15.0"
|
||||
opentelemetry = "0.22.0"
|
||||
tracing = "0.1.40"
|
||||
|
||||
# CONFIG
|
||||
clap = { version = "4.5.4", features = ["derive"] }
|
||||
dotenv = "0.15.0"
|
||||
envy = "0.4.2"
|
||||
|
||||
# CRYPTO
|
||||
uuid = { version = "1.8.0", features = ["v4", "fast-rng", "serde"] }
|
||||
urlencoding = "2.1.3"
|
||||
rand = "0.8.5"
|
||||
jwt = "0.16.0"
|
||||
hmac = "0.12.1"
|
||||
sha2 = "0.10.8"
|
||||
bcrypt = "0.15.1"
|
||||
hex = "0.4.3"
|
||||
|
||||
# SYSTEM
|
||||
bollard = "0.16.1"
|
||||
sysinfo = "0.30.12"
|
||||
|
||||
# CLOUD
|
||||
aws-config = "1.3.0"
|
||||
aws-sdk-ec2 = "1.40.0"
|
||||
|
||||
# MISC
|
||||
derive_builder = "0.20.0"
|
||||
typeshare = "1.0.3"
|
||||
colored = "2.1.0"
|
||||
bson = "2.10.0"
|
||||
25
bin/alerter/Cargo.toml
Normal file
25
bin/alerter/Cargo.toml
Normal file
@@ -0,0 +1,25 @@
|
||||
[package]
|
||||
name = "alerter"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
# local
|
||||
monitor_client.workspace = true
|
||||
logger.workspace = true
|
||||
# mogh
|
||||
termination_signal.workspace = true
|
||||
# external
|
||||
tokio.workspace = true
|
||||
tracing.workspace = true
|
||||
axum.workspace = true
|
||||
anyhow.workspace = true
|
||||
serde.workspace = true
|
||||
dotenv.workspace = true
|
||||
envy.workspace = true
|
||||
14
bin/alerter/Dockerfile
Normal file
14
bin/alerter/Dockerfile
Normal file
@@ -0,0 +1,14 @@
|
||||
FROM rust:1.71.1 as builder
|
||||
WORKDIR /builder
|
||||
|
||||
COPY . .
|
||||
|
||||
RUN cargo build -p alert_logger --release
|
||||
|
||||
FROM gcr.io/distroless/cc
|
||||
|
||||
COPY --from=builder /builder/target/release/alert_logger /
|
||||
|
||||
EXPOSE 7000
|
||||
|
||||
CMD ["./alert_logger"]
|
||||
4
bin/alerter/README.md
Normal file
4
bin/alerter/README.md
Normal file
@@ -0,0 +1,4 @@
|
||||
# Alerter
|
||||
|
||||
This crate sets up a basic axum server that listens for incoming alert POSTs.
|
||||
It can be used as a monitor alerting endpoint, and serves as a template for other custom alerter implementations.
|
||||
70
bin/alerter/src/main.rs
Normal file
70
bin/alerter/src/main.rs
Normal file
@@ -0,0 +1,70 @@
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
use std::{net::SocketAddr, str::FromStr};
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::{routing::post, Json, Router};
|
||||
use monitor_client::entities::{
|
||||
alert::Alert, server::stats::SeverityLevel,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
use termination_signal::tokio::immediate_term_handle;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct Env {
|
||||
#[serde(default = "default_port")]
|
||||
port: u16,
|
||||
}
|
||||
|
||||
fn default_port() -> u16 {
|
||||
7000
|
||||
}
|
||||
|
||||
async fn app() -> anyhow::Result<()> {
|
||||
dotenv::dotenv().ok();
|
||||
logger::init(&Default::default())?;
|
||||
|
||||
let Env { port } =
|
||||
envy::from_env().context("failed to parse env")?;
|
||||
|
||||
let socket_addr = SocketAddr::from_str(&format!("0.0.0.0:{port}"))
|
||||
.context("invalid socket addr")?;
|
||||
|
||||
info!("v {} | {socket_addr}", env!("CARGO_PKG_VERSION"));
|
||||
|
||||
let app = Router::new().route(
|
||||
"/",
|
||||
post(|Json(alert): Json<Alert>| async move {
|
||||
if alert.resolved {
|
||||
info!("Alert Resolved!: {alert:?}");
|
||||
return;
|
||||
}
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => info!("{alert:?}"),
|
||||
SeverityLevel::Warning => warn!("{alert:?}"),
|
||||
SeverityLevel::Critical => error!("{alert:?}"),
|
||||
}
|
||||
}),
|
||||
);
|
||||
|
||||
let listener = tokio::net::TcpListener::bind(socket_addr)
|
||||
.await
|
||||
.context("failed to bind tcp listener")?;
|
||||
|
||||
axum::serve(listener, app).await.context("server crashed")
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
let term_signal = immediate_term_handle()?;
|
||||
|
||||
let app = tokio::spawn(app());
|
||||
|
||||
tokio::select! {
|
||||
res = app => return res?,
|
||||
_ = term_signal => {},
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
36
bin/cli/Cargo.toml
Normal file
36
bin/cli/Cargo.toml
Normal file
@@ -0,0 +1,36 @@
|
||||
[package]
|
||||
name = "monitor_cli"
|
||||
description = "Command line tool to sync monitor resources and execute file defined procedures"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "monitor"
|
||||
path = "src/main.rs"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[patch.crates-io]
|
||||
monitor_client.workspace = true
|
||||
|
||||
[dependencies]
|
||||
# local client
|
||||
monitor_client = "1.3.0"
|
||||
# mogh
|
||||
partial_derive2.workspace = true
|
||||
# external
|
||||
tracing-subscriber.workspace = true
|
||||
serde_json.workspace = true
|
||||
futures.workspace = true
|
||||
tracing.workspace = true
|
||||
colored.workspace = true
|
||||
anyhow.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
strum.workspace = true
|
||||
toml.workspace = true
|
||||
clap.workspace = true
|
||||
40
bin/cli/README.md
Normal file
40
bin/cli/README.md
Normal file
@@ -0,0 +1,40 @@
|
||||
# Monitor CLI
|
||||
|
||||
Monitor CLI is a tool to sync monitor resources and execute file defined procedures.
|
||||
|
||||
## Install
|
||||
|
||||
```sh
|
||||
cargo install monitor_cli
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Configure a file `~/.config/monitor/creds.toml` file with contents:
|
||||
```toml
|
||||
url = "https://your.monitor.address"
|
||||
key = "YOUR-API-KEY"
|
||||
secret = "YOUR-API-SECRET"
|
||||
```
|
||||
|
||||
Note. You can specify a different creds file by using `--creds ./other/path.toml`.
|
||||
|
||||
With your creds in place, you can run syncs:
|
||||
|
||||
```sh
|
||||
## Sync resources in a single file
|
||||
monitor sync ./resources/deployments.toml
|
||||
|
||||
## Sync resources gathered across multiple files in a directory
|
||||
monitor sync ./resources
|
||||
|
||||
## Path defaults to './resources', in this case you can just use:
|
||||
monitor sync
|
||||
```
|
||||
|
||||
And executions:
|
||||
|
||||
```sh
|
||||
## Execute a TOML defined procedure
|
||||
monitor exec ./execution/execution.toml
|
||||
```
|
||||
0
bin/cli/exec/exec1.toml
Normal file
0
bin/cli/exec/exec1.toml
Normal file
83
bin/cli/resources/builds.toml
Normal file
83
bin/cli/resources/builds.toml
Normal file
@@ -0,0 +1,83 @@
|
||||
[[build]]
|
||||
name = "monitor_core"
|
||||
description = "Public monitor core build"
|
||||
tags = ["monitor"]
|
||||
|
||||
[build.config]
|
||||
builder_id = "mogh-builder"
|
||||
repo = "mbecker20/monitor"
|
||||
branch = "main"
|
||||
docker_account = "mbecker2020"
|
||||
build_path = "."
|
||||
dockerfile_path = "bin/core/Dockerfile"
|
||||
|
||||
[[build]]
|
||||
name = "monitor_core_dev"
|
||||
description = ""
|
||||
tags = ["monitor", "dev"]
|
||||
|
||||
[build.config]
|
||||
builder_id = "mogh-builder"
|
||||
repo = "mbecker20/monitor"
|
||||
branch = "main"
|
||||
docker_account = "mbecker2020"
|
||||
build_path = "."
|
||||
dockerfile_path = "bin/core/Dockerfile"
|
||||
|
||||
[[build]]
|
||||
name = "monitor_frontend"
|
||||
description = "standalone hosted frontend for monitor.mogh.tech"
|
||||
tags = ["monitor", "frontend"]
|
||||
|
||||
[build.config]
|
||||
builder_id = "mogh-builder"
|
||||
repo = "mbecker20/monitor"
|
||||
branch = "main"
|
||||
docker_account = "mbecker2020"
|
||||
build_path = "."
|
||||
dockerfile_path = "frontend/Dockerfile"
|
||||
|
||||
[[build.config.build_args]]
|
||||
variable = "VITE_MONITOR_HOST"
|
||||
value = "https://monitor.api.mogh.tech"
|
||||
|
||||
[[build]]
|
||||
name = "monitor_frontend_dev"
|
||||
description = "standalone hosted frontend for monitor-dev.mogh.tech"
|
||||
tags = ["monitor", "frontend"]
|
||||
|
||||
[build.config]
|
||||
builder_id = "mogh-builder"
|
||||
repo = "mbecker20/monitor"
|
||||
branch = "main"
|
||||
docker_account = "mbecker2020"
|
||||
build_path = "."
|
||||
dockerfile_path = "frontend/Dockerfile"
|
||||
|
||||
[[build.config.build_args]]
|
||||
variable = "VITE_MONITOR_HOST"
|
||||
value = "https://monitor-dev.api.mogh.tech"
|
||||
|
||||
## BUILDER
|
||||
|
||||
[[builder]]
|
||||
name = "mogh-builder"
|
||||
description = ""
|
||||
tags = []
|
||||
|
||||
[builder.config]
|
||||
type = "Aws"
|
||||
|
||||
[builder.config.params]
|
||||
region = "us-east-2"
|
||||
instance_type = "c5.2xlarge"
|
||||
volume_gb = 20
|
||||
port = 8120
|
||||
ami_id = "ami-0005a05fa63a080ab"
|
||||
subnet_id = "subnet-02ae5ad480eacc4bc"
|
||||
security_group_ids = ["sg-049d98c819f9ace58", "sg-006c0ca638af8eb44"]
|
||||
key_pair_name = "mogh-key"
|
||||
assign_public_ip = true
|
||||
use_public_ip = false
|
||||
github_accounts = []
|
||||
docker_accounts = []
|
||||
213
bin/cli/resources/deployments.toml
Normal file
213
bin/cli/resources/deployments.toml
Normal file
@@ -0,0 +1,213 @@
|
||||
## MONITOR PROXY
|
||||
[[deployment]]
|
||||
name = "monitor-proxy"
|
||||
description = "An NGINX proxy for mogh.tech"
|
||||
tags = ["monitor"]
|
||||
config.server_id = "monitor-01"
|
||||
config.network = "host"
|
||||
config.restart = "on-failure"
|
||||
config.image.type = "Image"
|
||||
config.image.params.image = "jc21/nginx-proxy-manager"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/nginx/data"
|
||||
container = "/data"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/nginx/letsencrypt"
|
||||
container = "/etc/letsencrypt"
|
||||
|
||||
## MONITOR MONGO
|
||||
[[deployment]]
|
||||
name = "monitor-mongo"
|
||||
description = ""
|
||||
tags = ["monitor"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "no"
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "mongo"
|
||||
|
||||
## MONITOR CORE
|
||||
[[deployment]]
|
||||
name = "monitor-core"
|
||||
description = ""
|
||||
tags = ["monitor"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "no"
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "mbecker2020/monitor_core"
|
||||
|
||||
## GRAFANA
|
||||
[[deployment]]
|
||||
name = "grafana"
|
||||
description = ""
|
||||
tags = ["logging"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
extra_args = ["--user root"]
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "grafana/grafana"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/grafana"
|
||||
container = "/var/lib/grafana"
|
||||
|
||||
[[deployment.config.environment]]
|
||||
variable = "GF_SERVER_HTTP_PORT"
|
||||
value = "3080"
|
||||
|
||||
[[deployment.config.labels]]
|
||||
variable = "vector"
|
||||
value = "key-value"
|
||||
|
||||
## LOKI
|
||||
[[deployment]]
|
||||
name = "loki"
|
||||
description = ""
|
||||
tags = ["logging"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
extra_args = ["--user root"]
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "grafana/loki"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/loki"
|
||||
container = "/loki"
|
||||
|
||||
[[deployment]]
|
||||
name = "tempo"
|
||||
description = ""
|
||||
tags = ["logging"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
command = "-server.http-listen-port=3200 -server.grpc-listen-port=9096 --storage.trace.backend=local --storage.trace.local.path=/tmp/tempo/traces --storage.trace.wal.path=/tmp/tempo/wal"
|
||||
extra_args = ["--user root"]
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "grafana/tempo"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/tempo"
|
||||
container = "/tmp/tempo"
|
||||
|
||||
[[deployment.config.labels]]
|
||||
variable = "vector"
|
||||
value = "key-value"
|
||||
|
||||
## VECTOR
|
||||
[[deployment]]
|
||||
name = "vector"
|
||||
description = ""
|
||||
tags = ["logging"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
command = "--config /etc/vector/*.toml"
|
||||
extra_args = ["--user root"]
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Image"
|
||||
params.image = "timberio/vector:latest-debian"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/home/ubuntu/.config/vector"
|
||||
container = "/etc/vector"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/vector"
|
||||
container = "/var/lib/vector"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/var/run/docker.sock"
|
||||
container = "/var/run/docker.sock"
|
||||
|
||||
[[deployment.config.labels]]
|
||||
variable = "vector"
|
||||
value = "key-value"
|
||||
|
||||
## MONITOR CORE DEV
|
||||
[[deployment]]
|
||||
name = "monitor-core-dev"
|
||||
description = ""
|
||||
tags = ["monitor", "dev"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
redeploy_on_build = true
|
||||
network = "host"
|
||||
restart = "no"
|
||||
|
||||
[deployment.config.image]
|
||||
type = "Build"
|
||||
params.build_id = "monitor_core"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/home/ubuntu/.config/monitor/dev.core.config.toml"
|
||||
container = "/config/config.toml"
|
||||
|
||||
[[deployment.config.volumes]]
|
||||
local = "/data/repos/monitor-dev-frontend/frontend/dist"
|
||||
container = "/frontend"
|
||||
|
||||
[[deployment.config.labels]]
|
||||
variable = "vector"
|
||||
value = "rust"
|
||||
|
||||
## MONITOR FRONTEND
|
||||
[[deployment]]
|
||||
name = "monitor-frontend"
|
||||
description = ""
|
||||
tags = ["monitor", "frontend"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
redeploy_on_build = true
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
image.type = "Build"
|
||||
image.params.build = "monitor_frontend"
|
||||
|
||||
## MONITOR DEV FRONTEND
|
||||
[[deployment]]
|
||||
name = "monitor-dev-frontend"
|
||||
description = ""
|
||||
tags = ["monitor", "dev", "frontend"]
|
||||
|
||||
[deployment.config]
|
||||
server_id = "monitor-01"
|
||||
redeploy_on_build = true
|
||||
network = "host"
|
||||
restart = "unless-stopped"
|
||||
image.type = "Build"
|
||||
image.params.build = "monitor_frontend_dev"
|
||||
|
||||
[[deployment.config.environment]]
|
||||
variable = "PORT"
|
||||
value = "4175"
|
||||
8
bin/cli/resources/procedures.toml
Normal file
8
bin/cli/resources/procedures.toml
Normal file
@@ -0,0 +1,8 @@
|
||||
[[procedure]]
|
||||
name = "test-procedure"
|
||||
description = ""
|
||||
tags = []
|
||||
|
||||
[procedure.config]
|
||||
procedure_type = "Sequence"
|
||||
executions = []
|
||||
37
bin/cli/resources/repos.toml
Normal file
37
bin/cli/resources/repos.toml
Normal file
@@ -0,0 +1,37 @@
|
||||
# [[repo]]
|
||||
# name = "monitor-dev-frontend"
|
||||
# description = "Used as frontend for monitor-core-dev"
|
||||
# tags = ["monitor", "dev"]
|
||||
|
||||
# [repo.config]
|
||||
# server_id = "monitor-01"
|
||||
# repo = "mbecker20/monitor"
|
||||
# branch = "main"
|
||||
# github_account = ""
|
||||
|
||||
# [repo.config.on_clone]
|
||||
# path = ""
|
||||
# command = ""
|
||||
|
||||
# [repo.config.on_pull]
|
||||
# path = "frontend"
|
||||
# command = "sh on_pull.sh"
|
||||
|
||||
[[repo]]
|
||||
name = "monitor-periphery"
|
||||
description = ""
|
||||
tags = ["monitor"]
|
||||
|
||||
[repo.config]
|
||||
server_id = "monitor-01"
|
||||
repo = "mbecker20/monitor"
|
||||
branch = "main"
|
||||
github_account = ""
|
||||
|
||||
[repo.config.on_clone]
|
||||
path = ""
|
||||
command = ""
|
||||
|
||||
[repo.config.on_pull]
|
||||
path = "."
|
||||
command = "/root/.cargo/bin/cargo build -p monitor_periphery --release && cp ./target/release/periphery /home/ubuntu/periphery"
|
||||
51
bin/cli/resources/servers.toml
Normal file
51
bin/cli/resources/servers.toml
Normal file
@@ -0,0 +1,51 @@
|
||||
[[server]]
|
||||
name = "monitor-01"
|
||||
description = ""
|
||||
tags = ["monitor"]
|
||||
|
||||
[server.config]
|
||||
address = "http://localhost:8120"
|
||||
enabled = true
|
||||
stats_monitoring = true
|
||||
auto_prune = true
|
||||
send_unreachable_alerts = true
|
||||
send_cpu_alerts = true
|
||||
send_mem_alerts = true
|
||||
send_disk_alerts = true
|
||||
region = "us-east-2"
|
||||
|
||||
## TEMPLATE
|
||||
|
||||
[[server_template]]
|
||||
name = "mogh-template"
|
||||
description = ""
|
||||
tags = []
|
||||
|
||||
[server_template.config]
|
||||
type = "Aws"
|
||||
|
||||
[server_template.config.params]
|
||||
region = "us-east-2"
|
||||
instance_type = "t3.medium"
|
||||
ami_id = "ami-0005a05fa63a080ab"
|
||||
subnet_id = "subnet-02ae5ad480eacc4bc"
|
||||
key_pair_name = "mogh-key"
|
||||
assign_public_ip = true
|
||||
use_public_ip = false
|
||||
port = 8120
|
||||
user_data = ""
|
||||
security_group_ids = ["sg-049d98c819f9ace58", "sg-006c0ca638af8eb44"]
|
||||
|
||||
[[server_template.config.params.volumes]]
|
||||
device_name = "/dev/sda1"
|
||||
size_gb = 20
|
||||
volume_type = "gp2"
|
||||
iops = 0
|
||||
throughput = 0
|
||||
|
||||
[[server_template.config.params.volumes]]
|
||||
device_name = "/dev/sdb"
|
||||
size_gb = 10
|
||||
volume_type = "gp3"
|
||||
iops = 0
|
||||
throughput = 0
|
||||
189
bin/cli/src/execution.rs
Normal file
189
bin/cli/src/execution.rs
Normal file
@@ -0,0 +1,189 @@
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use futures::future::join_all;
|
||||
use monitor_client::api::execute;
|
||||
use serde::Deserialize;
|
||||
use strum::Display;
|
||||
|
||||
use crate::monitor_client;
|
||||
|
||||
pub async fn run_execution(path: &Path) -> anyhow::Result<()> {
|
||||
let ExecutionFile { name, stages } = crate::parse_toml_file(path)?;
|
||||
|
||||
info!("EXECUTION: {name}");
|
||||
info!("path: {path:?}");
|
||||
println!("{stages:#?}");
|
||||
|
||||
crate::wait_for_enter("EXECUTE")?;
|
||||
|
||||
run_stages(stages)
|
||||
.await
|
||||
.context("failed during a stage. terminating run.")?;
|
||||
|
||||
info!("finished successfully ✅");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Specifies sequence of stages (build / deploy) on resources
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct ExecutionFile {
|
||||
pub name: String,
|
||||
#[serde(rename = "stage")]
|
||||
pub stages: Vec<Stage>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct Stage {
|
||||
pub name: String,
|
||||
pub action: ExecutionType,
|
||||
/// resource names
|
||||
pub targets: Vec<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Deserialize, Display)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
#[strum(serialize_all = "snake_case")]
|
||||
pub enum ExecutionType {
|
||||
Build,
|
||||
Deploy,
|
||||
StartContainer,
|
||||
StopContainer,
|
||||
DestroyContainer,
|
||||
}
|
||||
|
||||
pub async fn run_stages(stages: Vec<Stage>) -> anyhow::Result<()> {
|
||||
for Stage {
|
||||
name,
|
||||
action,
|
||||
targets,
|
||||
} in stages
|
||||
{
|
||||
info!("running {action} stage: {name}... ⏳");
|
||||
match action {
|
||||
ExecutionType::Build => {
|
||||
trigger_builds_in_parallel(&targets).await?;
|
||||
}
|
||||
ExecutionType::Deploy => {
|
||||
redeploy_deployments_in_parallel(&targets).await?;
|
||||
}
|
||||
ExecutionType::StartContainer => {
|
||||
start_containers_in_parallel(&targets).await?
|
||||
}
|
||||
ExecutionType::StopContainer => {
|
||||
stop_containers_in_parallel(&targets).await?
|
||||
}
|
||||
ExecutionType::DestroyContainer => {
|
||||
destroy_containers_in_parallel(&targets).await?;
|
||||
}
|
||||
}
|
||||
info!("finished {action} stage: {name} ✅");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn redeploy_deployments_in_parallel(
|
||||
deployments: &[String],
|
||||
) -> anyhow::Result<()> {
|
||||
let futes = deployments.iter().map(|deployment| async move {
|
||||
monitor_client()
|
||||
.execute(execute::Deploy { deployment: deployment.to_string(), stop_signal: None, stop_time: None })
|
||||
.await
|
||||
.with_context(|| format!("failed to deploy {deployment}"))
|
||||
.and_then(|update| {
|
||||
if update.success {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!(
|
||||
"failed to deploy {deployment}. operation unsuccessful, see monitor update"
|
||||
))
|
||||
}
|
||||
})
|
||||
});
|
||||
join_all(futes).await.into_iter().collect()
|
||||
}
|
||||
|
||||
async fn start_containers_in_parallel(
|
||||
deployments: &[String],
|
||||
) -> anyhow::Result<()> {
|
||||
let futes = deployments.iter().map(|deployment| async move {
|
||||
monitor_client()
|
||||
.execute(execute::StartContainer { deployment: deployment.to_string() })
|
||||
.await
|
||||
.with_context(|| format!("failed to start container {deployment}"))
|
||||
.and_then(|update| {
|
||||
if update.success {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!(
|
||||
"failed to start container {deployment}. operation unsuccessful, see monitor update"
|
||||
))
|
||||
}
|
||||
})
|
||||
});
|
||||
join_all(futes).await.into_iter().collect()
|
||||
}
|
||||
|
||||
async fn stop_containers_in_parallel(
|
||||
deployments: &[String],
|
||||
) -> anyhow::Result<()> {
|
||||
let futes = deployments.iter().map(|deployment| async move {
|
||||
monitor_client()
|
||||
.execute(execute::StopContainer { deployment: deployment.to_string(), signal: None, time: None })
|
||||
.await
|
||||
.with_context(|| format!("failed to stop container {deployment}"))
|
||||
.and_then(|update| {
|
||||
if update.success {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!(
|
||||
"failed to stop container {deployment}. operation unsuccessful, see monitor update"
|
||||
))
|
||||
}
|
||||
})
|
||||
});
|
||||
join_all(futes).await.into_iter().collect()
|
||||
}
|
||||
|
||||
async fn destroy_containers_in_parallel(
|
||||
deployments: &[String],
|
||||
) -> anyhow::Result<()> {
|
||||
let futes = deployments.iter().map(|deployment| async move {
|
||||
monitor_client()
|
||||
.execute(execute::RemoveContainer { deployment: deployment.to_string(), signal: None, time: None })
|
||||
.await
|
||||
.with_context(|| format!("failed to destroy container {deployment}"))
|
||||
.and_then(|update| {
|
||||
if update.success {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!(
|
||||
"failed to destroy container {deployment}. operation unsuccessful, see monitor update"
|
||||
))
|
||||
}
|
||||
})
|
||||
});
|
||||
join_all(futes).await.into_iter().collect()
|
||||
}
|
||||
|
||||
async fn trigger_builds_in_parallel(
|
||||
builds: &[String],
|
||||
) -> anyhow::Result<()> {
|
||||
let futes = builds.iter().map(|build| async move {
|
||||
monitor_client()
|
||||
.execute(execute::RunBuild { build: build.to_string() })
|
||||
.await
|
||||
.with_context(|| format!("failed to build {build}"))
|
||||
.and_then(|update| {
|
||||
if update.success {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(anyhow!(
|
||||
"failed to build {build}. operation unsuccessful, see monitor update"
|
||||
))
|
||||
}
|
||||
})
|
||||
});
|
||||
join_all(futes).await.into_iter().collect()
|
||||
}
|
||||
113
bin/cli/src/main.rs
Normal file
113
bin/cli/src/main.rs
Normal file
@@ -0,0 +1,113 @@
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
use std::{io::Read, path::PathBuf, str::FromStr, sync::OnceLock};
|
||||
|
||||
use anyhow::Context;
|
||||
use clap::{Parser, Subcommand};
|
||||
use colored::Colorize;
|
||||
use monitor_client::{api::read, MonitorClient};
|
||||
use serde::{de::DeserializeOwned, Deserialize};
|
||||
|
||||
mod execution;
|
||||
mod maps;
|
||||
mod sync;
|
||||
|
||||
/// Returns the process-wide parsed CLI arguments.
///
/// Parsed lazily on first access; clap exits the process itself on
/// invalid arguments, so this never fails after the first call.
fn cli_args() -> &'static CliArgs {
  static CLI_ARGS: OnceLock<CliArgs> = OnceLock::new();
  CLI_ARGS.get_or_init(CliArgs::parse)
}
|
||||
|
||||
// Top-level clap argument parser for the CLI.
// NOTE: the `///` doc comments below are rendered as `--help` text
// by clap — they are user-facing output, not just documentation.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct CliArgs {
  /// Sync or Exec
  #[command(subcommand)]
  command: Command,
  /// The path to a creds file.
  // Defaults to $HOME/.config/monitor/creds.toml (see default_creds).
  #[arg(long, default_value_t = default_creds())]
  creds: String,
  /// Log less (just resource names).
  #[arg(long, default_value_t = false)]
  quiet: bool,
}
|
||||
|
||||
/// Default credentials path: `$HOME/.config/monitor/creds.toml`.
///
/// Panics if `HOME` is unset, since no sensible default exists then.
fn default_creds() -> String {
  let mut path = std::env::var("HOME")
    .expect("no HOME env var. cannot get default config path.");
  path.push_str("/.config/monitor/creds.toml");
  path
}
|
||||
|
||||
// Subcommands of the CLI.
// NOTE: the `///` doc comments below are rendered as `--help` text
// by clap — they are user-facing output, not just documentation.
#[derive(Debug, Clone, Subcommand)]
enum Command {
  /// Runs syncs on resource files
  Sync {
    /// The path of the resource folder / file
    /// Folder paths will recursively incorporate all the resources it finds under the folder
    #[arg(default_value_t = String::from("./resources"))]
    path: String,
  },

  /// Runs execution files
  Exec {
    /// The path of the exec file
    path: PathBuf,
  },
}
|
||||
|
||||
/// Shape of the toml credentials file passed via `--creds`.
#[derive(Debug, Deserialize)]
struct CredsFile {
  // Base url of the monitor server.
  url: String,
  // Api key.
  key: String,
  // Api secret.
  secret: String,
}
|
||||
|
||||
/// Returns the process-wide [`MonitorClient`], initializing it on
/// first access from the credentials file at `--creds`.
///
/// Panics (via `expect`) if the creds file cannot be parsed or the
/// client fails to initialize.
///
/// NOTE(review): initialization uses `futures::executor::block_on`
/// while the program runs under `#[tokio::main]` — this blocks the
/// calling thread while the init future runs. TODO confirm
/// `MonitorClient::new` cannot deadlock against the tokio runtime
/// when first called from an async context.
fn monitor_client() -> &'static MonitorClient {
  static MONITOR_CLIENT: OnceLock<MonitorClient> = OnceLock::new();
  MONITOR_CLIENT.get_or_init(|| {
    let CredsFile { url, key, secret } =
      parse_toml_file(&cli_args().creds)
        .expect("failed to parse monitor credentials");
    futures::executor::block_on(MonitorClient::new(url, key, secret))
      .expect("failed to initialize monitor client")
  })
}
|
||||
|
||||
#[tokio::main]
async fn main() -> anyhow::Result<()> {
  // Plain formatted logs, without module target prefixes.
  tracing_subscriber::fmt().with_target(false).init();

  // First use of the client — doubles as a connectivity /
  // credentials check before doing any work.
  let version =
    monitor_client().read(read::GetVersion {}).await?.version;
  info!("monitor version: {}", version.to_string().blue().bold());

  // Dispatch to the selected subcommand.
  match &cli_args().command {
    Command::Exec { path } => execution::run_execution(path).await?,
    Command::Sync { path } => {
      sync::run_sync(&PathBuf::from_str(path)?).await?
    }
  }

  Ok(())
}
|
||||
|
||||
fn parse_toml_file<T: DeserializeOwned>(
|
||||
path: impl AsRef<std::path::Path>,
|
||||
) -> anyhow::Result<T> {
|
||||
let contents = std::fs::read_to_string(path)
|
||||
.context("failed to read file contents")?;
|
||||
toml::from_str(&contents).context("failed to parse toml contents")
|
||||
}
|
||||
|
||||
fn wait_for_enter(press_enter_to: &str) -> anyhow::Result<()> {
|
||||
println!(
|
||||
"\nPress {} to {}\n",
|
||||
"ENTER".green(),
|
||||
press_enter_to.bold()
|
||||
);
|
||||
let buffer = &mut [0u8];
|
||||
std::io::stdin()
|
||||
.read_exact(buffer)
|
||||
.context("failed to read ENTER")?;
|
||||
Ok(())
|
||||
}
|
||||
293
bin/cli/src/maps.rs
Normal file
293
bin/cli/src/maps.rs
Normal file
@@ -0,0 +1,293 @@
|
||||
use std::{collections::HashMap, sync::OnceLock};
|
||||
|
||||
use monitor_client::{
|
||||
api::read,
|
||||
entities::{
|
||||
alerter::AlerterListItem, build::BuildListItem,
|
||||
builder::BuilderListItem, deployment::DeploymentListItem,
|
||||
procedure::ProcedureListItem, repo::RepoListItem,
|
||||
server::ServerListItem, server_template::ServerTemplateListItem,
|
||||
tag::Tag, user::User, user_group::UserGroup,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::monitor_client;
|
||||
|
||||
pub fn name_to_build() -> &'static HashMap<String, BuildListItem> {
|
||||
static NAME_TO_BUILD: OnceLock<HashMap<String, BuildListItem>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_BUILD.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListBuilds::default()),
|
||||
)
|
||||
.expect("failed to get builds from monitor")
|
||||
.into_iter()
|
||||
.map(|build| (build.name.clone(), build))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_build() -> &'static HashMap<String, BuildListItem> {
|
||||
static ID_TO_BUILD: OnceLock<HashMap<String, BuildListItem>> =
|
||||
OnceLock::new();
|
||||
ID_TO_BUILD.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListBuilds::default()),
|
||||
)
|
||||
.expect("failed to get builds from monitor")
|
||||
.into_iter()
|
||||
.map(|build| (build.id.clone(), build))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_deployment(
|
||||
) -> &'static HashMap<String, DeploymentListItem> {
|
||||
static NAME_TO_DEPLOYMENT: OnceLock<
|
||||
HashMap<String, DeploymentListItem>,
|
||||
> = OnceLock::new();
|
||||
NAME_TO_DEPLOYMENT.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListDeployments::default()),
|
||||
)
|
||||
.expect("failed to get deployments from monitor")
|
||||
.into_iter()
|
||||
.map(|deployment| (deployment.name.clone(), deployment))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_deployment(
|
||||
) -> &'static HashMap<String, DeploymentListItem> {
|
||||
static ID_TO_DEPLOYMENT: OnceLock<
|
||||
HashMap<String, DeploymentListItem>,
|
||||
> = OnceLock::new();
|
||||
ID_TO_DEPLOYMENT.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListDeployments::default()),
|
||||
)
|
||||
.expect("failed to get deployments from monitor")
|
||||
.into_iter()
|
||||
.map(|deployment| (deployment.id.clone(), deployment))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_server() -> &'static HashMap<String, ServerListItem> {
|
||||
static NAME_TO_SERVER: OnceLock<HashMap<String, ServerListItem>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_SERVER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListServers::default()),
|
||||
)
|
||||
.expect("failed to get servers from monitor")
|
||||
.into_iter()
|
||||
.map(|server| (server.name.clone(), server))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_server() -> &'static HashMap<String, ServerListItem> {
|
||||
static ID_TO_SERVER: OnceLock<HashMap<String, ServerListItem>> =
|
||||
OnceLock::new();
|
||||
ID_TO_SERVER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListServers::default()),
|
||||
)
|
||||
.expect("failed to get servers from monitor")
|
||||
.into_iter()
|
||||
.map(|server| (server.id.clone(), server))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_builder() -> &'static HashMap<String, BuilderListItem>
|
||||
{
|
||||
static NAME_TO_BUILDER: OnceLock<HashMap<String, BuilderListItem>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_BUILDER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListBuilders::default()),
|
||||
)
|
||||
.expect("failed to get builders from monitor")
|
||||
.into_iter()
|
||||
.map(|builder| (builder.name.clone(), builder))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_builder() -> &'static HashMap<String, BuilderListItem> {
|
||||
static ID_TO_BUILDER: OnceLock<HashMap<String, BuilderListItem>> =
|
||||
OnceLock::new();
|
||||
ID_TO_BUILDER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListBuilders::default()),
|
||||
)
|
||||
.expect("failed to get builders from monitor")
|
||||
.into_iter()
|
||||
.map(|builder| (builder.id.clone(), builder))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_alerter() -> &'static HashMap<String, AlerterListItem>
|
||||
{
|
||||
static NAME_TO_ALERTER: OnceLock<HashMap<String, AlerterListItem>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListAlerters::default()),
|
||||
)
|
||||
.expect("failed to get alerters from monitor")
|
||||
.into_iter()
|
||||
.map(|alerter| (alerter.name.clone(), alerter))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_alerter() -> &'static HashMap<String, AlerterListItem> {
|
||||
static ID_TO_ALERTER: OnceLock<HashMap<String, AlerterListItem>> =
|
||||
OnceLock::new();
|
||||
ID_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListAlerters::default()),
|
||||
)
|
||||
.expect("failed to get alerters from monitor")
|
||||
.into_iter()
|
||||
.map(|alerter| (alerter.id.clone(), alerter))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_repo() -> &'static HashMap<String, RepoListItem> {
|
||||
static NAME_TO_ALERTER: OnceLock<HashMap<String, RepoListItem>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListRepos::default()),
|
||||
)
|
||||
.expect("failed to get repos from monitor")
|
||||
.into_iter()
|
||||
.map(|repo| (repo.name.clone(), repo))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_repo() -> &'static HashMap<String, RepoListItem> {
|
||||
static ID_TO_ALERTER: OnceLock<HashMap<String, RepoListItem>> =
|
||||
OnceLock::new();
|
||||
ID_TO_ALERTER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListRepos::default()),
|
||||
)
|
||||
.expect("failed to get repos from monitor")
|
||||
.into_iter()
|
||||
.map(|repo| (repo.id.clone(), repo))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_procedure(
|
||||
) -> &'static HashMap<String, ProcedureListItem> {
|
||||
static NAME_TO_PROCEDURE: OnceLock<
|
||||
HashMap<String, ProcedureListItem>,
|
||||
> = OnceLock::new();
|
||||
NAME_TO_PROCEDURE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListProcedures::default()),
|
||||
)
|
||||
.expect("failed to get procedures from monitor")
|
||||
.into_iter()
|
||||
.map(|procedure| (procedure.name.clone(), procedure))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_procedure() -> &'static HashMap<String, ProcedureListItem>
|
||||
{
|
||||
static ID_TO_PROCEDURE: OnceLock<
|
||||
HashMap<String, ProcedureListItem>,
|
||||
> = OnceLock::new();
|
||||
ID_TO_PROCEDURE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListProcedures::default()),
|
||||
)
|
||||
.expect("failed to get procedures from monitor")
|
||||
.into_iter()
|
||||
.map(|procedure| (procedure.id.clone(), procedure))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_server_template(
|
||||
) -> &'static HashMap<String, ServerTemplateListItem> {
|
||||
static NAME_TO_SERVER_TEMPLATE: OnceLock<
|
||||
HashMap<String, ServerTemplateListItem>,
|
||||
> = OnceLock::new();
|
||||
NAME_TO_SERVER_TEMPLATE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListServerTemplates::default()),
|
||||
)
|
||||
.expect("failed to get server templates from monitor")
|
||||
.into_iter()
|
||||
.map(|procedure| (procedure.name.clone(), procedure))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_server_template(
|
||||
) -> &'static HashMap<String, ServerTemplateListItem> {
|
||||
static ID_TO_SERVER_TEMPLATE: OnceLock<
|
||||
HashMap<String, ServerTemplateListItem>,
|
||||
> = OnceLock::new();
|
||||
ID_TO_SERVER_TEMPLATE.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListServerTemplates::default()),
|
||||
)
|
||||
.expect("failed to get server templates from monitor")
|
||||
.into_iter()
|
||||
.map(|procedure| (procedure.id.clone(), procedure))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn name_to_user_group() -> &'static HashMap<String, UserGroup> {
|
||||
static NAME_TO_USER_GROUP: OnceLock<HashMap<String, UserGroup>> =
|
||||
OnceLock::new();
|
||||
NAME_TO_USER_GROUP.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListUserGroups::default()),
|
||||
)
|
||||
.expect("failed to get user groups from monitor")
|
||||
.into_iter()
|
||||
.map(|user_group| (user_group.name.clone(), user_group))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_user() -> &'static HashMap<String, User> {
|
||||
static ID_TO_USER: OnceLock<HashMap<String, User>> =
|
||||
OnceLock::new();
|
||||
ID_TO_USER.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListUsers::default()),
|
||||
)
|
||||
.expect("failed to get users from monitor")
|
||||
.into_iter()
|
||||
.map(|user| (user.id.clone(), user))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn id_to_tag() -> &'static HashMap<String, Tag> {
|
||||
static ID_TO_TAG: OnceLock<HashMap<String, Tag>> = OnceLock::new();
|
||||
ID_TO_TAG.get_or_init(|| {
|
||||
futures::executor::block_on(
|
||||
monitor_client().read(read::ListTags::default()),
|
||||
)
|
||||
.expect("failed to get tags from monitor")
|
||||
.into_iter()
|
||||
.map(|tag| (tag.id.clone(), tag))
|
||||
.collect()
|
||||
})
|
||||
}
|
||||
63
bin/cli/src/sync/file.rs
Normal file
63
bin/cli/src/sync/file.rs
Normal file
@@ -0,0 +1,63 @@
|
||||
use std::{fs, path::Path};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use colored::Colorize;
|
||||
use monitor_client::entities::toml::ResourcesToml;
|
||||
|
||||
pub fn read_resources(path: &Path) -> anyhow::Result<ResourcesToml> {
|
||||
let mut res = ResourcesToml::default();
|
||||
read_resources_recursive(path, &mut res)?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
/// Appends resources found at `path` into `resources`.
///
/// - For files: only `.toml` files are considered; files that fail
///   to parse are logged with a warning and skipped (best effort),
///   not treated as fatal.
/// - For directories: recurses into every entry; per-entry failures
///   are logged and skipped.
/// - Anything that is neither file nor directory is an error.
fn read_resources_recursive(
  path: &Path,
  resources: &mut ResourcesToml,
) -> anyhow::Result<()> {
  let res =
    fs::metadata(path).context("failed to get path metadata")?;
  if res.is_file() {
    // Silently ignore non-toml files.
    if !path
      .extension()
      .map(|ext| ext == "toml")
      .unwrap_or_default()
    {
      return Ok(());
    }
    let more = match crate::parse_toml_file::<ResourcesToml>(path) {
      Ok(res) => res,
      Err(e) => {
        // Best effort: warn and continue with the other files.
        warn!("failed to parse {:?}. skipping file | {e:#}", path);
        return Ok(());
      }
    };
    info!(
      "{} from {}",
      "adding resources".green().bold(),
      path.display().to_string().blue().bold()
    );
    // Merge every resource list from this file into the accumulator.
    resources.server_templates.extend(more.server_templates);
    resources.servers.extend(more.servers);
    resources.builds.extend(more.builds);
    resources.deployments.extend(more.deployments);
    resources.builders.extend(more.builders);
    resources.repos.extend(more.repos);
    resources.alerters.extend(more.alerters);
    resources.procedures.extend(more.procedures);
    resources.user_groups.extend(more.user_groups);
    Ok(())
  } else if res.is_dir() {
    let directory = fs::read_dir(path)
      .context("failed to read directory contents")?;
    // `flatten` drops unreadable entries; recursion errors are
    // logged rather than propagated so one bad entry doesn't
    // abort the whole walk.
    for entry in directory.into_iter().flatten() {
      if let Err(e) =
        read_resources_recursive(&entry.path(), resources)
      {
        warn!("failed to read additional resources at path | {e:#}");
      }
    }
    Ok(())
  } else {
    Err(anyhow!("resources path is neither file nor directory"))
  }
}
|
||||
96
bin/cli/src/sync/mod.rs
Normal file
96
bin/cli/src/sync/mod.rs
Normal file
@@ -0,0 +1,96 @@
|
||||
use std::path::Path;
|
||||
|
||||
use colored::Colorize;
|
||||
use monitor_client::entities::{
|
||||
alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, procedure::Procedure, repo::Repo,
|
||||
server::Server, server_template::ServerTemplate,
|
||||
};
|
||||
|
||||
use crate::{sync::resources::ResourceSync, wait_for_enter};
|
||||
|
||||
mod file;
|
||||
mod resources;
|
||||
mod user_group;
|
||||
|
||||
/// Runs a full sync: reads local resource files at `path`, computes
/// the creates/updates needed per resource type, prompts the user
/// to confirm, then applies everything in dependency order.
pub async fn run_sync(path: &Path) -> anyhow::Result<()> {
  info!(
    "resources path: {}",
    path.display().to_string().blue().bold()
  );

  let resources = file::read_resources(path)?;

  info!("computing sync actions...");

  // For each type: (to_create, to_update) relative to server state.
  let (server_template_creates, server_template_updates) =
    ServerTemplate::get_updates(resources.server_templates).await?;
  let (server_creates, server_updates) =
    Server::get_updates(resources.servers).await?;
  let (deployment_creates, deployment_updates) =
    Deployment::get_updates(resources.deployments).await?;
  let (build_creates, build_updates) =
    Build::get_updates(resources.builds).await?;
  let (builder_creates, builder_updates) =
    Builder::get_updates(resources.builders).await?;
  let (alerter_creates, alerter_updates) =
    Alerter::get_updates(resources.alerters).await?;
  let (repo_creates, repo_updates) =
    Repo::get_updates(resources.repos).await?;
  let (procedure_creates, procedure_updates) =
    Procedure::get_updates(resources.procedures).await?;
  let (user_group_creates, user_group_updates) =
    user_group::get_updates(resources.user_groups).await?;

  // Bail out early (before prompting) when there is nothing to do.
  if server_template_creates.is_empty()
    && server_template_updates.is_empty()
    && server_creates.is_empty()
    && server_updates.is_empty()
    && deployment_creates.is_empty()
    && deployment_updates.is_empty()
    && build_creates.is_empty()
    && build_updates.is_empty()
    && builder_creates.is_empty()
    && builder_updates.is_empty()
    && alerter_creates.is_empty()
    && alerter_updates.is_empty()
    && repo_creates.is_empty()
    && repo_updates.is_empty()
    && procedure_creates.is_empty()
    && procedure_updates.is_empty()
    && user_group_creates.is_empty()
    && user_group_updates.is_empty()
  {
    info!("{}. exiting.", "nothing to do".green().bold());
    return Ok(());
  }

  // Requires explicit user confirmation before mutating anything.
  wait_for_enter("run sync")?;

  // Apply in dependency order so referenced resources exist
  // (by name) before the resources that point at them.

  // No deps
  ServerTemplate::run_updates(
    server_template_creates,
    server_template_updates,
  )
  .await;
  Server::run_updates(server_creates, server_updates).await;
  Alerter::run_updates(alerter_creates, alerter_updates).await;

  // Dependant on server
  Builder::run_updates(builder_creates, builder_updates).await;
  Repo::run_updates(repo_creates, repo_updates).await;

  // Dependant on builder
  Build::run_updates(build_creates, build_updates).await;

  // Dependant on server / builder
  Deployment::run_updates(deployment_creates, deployment_updates)
    .await;

  // Dependant on everything
  Procedure::run_updates(procedure_creates, procedure_updates).await;
  user_group::run_updates(user_group_creates, user_group_updates)
    .await;

  Ok(())
}
|
||||
81
bin/cli/src/sync/resources/alerter.rs
Normal file
81
bin/cli/src/sync/resources/alerter.rs
Normal file
@@ -0,0 +1,81 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetAlerter,
|
||||
write::{CreateAlerter, UpdateAlerter},
|
||||
},
|
||||
entities::{
|
||||
alerter::{
|
||||
Alerter, AlerterConfig, AlerterConfigDiff, AlerterInfo, AlerterListItemInfo, PartialAlerterConfig
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{maps::name_to_alerter, monitor_client};
|
||||
|
||||
use super::ResourceSync;
|
||||
|
||||
/// [`ResourceSync`] wiring for alerters: maps the generic sync
/// machinery onto the alerter read/write API calls.
impl ResourceSync for Alerter {
  type Config = AlerterConfig;
  type Info = AlerterInfo;
  type PartialConfig = PartialAlerterConfig;
  type ConfigDiff = AlerterConfigDiff;
  type ListItemInfo = AlerterListItemInfo;

  // Human-readable resource type used in log output.
  fn display() -> &'static str {
    "alerter"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Alerter(id)
  }

  // Cached name -> list item lookup (fetched once per process).
  fn name_to_resource(
  ) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
  {
    name_to_alerter()
  }

  /// Creates the alerter and returns its new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(CreateAlerter {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }

  /// Applies the partial config to the existing alerter at `id`.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(UpdateAlerter {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }

  /// Fetches the full alerter resource by id.
  async fn get(
    id: String,
  ) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
    monitor_client().read(GetAlerter { alerter: id }).await
  }

  /// Diffs server-side config against the local partial config.
  /// No id -> name substitution needed for alerters.
  async fn get_diff(
    original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    Ok(original.partial_diff(update))
  }
}
|
||||
91
bin/cli/src/sync/resources/build.rs
Normal file
91
bin/cli/src/sync/resources/build.rs
Normal file
@@ -0,0 +1,91 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetBuild,
|
||||
write::{CreateBuild, UpdateBuild},
|
||||
},
|
||||
entities::{
|
||||
build::{
|
||||
Build, BuildConfig, BuildConfigDiff, BuildInfo,
|
||||
BuildListItemInfo, PartialBuildConfig,
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_builder, name_to_build},
|
||||
monitor_client,
|
||||
};
|
||||
|
||||
use super::ResourceSync;
|
||||
|
||||
/// [`ResourceSync`] wiring for builds: maps the generic sync
/// machinery onto the build read/write API calls.
impl ResourceSync for Build {
  type Config = BuildConfig;
  type Info = BuildInfo;
  type PartialConfig = PartialBuildConfig;
  type ConfigDiff = BuildConfigDiff;
  type ListItemInfo = BuildListItemInfo;

  // Human-readable resource type used in log output.
  fn display() -> &'static str {
    "build"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Build(id)
  }

  // Cached name -> list item lookup (fetched once per process).
  fn name_to_resource(
  ) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
  {
    name_to_build()
  }

  /// Creates the build and returns its new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(CreateBuild {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }

  /// Applies the partial config to the existing build at `id`.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(UpdateBuild {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }

  /// Fetches the full build resource by id.
  async fn get(
    id: String,
  ) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
    monitor_client().read(GetBuild { build: id }).await
  }

  /// Diffs server-side config against the local partial config.
  /// Local toml refers to builders by name, so the server-side
  /// builder id is translated to a name before diffing.
  async fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    // need to replace the builder id with name
    original.builder_id = id_to_builder()
      .get(&original.builder_id)
      .map(|b| b.name.clone())
      .unwrap_or_default();

    Ok(original.partial_diff(update))
  }
}
|
||||
93
bin/cli/src/sync/resources/builder.rs
Normal file
93
bin/cli/src/sync/resources/builder.rs
Normal file
@@ -0,0 +1,93 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetBuilder,
|
||||
write::{CreateBuilder, UpdateBuilder},
|
||||
},
|
||||
entities::{
|
||||
builder::{
|
||||
Builder, BuilderConfig, BuilderConfigDiff, BuilderListItemInfo,
|
||||
PartialBuilderConfig,
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_server, name_to_builder},
|
||||
monitor_client,
|
||||
};
|
||||
|
||||
use super::ResourceSync;
|
||||
|
||||
/// [`ResourceSync`] wiring for builders: maps the generic sync
/// machinery onto the builder read/write API calls.
impl ResourceSync for Builder {
  type Config = BuilderConfig;
  type Info = ();
  type PartialConfig = PartialBuilderConfig;
  type ConfigDiff = BuilderConfigDiff;
  type ListItemInfo = BuilderListItemInfo;

  // Human-readable resource type used in log output.
  fn display() -> &'static str {
    "builder"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Builder(id)
  }

  // Cached name -> list item lookup (fetched once per process).
  fn name_to_resource(
  ) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
  {
    name_to_builder()
  }

  /// Creates the builder and returns its new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(CreateBuilder {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }

  /// Applies the partial config to the existing builder at `id`.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(UpdateBuilder {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }

  /// Fetches the full builder resource by id.
  async fn get(
    id: String,
  ) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
    monitor_client().read(GetBuilder { builder: id }).await
  }

  /// Diffs server-side config against the local partial config.
  /// For server-backed builders, local toml refers to the server
  /// by name, so the server id is translated to a name first.
  async fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    // need to replace server builder id with name
    if let BuilderConfig::Server(config) = &mut original {
      config.server_id = id_to_server()
        .get(&config.server_id)
        .map(|s| s.name.clone())
        .unwrap_or_default();
    }

    Ok(original.partial_diff(update))
  }
}
|
||||
104
bin/cli/src/sync/resources/deployment.rs
Normal file
104
bin/cli/src/sync/resources/deployment.rs
Normal file
@@ -0,0 +1,104 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{read::GetDeployment, write},
|
||||
entities::{
|
||||
deployment::{
|
||||
Deployment, DeploymentConfig, DeploymentConfigDiff,
|
||||
DeploymentImage, DeploymentListItemInfo,
|
||||
PartialDeploymentConfig,
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_build, id_to_server, name_to_deployment},
|
||||
monitor_client,
|
||||
};
|
||||
|
||||
use super::ResourceSync;
|
||||
|
||||
/// [`ResourceSync`] wiring for deployments: maps the generic sync
/// machinery onto the deployment read/write API calls.
impl ResourceSync for Deployment {
  type Config = DeploymentConfig;
  type Info = ();
  type PartialConfig = PartialDeploymentConfig;
  type ConfigDiff = DeploymentConfigDiff;
  type ListItemInfo = DeploymentListItemInfo;

  // Human-readable resource type used in log output.
  fn display() -> &'static str {
    "deployment"
  }

  fn resource_target(id: String) -> ResourceTarget {
    ResourceTarget::Deployment(id)
  }

  // Cached name -> list item lookup (fetched once per process).
  fn name_to_resource(
  ) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
  {
    name_to_deployment()
  }

  /// Creates the deployment and returns its new id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String> {
    monitor_client()
      .write(write::CreateDeployment {
        name: resource.name,
        config: resource.config,
      })
      .await
      .map(|res| res.id)
  }

  /// Applies the partial config to the existing deployment at `id`.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()> {
    monitor_client()
      .write(write::UpdateDeployment {
        id,
        config: resource.config,
      })
      .await?;
    Ok(())
  }

  /// Fetches the full deployment resource by id.
  async fn get(
    id: String,
  ) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
    monitor_client()
      .read(GetDeployment { deployment: id })
      .await
  }

  /// Diffs server-side config against the local partial config.
  /// Local toml refers to servers and builds by name, so the
  /// server-side ids are translated to names before diffing.
  async fn get_diff(
    mut original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff> {
    // need to replace the server id with name
    original.server_id = id_to_server()
      .get(&original.server_id)
      .map(|s| s.name.clone())
      .unwrap_or_default();

    // need to replace the build id with name
    if let DeploymentImage::Build { build_id, version } =
      &original.image
    {
      original.image = DeploymentImage::Build {
        build_id: id_to_build()
          .get(build_id)
          .map(|b| b.name.clone())
          .unwrap_or_default(),
        version: version.clone(),
      };
    }

    Ok(original.partial_diff(update))
  }
}
|
||||
327
bin/cli/src/sync/resources/mod.rs
Normal file
327
bin/cli/src/sync/resources/mod.rs
Normal file
@@ -0,0 +1,327 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use colored::Colorize;
|
||||
use monitor_client::{
|
||||
api::write::{UpdateDescription, UpdateTagsOnResource},
|
||||
entities::{
|
||||
resource::{Resource, ResourceListItem},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::{Diff, FieldDiff, MaybeNone, PartialDiff};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::{cli_args, maps::id_to_tag, monitor_client};
|
||||
|
||||
pub mod alerter;
|
||||
pub mod build;
|
||||
pub mod builder;
|
||||
pub mod deployment;
|
||||
pub mod procedure;
|
||||
pub mod repo;
|
||||
pub mod server;
|
||||
pub mod server_template;
|
||||
|
||||
// Resources that exist and need their config / tags / description updated.
type ToUpdate<T> = Vec<ToUpdateItem<T>>;
// Declared resources that don't exist on the server yet.
type ToCreate<T> = Vec<ResourceToml<T>>;
// (to_create, to_update) pair returned by `ResourceSync::get_updates`.
type UpdatesResult<T> = (ToCreate<T>, ToUpdate<T>);

/// A pending update for an existing resource, with flags for which
/// non-config parts (description / tags) also changed.
pub struct ToUpdateItem<T> {
  // Id of the existing resource on the server.
  pub id: String,
  // The declared toml, with `config` minimized to only changed fields.
  pub resource: ResourceToml<T>,
  // True when the declared description differs from the stored one.
  pub update_description: bool,
  // True when the declared tags differ from the stored ones.
  pub update_tags: bool,
}
|
||||
|
||||
/// Shared sync driver for every monitor resource type (server, build,
/// deployment, ...). Implementors supply the per-type API calls and
/// id -> name normalization; the default methods implement the common
/// "diff declared toml against server state, then create / update" flow.
pub trait ResourceSync {
  // Full server-side config for the resource.
  type Config: Clone
    + Send
    + PartialDiff<Self::PartialConfig, Self::ConfigDiff>
    + 'static;
  // Extra non-config info attached to the resource.
  type Info: Default;
  // Partial config as declared in the toml files.
  type PartialConfig: std::fmt::Debug
    + Clone
    + Send
    + From<Self::ConfigDiff>
    + Serialize
    + 'static;
  // Field-level diff between Config and PartialConfig.
  type ConfigDiff: Diff + MaybeNone;
  // Info type carried by list items in the name -> resource map.
  type ListItemInfo: 'static;

  /// Human readable resource type name used in log / console output.
  fn display() -> &'static str;

  /// Wraps an id in the matching [ResourceTarget] variant.
  fn resource_target(id: String) -> ResourceTarget;

  /// Cached map from resource name to its list item (used to resolve
  /// declared names to existing ids).
  fn name_to_resource(
  ) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>;

  /// Creates the resource and returns created id.
  async fn create(
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<String>;

  /// Updates the resource at id with the partial config.
  async fn update(
    id: String,
    resource: ResourceToml<Self::PartialConfig>,
  ) -> anyhow::Result<()>;

  /// Fetches the full resource (config + info) by id.
  async fn get(
    id: String,
  ) -> anyhow::Result<Resource<Self::Config, Self::Info>>;

  /// Diffs the declared toml (partial) against the full existing config.
  /// Removes all fields from toml (partial) that haven't changed.
  async fn get_diff(
    original: Self::Config,
    update: Self::PartialConfig,
  ) -> anyhow::Result<Self::ConfigDiff>;

  /// Partitions the declared resources into (to_create, to_update),
  /// skipping resources with no changes, and prints a per-resource
  /// change report (detailed unless `--quiet`, name summaries if quiet).
  async fn get_updates(
    resources: Vec<ResourceToml<Self::PartialConfig>>,
  ) -> anyhow::Result<UpdatesResult<Self::PartialConfig>> {
    let map = Self::name_to_resource();

    let mut to_create = ToCreate::<Self::PartialConfig>::new();
    let mut to_update = ToUpdate::<Self::PartialConfig>::new();

    let quiet = cli_args().quiet;

    for mut resource in resources {
      match map.get(&resource.name).map(|s| s.id.clone()) {
        Some(id) => {
          // Get the full original config for the resource.
          let original = Self::get(id.clone()).await?;

          let diff =
            Self::get_diff(original.config, resource.config).await?;

          // Stored tags are ids; map them to names for comparison
          // against the declared (name-based) tags.
          let original_tags = original
            .tags
            .iter()
            .filter_map(|id| {
              id_to_tag().get(id).map(|t| t.name.clone())
            })
            .collect::<Vec<_>>();

          // Only proceed if there are any fields to update,
          // or a change to tags / description
          if diff.is_none()
            && resource.description == original.description
            && resource.tags == original_tags
          {
            continue;
          }

          if !quiet {
            println!(
              "\n{}: {}: '{}'\n-------------------",
              "UPDATE".blue(),
              Self::display(),
              resource.name.bold(),
            );
            let mut lines = Vec::<String>::new();
            if resource.description != original.description {
              lines.push(format!(
                "{}: 'description'\n{}: {}\n{}: {}",
                "field".dimmed(),
                "from".dimmed(),
                original.description.red(),
                "to".dimmed(),
                resource.description.green()
              ))
            }
            if resource.tags != original_tags {
              let from = format!("{:?}", original_tags).red();
              let to = format!("{:?}", resource.tags).green();
              lines.push(format!(
                "{}: 'tags'\n{}: {from}\n{}: {to}",
                "field".dimmed(),
                "from".dimmed(),
                "to".dimmed(),
              ));
            }
            lines.extend(diff.iter_field_diffs().map(
              |FieldDiff { field, from, to }| {
                format!(
                  "{}: '{field}'\n{}: {}\n{}: {}",
                  "field".dimmed(),
                  "from".dimmed(),
                  from.red(),
                  "to".dimmed(),
                  to.green()
                )
              },
            ));
            println!("{}", lines.join("\n-------------------\n"));
          }

          // Minimizes updates through diffing.
          resource.config = diff.into();

          let update = ToUpdateItem {
            id,
            update_description: resource.description
              != original.description,
            update_tags: resource.tags != original_tags,
            resource,
          };

          to_update.push(update);
        }
        None => {
          if !quiet {
            println!(
              "\n{}: {}: {}\n{}: {}\n{}: {:?}\n{}: {}",
              "CREATE".green(),
              Self::display(),
              resource.name.bold().green(),
              "description".dimmed(),
              resource.description,
              "tags".dimmed(),
              resource.tags,
              "config".dimmed(),
              serde_json::to_string_pretty(&resource.config)?
            )
          }
          to_create.push(resource);
        }
      }
    }

    if quiet && !to_create.is_empty() {
      println!(
        "\n{}s {}: {:#?}",
        Self::display(),
        "TO CREATE".green(),
        to_create.iter().map(|item| item.name.as_str())
      );
    }

    if quiet && !to_update.is_empty() {
      println!(
        "\n{}s {}: {:#?}",
        Self::display(),
        "TO UPDATE".blue(),
        to_update
          .iter()
          .map(|update| update.resource.name.as_str())
          .collect::<Vec<_>>()
      );
    }

    Ok((to_create, to_update))
  }

  /// Applies the partitioned changes: creates missing resources (then
  /// attaches tags / description), and updates existing ones. Failures
  /// are logged with `warn!` and do not abort the rest of the sync.
  async fn run_updates(
    to_create: ToCreate<Self::PartialConfig>,
    to_update: ToUpdate<Self::PartialConfig>,
  ) {
    for resource in to_create {
      let name = resource.name.clone();
      let tags = resource.tags.clone();
      let description = resource.description.clone();
      let id = match Self::create(resource).await {
        Ok(id) => id,
        Err(e) => {
          warn!(
            "failed to create {} {name} | {e:#}",
            Self::display(),
          );
          continue;
        }
      };
      Self::update_tags(id.clone(), &name, tags).await;
      Self::update_description(id, &name, description).await;
      info!(
        "{} {} '{}'",
        "created".green().bold(),
        Self::display(),
        name.bold(),
      );
    }

    for ToUpdateItem {
      id,
      resource,
      update_description,
      update_tags,
    } in to_update
    {
      // Update resource
      let name = resource.name.clone();
      let tags = resource.tags.clone();
      let description = resource.description.clone();

      if update_description {
        Self::update_description(id.clone(), &name, description)
          .await;
      }

      if update_tags {
        Self::update_tags(id.clone(), &name, tags).await;
      }

      if let Err(e) = Self::update(id, resource).await {
        warn!(
          "failed to update config on {} {name} | {e:#}",
          Self::display()
        );
      } else {
        info!(
          "{} {} '{}' configuration",
          "updated".blue().bold(),
          Self::display(),
          name.bold(),
        );
      }
    }
  }

  /// Replaces the tag set on the resource, logging the outcome.
  async fn update_tags(id: String, name: &str, tags: Vec<String>) {
    // Update tags
    if let Err(e) = monitor_client()
      .write(UpdateTagsOnResource {
        target: Self::resource_target(id),
        tags,
      })
      .await
    {
      warn!(
        "failed to update tags on {} {name} | {e:#}",
        Self::display(),
      );
    } else {
      info!(
        "{} {} '{}' tags",
        "updated".blue().bold(),
        Self::display(),
        name.bold(),
      );
    }
  }

  /// Replaces the description on the resource, logging the outcome.
  async fn update_description(
    id: String,
    name: &str,
    description: String,
  ) {
    if let Err(e) = monitor_client()
      .write(UpdateDescription {
        target: Self::resource_target(id.clone()),
        description,
      })
      .await
    {
      warn!("failed to update resource {id} description | {e:#}");
    } else {
      info!(
        "{} {} '{}' description",
        "updated".blue().bold(),
        Self::display(),
        name.bold(),
      );
    }
  }
}
|
||||
250
bin/cli/src/sync/resources/procedure.rs
Normal file
250
bin/cli/src/sync/resources/procedure.rs
Normal file
@@ -0,0 +1,250 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
execute::Execution,
|
||||
read::GetProcedure,
|
||||
write::{CreateProcedure, UpdateProcedure},
|
||||
},
|
||||
entities::{
|
||||
procedure::{
|
||||
PartialProcedureConfig, Procedure, ProcedureConfig, ProcedureConfigDiff, ProcedureListItemInfo
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::{MaybeNone, PartialDiff};
|
||||
|
||||
use crate::{
|
||||
maps::{
|
||||
id_to_build, id_to_deployment, id_to_procedure, id_to_repo,
|
||||
id_to_server, name_to_procedure,
|
||||
},
|
||||
monitor_client,
|
||||
sync::resources::ToUpdateItem,
|
||||
};
|
||||
|
||||
use super::{ResourceSync, ToCreate, ToUpdate};
|
||||
|
||||
impl ResourceSync for Procedure {
|
||||
type Config = ProcedureConfig;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialProcedureConfig;
|
||||
type ConfigDiff = ProcedureConfigDiff;
|
||||
type ListItemInfo = ProcedureListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"procedure"
|
||||
}
|
||||
|
||||
fn resource_target(id: String) -> ResourceTarget {
|
||||
ResourceTarget::Procedure(id)
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
{
|
||||
name_to_procedure()
|
||||
}
|
||||
|
||||
async fn create(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<String> {
|
||||
monitor_client()
|
||||
.write(CreateProcedure {
|
||||
name: resource.name,
|
||||
config: resource.config,
|
||||
})
|
||||
.await
|
||||
.map(|p| p.id)
|
||||
}
|
||||
|
||||
async fn update(
|
||||
id: String,
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<()> {
|
||||
monitor_client()
|
||||
.write(UpdateProcedure {
|
||||
id,
|
||||
config: resource.config,
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn run_updates(
|
||||
mut to_create: ToCreate<Self::PartialConfig>,
|
||||
mut to_update: ToUpdate<Self::PartialConfig>,
|
||||
) {
|
||||
if to_update.is_empty() && to_create.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
for i in 0..10 {
|
||||
let mut to_pull = Vec::new();
|
||||
for ToUpdateItem {
|
||||
id,
|
||||
resource,
|
||||
update_description,
|
||||
update_tags,
|
||||
} in &to_update
|
||||
{
|
||||
// Update resource
|
||||
let name = resource.name.clone();
|
||||
let tags = resource.tags.clone();
|
||||
let description = resource.description.clone();
|
||||
if *update_description {
|
||||
Self::update_description(id.clone(), &name, description)
|
||||
.await;
|
||||
}
|
||||
if *update_tags {
|
||||
Self::update_tags(id.clone(), &name, tags).await;
|
||||
}
|
||||
if !resource.config.is_none() {
|
||||
if let Err(e) =
|
||||
Self::update(id.clone(), resource.clone()).await
|
||||
{
|
||||
if i == 9 {
|
||||
warn!(
|
||||
"failed to update {} {name} | {e:#}",
|
||||
Self::display()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!("{} {name} updated", Self::display());
|
||||
// have to clone out so to_update is mutable
|
||||
to_pull.push(id.clone());
|
||||
}
|
||||
//
|
||||
to_update.retain(|resource| !to_pull.contains(&resource.id));
|
||||
|
||||
let mut to_pull = Vec::new();
|
||||
for resource in &to_create {
|
||||
let name = resource.name.clone();
|
||||
let tags = resource.tags.clone();
|
||||
let description = resource.description.clone();
|
||||
let id = match Self::create(resource.clone()).await {
|
||||
Ok(id) => id,
|
||||
Err(e) => {
|
||||
if i == 9 {
|
||||
warn!(
|
||||
"failed to create {} {name} | {e:#}",
|
||||
Self::display(),
|
||||
);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
};
|
||||
Self::update_tags(id.clone(), &name, tags).await;
|
||||
Self::update_description(id, &name, description).await;
|
||||
info!("{} {name} created", Self::display());
|
||||
to_pull.push(name);
|
||||
}
|
||||
to_create.retain(|resource| !to_pull.contains(&resource.name));
|
||||
|
||||
if to_update.is_empty() && to_create.is_empty() {
|
||||
info!(
|
||||
"============ {}s synced ✅ ============",
|
||||
Self::display()
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
warn!("procedure sync loop exited after max iterations");
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client().read(GetProcedure { procedure: id }).await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
mut original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
for execution in &mut original.executions {
|
||||
match &mut execution.execution {
|
||||
Execution::None(_) => {}
|
||||
Execution::RunProcedure(config) => {
|
||||
config.procedure = id_to_procedure()
|
||||
.get(&config.procedure)
|
||||
.map(|p| p.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::RunBuild(config) => {
|
||||
config.build = id_to_build()
|
||||
.get(&config.build)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::Deploy(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StartContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StopContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::RemoveContainer(config) => {
|
||||
config.deployment = id_to_deployment()
|
||||
.get(&config.deployment)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::CloneRepo(config) => {
|
||||
config.repo = id_to_repo()
|
||||
.get(&config.repo)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PullRepo(config) => {
|
||||
config.repo = id_to_repo()
|
||||
.get(&config.repo)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::StopAllContainers(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneDockerNetworks(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneDockerImages(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::PruneDockerContainers(config) => {
|
||||
config.server = id_to_server()
|
||||
.get(&config.server)
|
||||
.map(|d| d.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
}
|
||||
91
bin/cli/src/sync/resources/repo.rs
Normal file
91
bin/cli/src/sync/resources/repo.rs
Normal file
@@ -0,0 +1,91 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetRepo,
|
||||
write::{CreateRepo, UpdateRepo},
|
||||
},
|
||||
entities::{
|
||||
repo::{
|
||||
PartialRepoConfig, Repo, RepoConfig, RepoConfigDiff, RepoInfo,
|
||||
RepoListItemInfo,
|
||||
},
|
||||
resource::{Resource, ResourceListItem},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{
|
||||
maps::{id_to_server, name_to_repo},
|
||||
monitor_client,
|
||||
};
|
||||
|
||||
use super::ResourceSync;
|
||||
|
||||
impl ResourceSync for Repo {
|
||||
type Config = RepoConfig;
|
||||
type Info = RepoInfo;
|
||||
type PartialConfig = PartialRepoConfig;
|
||||
type ConfigDiff = RepoConfigDiff;
|
||||
type ListItemInfo = RepoListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"repo"
|
||||
}
|
||||
|
||||
fn resource_target(id: String) -> ResourceTarget {
|
||||
ResourceTarget::Repo(id)
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
{
|
||||
name_to_repo()
|
||||
}
|
||||
|
||||
async fn create(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<String> {
|
||||
monitor_client()
|
||||
.write(CreateRepo {
|
||||
name: resource.name,
|
||||
config: resource.config,
|
||||
})
|
||||
.await
|
||||
.map(|res| res.id)
|
||||
}
|
||||
|
||||
async fn update(
|
||||
id: String,
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<()> {
|
||||
monitor_client()
|
||||
.write(UpdateRepo {
|
||||
id,
|
||||
config: resource.config,
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client().read(GetRepo { repo: id }).await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
mut original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
// Need to replace server id with name
|
||||
original.server_id = id_to_server()
|
||||
.get(&original.server_id)
|
||||
.map(|s| s.name.clone())
|
||||
.unwrap_or_default();
|
||||
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
}
|
||||
82
bin/cli/src/sync/resources/server.rs
Normal file
82
bin/cli/src/sync/resources/server.rs
Normal file
@@ -0,0 +1,82 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetServer,
|
||||
write::{CreateServer, UpdateServer},
|
||||
},
|
||||
entities::{
|
||||
resource::{Resource, ResourceListItem},
|
||||
server::{
|
||||
PartialServerConfig, Server, ServerConfig, ServerConfigDiff,
|
||||
ServerListItemInfo,
|
||||
},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{maps::name_to_server, monitor_client};
|
||||
|
||||
use super::ResourceSync;
|
||||
|
||||
impl ResourceSync for Server {
|
||||
type Config = ServerConfig;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialServerConfig;
|
||||
type ConfigDiff = ServerConfigDiff;
|
||||
type ListItemInfo = ServerListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"server"
|
||||
}
|
||||
|
||||
fn resource_target(id: String) -> ResourceTarget {
|
||||
ResourceTarget::Server(id)
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
{
|
||||
name_to_server()
|
||||
}
|
||||
|
||||
async fn create(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<String> {
|
||||
monitor_client()
|
||||
.write(CreateServer {
|
||||
name: resource.name,
|
||||
config: resource.config,
|
||||
})
|
||||
.await
|
||||
.map(|res| res.id)
|
||||
}
|
||||
|
||||
async fn update(
|
||||
id: String,
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<()> {
|
||||
monitor_client()
|
||||
.write(UpdateServer {
|
||||
id,
|
||||
config: resource.config,
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client().read(GetServer { server: id }).await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
}
|
||||
85
bin/cli/src/sync/resources/server_template.rs
Normal file
85
bin/cli/src/sync/resources/server_template.rs
Normal file
@@ -0,0 +1,85 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::GetServerTemplate,
|
||||
write::{CreateServerTemplate, UpdateServerTemplate},
|
||||
},
|
||||
entities::{
|
||||
resource::{Resource, ResourceListItem},
|
||||
server_template::{
|
||||
PartialServerTemplateConfig, ServerTemplate, ServerTemplateConfig, ServerTemplateConfigDiff, ServerTemplateListItemInfo
|
||||
},
|
||||
toml::ResourceToml,
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
use partial_derive2::PartialDiff;
|
||||
|
||||
use crate::{maps::name_to_server_template, monitor_client};
|
||||
|
||||
use super::ResourceSync;
|
||||
|
||||
impl ResourceSync for ServerTemplate {
|
||||
type Config = ServerTemplateConfig;
|
||||
type Info = ();
|
||||
type PartialConfig = PartialServerTemplateConfig;
|
||||
type ConfigDiff = ServerTemplateConfigDiff;
|
||||
type ListItemInfo = ServerTemplateListItemInfo;
|
||||
|
||||
fn display() -> &'static str {
|
||||
"server template"
|
||||
}
|
||||
|
||||
fn resource_target(id: String) -> ResourceTarget {
|
||||
ResourceTarget::ServerTemplate(id)
|
||||
}
|
||||
|
||||
fn name_to_resource(
|
||||
) -> &'static HashMap<String, ResourceListItem<Self::ListItemInfo>>
|
||||
{
|
||||
name_to_server_template()
|
||||
}
|
||||
|
||||
async fn create(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<String> {
|
||||
monitor_client()
|
||||
.write(CreateServerTemplate {
|
||||
name: resource.name,
|
||||
config: resource.config,
|
||||
})
|
||||
.await
|
||||
.map(|res| res.id)
|
||||
}
|
||||
|
||||
async fn update(
|
||||
id: String,
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
) -> anyhow::Result<()> {
|
||||
monitor_client()
|
||||
.write(UpdateServerTemplate {
|
||||
id,
|
||||
config: resource.config,
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get(
|
||||
id: String,
|
||||
) -> anyhow::Result<Resource<Self::Config, Self::Info>> {
|
||||
monitor_client()
|
||||
.read(GetServerTemplate {
|
||||
server_template: id,
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
}
|
||||
242
bin/cli/src/sync/user_group.rs
Normal file
242
bin/cli/src/sync/user_group.rs
Normal file
@@ -0,0 +1,242 @@
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::{
|
||||
read::ListUserTargetPermissions,
|
||||
write::{
|
||||
CreateUserGroup, SetUsersInUserGroup, UpdatePermissionOnTarget,
|
||||
},
|
||||
},
|
||||
entities::{
|
||||
permission::UserTarget,
|
||||
toml::{PermissionToml, UserGroupToml},
|
||||
update::ResourceTarget,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
maps::{
|
||||
id_to_alerter, id_to_build, id_to_builder, id_to_deployment,
|
||||
id_to_procedure, id_to_repo, id_to_server, id_to_server_template,
|
||||
id_to_user, name_to_user_group,
|
||||
},
|
||||
monitor_client,
|
||||
};
|
||||
|
||||
/// Partitions the declared user groups into (to_create, to_update) by
/// comparing each against the existing group of the same name. Stored
/// state references users / resources by id while the toml uses names,
/// so ids are mapped to names before comparison; both sides are sorted
/// so ordering differences don't register as changes.
pub async fn get_updates(
  user_groups: Vec<UserGroupToml>,
) -> anyhow::Result<(Vec<UserGroupToml>, Vec<UserGroupToml>)> {
  let map = name_to_user_group();

  let mut to_create = Vec::<UserGroupToml>::new();
  let mut to_update = Vec::<UserGroupToml>::new();

  for mut user_group in user_groups {
    match map.get(&user_group.name).cloned() {
      Some(original) => {
        // replace the user ids with usernames
        let mut users = original
          .users
          .into_iter()
          .filter_map(|user_id| {
            id_to_user().get(&user_id).map(|u| u.username.clone())
          })
          .collect::<Vec<_>>();

        // Fetch the group's current permissions, then replace the
        // resource ids in each target with names so they compare
        // against the (name-based) toml permissions.
        let mut permissions = monitor_client()
          .read(ListUserTargetPermissions {
            user_target: UserTarget::UserGroup(original.id),
          })
          .await
          .context("failed to query for UserGroup permissions")?
          .into_iter()
          .map(|mut p| {
            // replace the ids with names
            match &mut p.resource_target {
              ResourceTarget::System(_) => {}
              ResourceTarget::Build(id) => {
                *id = id_to_build()
                  .get(id)
                  .map(|b| b.name.clone())
                  .unwrap_or_default()
              }
              ResourceTarget::Builder(id) => {
                *id = id_to_builder()
                  .get(id)
                  .map(|b| b.name.clone())
                  .unwrap_or_default()
              }
              ResourceTarget::Deployment(id) => {
                *id = id_to_deployment()
                  .get(id)
                  .map(|b| b.name.clone())
                  .unwrap_or_default()
              }
              ResourceTarget::Server(id) => {
                *id = id_to_server()
                  .get(id)
                  .map(|b| b.name.clone())
                  .unwrap_or_default()
              }
              ResourceTarget::Repo(id) => {
                *id = id_to_repo()
                  .get(id)
                  .map(|b| b.name.clone())
                  .unwrap_or_default()
              }
              ResourceTarget::Alerter(id) => {
                *id = id_to_alerter()
                  .get(id)
                  .map(|b| b.name.clone())
                  .unwrap_or_default()
              }
              ResourceTarget::Procedure(id) => {
                *id = id_to_procedure()
                  .get(id)
                  .map(|b| b.name.clone())
                  .unwrap_or_default()
              }
              ResourceTarget::ServerTemplate(id) => {
                *id = id_to_server_template()
                  .get(id)
                  .map(|b| b.name.clone())
                  .unwrap_or_default()
              }
            }
            PermissionToml {
              target: p.resource_target,
              level: p.level,
            }
          })
          .collect::<Vec<_>>();

        // Sort both sides so comparison is order-insensitive.
        users.sort();
        user_group.users.sort();

        user_group.permissions.sort_by(sort_permissions);
        permissions.sort_by(sort_permissions);

        // only push update after failed diff
        if user_group.users != users
          || user_group.permissions != permissions
        {
          // no update from users
          to_update.push(user_group);
        }
      }
      None => to_create.push(user_group),
    }
  }

  if !to_create.is_empty() {
    println!(
      "\nUSER GROUPS TO CREATE: {}",
      to_create
        .iter()
        .map(|item| item.name.as_str())
        .collect::<Vec<_>>()
        .join(", ")
    );
  }

  if !to_update.is_empty() {
    println!(
      "\nUSER GROUPS TO UPDATE: {}",
      to_update
        .iter()
        .map(|item| item.name.as_str())
        .collect::<Vec<_>>()
        .join(", ")
    );
  }

  Ok((to_create, to_update))
}
|
||||
|
||||
/// order permissions in deterministic way
|
||||
fn sort_permissions(
|
||||
a: &PermissionToml,
|
||||
b: &PermissionToml,
|
||||
) -> Ordering {
|
||||
let (a_t, a_id) = a.target.extract_variant_id();
|
||||
let (b_t, b_id) = b.target.extract_variant_id();
|
||||
match (a_t.cmp(&b_t), a_id.cmp(b_id)) {
|
||||
(Ordering::Greater, _) => Ordering::Greater,
|
||||
(Ordering::Less, _) => Ordering::Less,
|
||||
(_, Ordering::Greater) => Ordering::Greater,
|
||||
(_, Ordering::Less) => Ordering::Less,
|
||||
_ => Ordering::Equal,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run_updates(
|
||||
to_create: Vec<UserGroupToml>,
|
||||
to_update: Vec<UserGroupToml>,
|
||||
) {
|
||||
let log_after = !to_update.is_empty() || !to_create.is_empty();
|
||||
|
||||
// Create the non-existant user groups
|
||||
for user_group in to_create {
|
||||
// Create the user group
|
||||
if let Err(e) = monitor_client()
|
||||
.write(CreateUserGroup {
|
||||
name: user_group.name.clone(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to create user group {} | {e:#}",
|
||||
user_group.name
|
||||
);
|
||||
continue;
|
||||
};
|
||||
|
||||
set_users(user_group.name.clone(), user_group.users).await;
|
||||
update_permissions(user_group.name, user_group.permissions).await;
|
||||
}
|
||||
|
||||
// Update the existing user groups
|
||||
for user_group in to_update {
|
||||
set_users(user_group.name.clone(), user_group.users).await;
|
||||
update_permissions(user_group.name, user_group.permissions).await;
|
||||
}
|
||||
|
||||
if log_after {
|
||||
info!("============ user groups synced ✅ ============");
|
||||
}
|
||||
}
|
||||
|
||||
async fn set_users(user_group: String, users: Vec<String>) {
|
||||
if !users.is_empty() {
|
||||
if let Err(e) = monitor_client()
|
||||
.write(SetUsersInUserGroup {
|
||||
user_group: user_group.clone(),
|
||||
users,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!("failed to set users in group {user_group} | {e:#}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_permissions(
|
||||
user_group: String,
|
||||
permissions: Vec<PermissionToml>,
|
||||
) {
|
||||
for PermissionToml { target, level } in permissions {
|
||||
if let Err(e) = monitor_client()
|
||||
.write(UpdatePermissionOnTarget {
|
||||
user_target: UserTarget::UserGroup(user_group.clone()),
|
||||
resource_target: target.clone(),
|
||||
permission: level,
|
||||
})
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to set permssion in group {user_group} | target: {target:?} | {e:#}",
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
59
bin/core/Cargo.toml
Normal file
59
bin/core/Cargo.toml
Normal file
@@ -0,0 +1,59 @@
|
||||
[package]
|
||||
name = "monitor_core"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "core"
|
||||
path = "src/main.rs"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
# local
|
||||
monitor_client = { workspace = true, features = ["mongo"] }
|
||||
periphery_client.workspace = true
|
||||
logger.workspace = true
|
||||
# mogh
|
||||
serror = { workspace = true, features = ["axum"] }
|
||||
merge_config_files.workspace = true
|
||||
termination_signal.workspace = true
|
||||
async_timing_util.workspace = true
|
||||
partial_derive2.workspace = true
|
||||
mongo_indexed.workspace = true
|
||||
resolver_api.workspace = true
|
||||
parse_csl.workspace = true
|
||||
mungos.workspace = true
|
||||
slack.workspace = true
|
||||
svi.workspace = true
|
||||
# external
|
||||
urlencoding.workspace = true
|
||||
aws-sdk-ec2.workspace = true
|
||||
aws-config.workspace = true
|
||||
tokio-util.workspace = true
|
||||
axum-extra.workspace = true
|
||||
tower-http.workspace = true
|
||||
serde_json.workspace = true
|
||||
typeshare.workspace = true
|
||||
tracing.workspace = true
|
||||
reqwest.workspace = true
|
||||
futures.workspace = true
|
||||
anyhow.workspace = true
|
||||
dotenv.workspace = true
|
||||
bcrypt.workspace = true
|
||||
tokio.workspace = true
|
||||
tower.workspace = true
|
||||
serde.workspace = true
|
||||
axum.workspace = true
|
||||
toml.workspace = true
|
||||
uuid.workspace = true
|
||||
envy.workspace = true
|
||||
rand.workspace = true
|
||||
hmac.workspace = true
|
||||
sha2.workspace = true
|
||||
jwt.workspace = true
|
||||
hex.workspace = true
|
||||
23
bin/core/Dockerfile
Normal file
23
bin/core/Dockerfile
Normal file
@@ -0,0 +1,23 @@
|
||||
# Build Core
|
||||
FROM rust:1.78.0-bullseye as core-builder
|
||||
WORKDIR /builder
|
||||
COPY . .
|
||||
RUN cargo build -p monitor_core --release
|
||||
|
||||
# Build Frontend
|
||||
FROM node:20.12-alpine as frontend-builder
|
||||
WORKDIR /builder
|
||||
COPY ./frontend ./frontend
|
||||
COPY ./client/core/ts ./client
|
||||
RUN cd client && yarn && yarn build && yarn link
|
||||
RUN cd frontend && yarn link @monitor/client && yarn && yarn build
|
||||
|
||||
# Final Image
|
||||
# FROM gcr.io/distroless/cc
|
||||
FROM debian:bullseye-slim
|
||||
RUN apt update && apt install -y ca-certificates
|
||||
COPY ./config_example/core.config.example.toml /config/config.toml
|
||||
COPY --from=core-builder /builder/target/release/core /
|
||||
COPY --from=frontend-builder /builder/frontend/dist /frontend
|
||||
EXPOSE 9000
|
||||
CMD ["./core"]
|
||||
128
bin/core/src/api/auth.rs
Normal file
128
bin/core/src/api/auth.rs
Normal file
@@ -0,0 +1,128 @@
|
||||
use std::{sync::OnceLock, time::Instant};
|
||||
|
||||
use anyhow::anyhow;
|
||||
use axum::{http::HeaderMap, routing::post, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use monitor_client::{api::auth::*, entities::user::User};
|
||||
use resolver_api::{derive::Resolver, Resolve, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::{
|
||||
get_user_id_from_headers,
|
||||
github::{self, client::github_oauth_client},
|
||||
google::{self, client::google_oauth_client},
|
||||
},
|
||||
config::core_config,
|
||||
helpers::query::get_user,
|
||||
state::{jwt_client, State},
|
||||
};
|
||||
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
|
||||
#[resolver_target(State)]
|
||||
#[resolver_args(HeaderMap)]
|
||||
#[serde(tag = "type", content = "params")]
|
||||
#[allow(clippy::enum_variant_names, clippy::large_enum_variant)]
|
||||
pub enum AuthRequest {
|
||||
GetLoginOptions(GetLoginOptions),
|
||||
CreateLocalUser(CreateLocalUser),
|
||||
LoginLocalUser(LoginLocalUser),
|
||||
ExchangeForJwt(ExchangeForJwt),
|
||||
GetUser(GetUser),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
let mut router = Router::new().route("/", post(handler));
|
||||
|
||||
if github_oauth_client().is_some() {
|
||||
router = router.nest("/github", github::router())
|
||||
}
|
||||
|
||||
if google_oauth_client().is_some() {
|
||||
router = router.nest("/google", google::router())
|
||||
}
|
||||
|
||||
router
|
||||
}
|
||||
|
||||
#[instrument(name = "AuthHandler", level = "debug", skip(headers))]
|
||||
async fn handler(
|
||||
headers: HeaderMap,
|
||||
Json(request): Json<AuthRequest>,
|
||||
) -> serror::Result<(TypedHeader<ContentType>, String)> {
|
||||
let timer = Instant::now();
|
||||
let req_id = Uuid::new_v4();
|
||||
debug!("/auth request {req_id} | METHOD: {}", request.req_type());
|
||||
let res = State.resolve_request(request, headers).await.map_err(
|
||||
|e| match e {
|
||||
resolver_api::Error::Serialization(e) => {
|
||||
anyhow!("{e:?}").context("response serialization error")
|
||||
}
|
||||
resolver_api::Error::Inner(e) => e,
|
||||
},
|
||||
);
|
||||
if let Err(e) = &res {
|
||||
debug!("/auth request {req_id} | error: {e:#}");
|
||||
}
|
||||
let elapsed = timer.elapsed();
|
||||
debug!("/auth request {req_id} | resolve time: {elapsed:?}");
|
||||
Ok((TypedHeader(ContentType::json()), res?))
|
||||
}
|
||||
|
||||
fn login_options_reponse() -> &'static GetLoginOptionsResponse {
|
||||
static GET_LOGIN_OPTIONS_RESPONSE: OnceLock<
|
||||
GetLoginOptionsResponse,
|
||||
> = OnceLock::new();
|
||||
GET_LOGIN_OPTIONS_RESPONSE.get_or_init(|| {
|
||||
let config = core_config();
|
||||
GetLoginOptionsResponse {
|
||||
local: config.local_auth,
|
||||
github: config.github_oauth.enabled
|
||||
&& !config.github_oauth.id.is_empty()
|
||||
&& !config.github_oauth.secret.is_empty(),
|
||||
google: config.google_oauth.enabled
|
||||
&& !config.google_oauth.id.is_empty()
|
||||
&& !config.google_oauth.secret.is_empty(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
impl Resolve<GetLoginOptions, HeaderMap> for State {
|
||||
#[instrument(name = "GetLoginOptions", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
_: GetLoginOptions,
|
||||
_: HeaderMap,
|
||||
) -> anyhow::Result<GetLoginOptionsResponse> {
|
||||
Ok(*login_options_reponse())
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ExchangeForJwt, HeaderMap> for State {
|
||||
#[instrument(name = "ExchangeForJwt", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
ExchangeForJwt { token }: ExchangeForJwt,
|
||||
_: HeaderMap,
|
||||
) -> anyhow::Result<ExchangeForJwtResponse> {
|
||||
let jwt = jwt_client().redeem_exchange_token(&token).await?;
|
||||
let res = ExchangeForJwtResponse { jwt };
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetUser, HeaderMap> for State {
|
||||
#[instrument(name = "GetUser", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetUser {}: GetUser,
|
||||
headers: HeaderMap,
|
||||
) -> anyhow::Result<User> {
|
||||
let user_id = get_user_id_from_headers(&headers).await?;
|
||||
get_user(&user_id).await
|
||||
}
|
||||
}
|
||||
611
bin/core/src/api/execute/build.rs
Normal file
611
bin/core/src/api/execute/build.rs
Normal file
@@ -0,0 +1,611 @@
|
||||
use std::{collections::HashSet, time::Duration};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use futures::future::join_all;
|
||||
use monitor_client::{
|
||||
api::execute::{
|
||||
CancelBuild, CancelBuildResponse, Deploy, RunBuild,
|
||||
},
|
||||
entities::{
|
||||
all_logs_success,
|
||||
build::Build,
|
||||
builder::{AwsBuilderConfig, Builder, BuilderConfig},
|
||||
deployment::DeploymentState,
|
||||
monitor_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
server_template::AwsServerTemplateConfig,
|
||||
update::{Log, Update},
|
||||
user::{auto_redeploy_user, User},
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, to_bson, to_document},
|
||||
};
|
||||
use periphery_client::{
|
||||
api::{self, GetVersionResponse},
|
||||
PeripheryClient,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use serror::{serialize_error, serialize_error_pretty};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::{
|
||||
cloud::{
|
||||
aws::{
|
||||
launch_ec2_instance, terminate_ec2_instance_with_retry,
|
||||
Ec2Instance,
|
||||
},
|
||||
BuildCleanupData,
|
||||
},
|
||||
config::core_config,
|
||||
helpers::{
|
||||
channel::build_cancel_channel,
|
||||
periphery_client,
|
||||
query::{get_deployment_state, get_global_variables},
|
||||
update::{add_update, make_update, update_update},
|
||||
},
|
||||
resource::{self, refresh_build_state_cache},
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<RunBuild, User> for State {
|
||||
#[instrument(name = "RunBuild", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RunBuild { build }: RunBuild,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let mut build = resource::get_check_permissions::<Build>(
|
||||
&build,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// get the action state for the build (or insert default).
|
||||
let action_state =
|
||||
action_states().build.get_or_insert_default(&build.id).await;
|
||||
|
||||
// This will set action state back to default when dropped.
|
||||
// Will also check to ensure build not already busy before updating.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.building = true)?;
|
||||
|
||||
build.config.version.increment();
|
||||
|
||||
let mut update = make_update(&build, Operation::RunBuild, &user);
|
||||
update.in_progress();
|
||||
update.version = build.config.version.clone();
|
||||
|
||||
let cancel = CancellationToken::new();
|
||||
let cancel_clone = cancel.clone();
|
||||
let mut cancel_recv =
|
||||
build_cancel_channel().receiver.resubscribe();
|
||||
let build_id = build.id.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let poll = async {
|
||||
loop {
|
||||
let (incoming_build_id, mut update) = tokio::select! {
|
||||
_ = cancel_clone.cancelled() => return Ok(()),
|
||||
id = cancel_recv.recv() => id?
|
||||
};
|
||||
if incoming_build_id == build_id {
|
||||
info!("build cancel acknowledged");
|
||||
update.push_simple_log(
|
||||
"cancel acknowledged",
|
||||
"the build cancellation has been queud, it may still take some time",
|
||||
);
|
||||
update.finalize();
|
||||
let id = update.id.clone();
|
||||
if let Err(e) = update_update(update).await {
|
||||
warn!("failed to update Update {id} | {e:#}");
|
||||
}
|
||||
cancel_clone.cancel();
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
#[allow(unreachable_code)]
|
||||
anyhow::Ok(())
|
||||
};
|
||||
tokio::select! {
|
||||
_ = cancel_clone.cancelled() => {}
|
||||
_ = poll => {}
|
||||
}
|
||||
});
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
// GET BUILDER PERIPHERY
|
||||
|
||||
let (periphery, cleanup_data) =
|
||||
match get_build_builder(&build, &mut update).await {
|
||||
Ok(builder) => {
|
||||
info!("got builder for build");
|
||||
builder
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("failed to get builder | {e:#}");
|
||||
update.logs.push(Log::error(
|
||||
"get builder",
|
||||
serialize_error_pretty(&e),
|
||||
));
|
||||
return handle_early_return(update).await;
|
||||
}
|
||||
};
|
||||
|
||||
let core_config = core_config();
|
||||
let variables = get_global_variables().await?;
|
||||
|
||||
// CLONE REPO
|
||||
|
||||
let github_token = core_config
|
||||
.github_accounts
|
||||
.get(&build.config.github_account)
|
||||
.cloned();
|
||||
|
||||
let res = tokio::select! {
|
||||
res = periphery
|
||||
.request(api::git::CloneRepo {
|
||||
args: (&build).into(),
|
||||
github_token,
|
||||
}) => res,
|
||||
_ = cancel.cancelled() => {
|
||||
info!("build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
info!("builder cleaned up");
|
||||
return handle_early_return(update).await
|
||||
},
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(clone_logs) => {
|
||||
info!("finished repo clone");
|
||||
update.logs.extend(clone_logs);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("failed build at clone repo | {e:#}");
|
||||
update.push_error_log("clone repo", serialize_error(&e));
|
||||
}
|
||||
}
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if all_logs_success(&update.logs) {
|
||||
let docker_token = core_config
|
||||
.docker_accounts
|
||||
.get(&build.config.docker_account)
|
||||
.cloned();
|
||||
|
||||
// Interpolate variables / secrets into build args
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
for arg in &mut build.config.build_args {
|
||||
// first pass - global variables
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&arg.value,
|
||||
&variables,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate global variables")?;
|
||||
global_replacers.extend(more_replacers);
|
||||
// second pass - core secrets
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&res,
|
||||
&core_config.secrets,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate core secrets")?;
|
||||
secret_replacers.extend(more_replacers);
|
||||
arg.value = res;
|
||||
}
|
||||
|
||||
// Show which variables were interpolated
|
||||
if !global_replacers.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate global variables",
|
||||
global_replacers
|
||||
.into_iter()
|
||||
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
);
|
||||
}
|
||||
if !secret_replacers.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate core secrets",
|
||||
secret_replacers
|
||||
.iter()
|
||||
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
let res = tokio::select! {
|
||||
res = periphery
|
||||
.request(api::build::Build {
|
||||
build: build.clone(),
|
||||
docker_token,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
}) => res.context("failed at call to periphery to build"),
|
||||
_ = cancel.cancelled() => {
|
||||
info!("build cancelled during build, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
return handle_early_return(update).await
|
||||
},
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(logs) => {
|
||||
info!("finished build");
|
||||
update.logs.extend(logs);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("error in build | {e:#}");
|
||||
update.push_error_log("build", serialize_error(&e))
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
|
||||
let db = db_client().await;
|
||||
|
||||
if update.success {
|
||||
let _ = db
|
||||
.builds
|
||||
.update_one(
|
||||
doc! { "name": &build.name },
|
||||
doc! {
|
||||
"$set": {
|
||||
"config.version": to_bson(&build.config.version)
|
||||
.context("failed at converting version to bson")?,
|
||||
"info.last_built_at": monitor_timestamp(),
|
||||
}
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
cancel.cancel();
|
||||
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
// The Err case of to_document should be unreachable,
|
||||
// but will fail to update cache in that case.
|
||||
if let Ok(update_doc) = to_document(&update) {
|
||||
let _ = update_one_by_id(
|
||||
&db.updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
refresh_build_state_cache().await;
|
||||
}
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if update.success {
|
||||
// don't hold response up for user
|
||||
tokio::spawn(async move {
|
||||
handle_post_build_redeploy(&build.id).await;
|
||||
info!("post build redeploy handled");
|
||||
});
|
||||
}
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_early_return(
|
||||
mut update: Update,
|
||||
) -> anyhow::Result<Update> {
|
||||
update.finalize();
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
// The Err case of to_document should be unreachable,
|
||||
// but will fail to update cache in that case.
|
||||
if let Ok(update_doc) = to_document(&update) {
|
||||
let _ = update_one_by_id(
|
||||
&db_client().await.updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
refresh_build_state_cache().await;
|
||||
}
|
||||
update_update(update.clone()).await?;
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
impl Resolve<CancelBuild, User> for State {
|
||||
#[instrument(name = "CancelBuild", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CancelBuild { build }: CancelBuild,
|
||||
user: User,
|
||||
) -> anyhow::Result<CancelBuildResponse> {
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&build,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// check if theres already an open cancel build update
|
||||
if db_client()
|
||||
.await
|
||||
.updates
|
||||
.find_one(
|
||||
doc! {
|
||||
"operation": "CancelBuild",
|
||||
"status": "InProgress",
|
||||
"target.id": &build.id,
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to query updates")?
|
||||
.is_some()
|
||||
{
|
||||
return Err(anyhow!("Build cancel is already in progress"));
|
||||
}
|
||||
|
||||
let mut update =
|
||||
make_update(&build, Operation::CancelBuild, &user);
|
||||
|
||||
update.push_simple_log(
|
||||
"cancel triggered",
|
||||
"the build cancel has been triggered",
|
||||
);
|
||||
update.in_progress();
|
||||
|
||||
update.id =
|
||||
add_update(make_update(&build, Operation::CancelBuild, &user))
|
||||
.await?;
|
||||
|
||||
build_cancel_channel()
|
||||
.sender
|
||||
.lock()
|
||||
.await
|
||||
.send((build.id, update))?;
|
||||
|
||||
Ok(CancelBuildResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
const BUILDER_POLL_RATE_SECS: u64 = 2;
|
||||
const BUILDER_POLL_MAX_TRIES: usize = 30;
|
||||
|
||||
#[instrument]
|
||||
async fn get_build_builder(
|
||||
build: &Build,
|
||||
update: &mut Update,
|
||||
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
|
||||
if build.config.builder_id.is_empty() {
|
||||
return Err(anyhow!("build has not configured a builder"));
|
||||
}
|
||||
let builder =
|
||||
resource::get::<Builder>(&build.config.builder_id).await?;
|
||||
match builder.config {
|
||||
BuilderConfig::Server(config) => {
|
||||
if config.server_id.is_empty() {
|
||||
return Err(anyhow!("builder has not configured a server"));
|
||||
}
|
||||
let server = resource::get::<Server>(&config.server_id).await?;
|
||||
let periphery = periphery_client(&server)?;
|
||||
Ok((
|
||||
periphery,
|
||||
BuildCleanupData::Server {
|
||||
repo_name: build.name.clone(),
|
||||
},
|
||||
))
|
||||
}
|
||||
BuilderConfig::Aws(config) => {
|
||||
get_aws_builder(build, config, update).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn get_aws_builder(
|
||||
build: &Build,
|
||||
config: AwsBuilderConfig,
|
||||
update: &mut Update,
|
||||
) -> anyhow::Result<(PeripheryClient, BuildCleanupData)> {
|
||||
let start_create_ts = monitor_timestamp();
|
||||
|
||||
let instance_name =
|
||||
format!("BUILDER-{}-v{}", build.name, build.config.version);
|
||||
let Ec2Instance { instance_id, ip } = launch_ec2_instance(
|
||||
&instance_name,
|
||||
AwsServerTemplateConfig::from_builder_config(&config),
|
||||
)
|
||||
.await?;
|
||||
|
||||
info!("ec2 instance launched");
|
||||
|
||||
let log = Log {
|
||||
stage: "start build instance".to_string(),
|
||||
success: true,
|
||||
stdout: start_aws_builder_log(&instance_id, &ip, &config),
|
||||
start_ts: start_create_ts,
|
||||
end_ts: monitor_timestamp(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery_address = format!("http://{ip}:{}", config.port);
|
||||
let periphery =
|
||||
PeripheryClient::new(&periphery_address, &core_config().passkey);
|
||||
|
||||
let start_connect_ts = monitor_timestamp();
|
||||
let mut res = Ok(GetVersionResponse {
|
||||
version: String::new(),
|
||||
});
|
||||
for _ in 0..BUILDER_POLL_MAX_TRIES {
|
||||
let version = periphery
|
||||
.request(api::GetVersion {})
|
||||
.await
|
||||
.context("failed to reach periphery client on builder");
|
||||
if let Ok(GetVersionResponse { version }) = &version {
|
||||
let connect_log = Log {
|
||||
stage: "build instance connected".to_string(),
|
||||
success: true,
|
||||
stdout: format!(
|
||||
"established contact with periphery on builder\nperiphery version: v{}",
|
||||
version
|
||||
),
|
||||
start_ts: start_connect_ts,
|
||||
end_ts: monitor_timestamp(),
|
||||
..Default::default()
|
||||
};
|
||||
update.logs.push(connect_log);
|
||||
update_update(update.clone()).await?;
|
||||
return Ok((
|
||||
periphery,
|
||||
BuildCleanupData::Aws {
|
||||
instance_id,
|
||||
region: config.region,
|
||||
},
|
||||
));
|
||||
}
|
||||
res = version;
|
||||
tokio::time::sleep(Duration::from_secs(BUILDER_POLL_RATE_SECS))
|
||||
.await;
|
||||
}
|
||||
tokio::spawn(async move {
|
||||
let _ =
|
||||
terminate_ec2_instance_with_retry(config.region, &instance_id)
|
||||
.await;
|
||||
});
|
||||
|
||||
// Unwrap is safe, only way to get here is after check Ok / early return, so it must be err
|
||||
Err(res.err().unwrap())
|
||||
}
|
||||
|
||||
#[instrument(skip(periphery))]
|
||||
async fn cleanup_builder_instance(
|
||||
periphery: PeripheryClient,
|
||||
cleanup_data: BuildCleanupData,
|
||||
update: &mut Update,
|
||||
) {
|
||||
match cleanup_data {
|
||||
BuildCleanupData::Server { repo_name } => {
|
||||
let _ = periphery
|
||||
.request(api::git::DeleteRepo { name: repo_name })
|
||||
.await;
|
||||
}
|
||||
BuildCleanupData::Aws {
|
||||
instance_id,
|
||||
region,
|
||||
} => {
|
||||
let _instance_id = instance_id.clone();
|
||||
tokio::spawn(async move {
|
||||
let _ =
|
||||
terminate_ec2_instance_with_retry(region, &_instance_id)
|
||||
.await;
|
||||
});
|
||||
update.push_simple_log(
|
||||
"terminate instance",
|
||||
format!("termination queued for instance id {instance_id}"),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn handle_post_build_redeploy(build_id: &str) {
|
||||
let Ok(redeploy_deployments) = find_collect(
|
||||
&db_client().await.deployments,
|
||||
doc! {
|
||||
"config.image.params.build_id": build_id,
|
||||
"config.redeploy_on_build": true
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
else {
|
||||
return;
|
||||
};
|
||||
|
||||
let futures =
|
||||
redeploy_deployments
|
||||
.into_iter()
|
||||
.map(|deployment| async move {
|
||||
let state =
|
||||
get_deployment_state(&deployment).await.unwrap_or_default();
|
||||
if state == DeploymentState::Running {
|
||||
let res = State
|
||||
.resolve(
|
||||
Deploy {
|
||||
deployment: deployment.id.clone(),
|
||||
stop_signal: None,
|
||||
stop_time: None,
|
||||
},
|
||||
auto_redeploy_user().to_owned(),
|
||||
)
|
||||
.await;
|
||||
Some((deployment.id.clone(), res))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
let redeploy_results = join_all(futures).await;
|
||||
|
||||
let mut redeploys = Vec::<String>::new();
|
||||
let mut redeploy_failures = Vec::<String>::new();
|
||||
|
||||
for res in redeploy_results {
|
||||
if res.is_none() {
|
||||
continue;
|
||||
}
|
||||
let (id, res) = res.unwrap();
|
||||
match res {
|
||||
Ok(_) => redeploys.push(id),
|
||||
Err(e) => redeploy_failures.push(format!("{id}: {e:#?}")),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn start_aws_builder_log(
|
||||
instance_id: &str,
|
||||
ip: &str,
|
||||
config: &AwsBuilderConfig,
|
||||
) -> String {
|
||||
let AwsBuilderConfig {
|
||||
ami_id,
|
||||
instance_type,
|
||||
volume_gb,
|
||||
subnet_id,
|
||||
assign_public_ip,
|
||||
security_group_ids,
|
||||
use_public_ip,
|
||||
..
|
||||
} = config;
|
||||
|
||||
let readable_sec_group_ids = security_group_ids.join(", ");
|
||||
|
||||
format!("instance id: {instance_id}\nip: {ip}\nami id: {ami_id}\ninstance type: {instance_type}\nvolume size: {volume_gb} GB\nsubnet id: {subnet_id}\nsecurity groups: {readable_sec_group_ids}\nassign public ip: {assign_public_ip}\nuse public ip: {use_public_ip}")
|
||||
}
|
||||
507
bin/core/src/api/execute/deployment.rs
Normal file
507
bin/core/src/api/execute/deployment.rs
Normal file
@@ -0,0 +1,507 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use futures::future::join_all;
|
||||
use monitor_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
build::Build,
|
||||
deployment::{Deployment, DeploymentImage},
|
||||
get_image_name, monitor_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::ServerState,
|
||||
update::{Log, ResourceTarget, Update, UpdateStatus},
|
||||
user::User,
|
||||
Operation, Version,
|
||||
},
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use serror::serialize_error_pretty;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::{
|
||||
periphery_client,
|
||||
query::{get_global_variables, get_server_with_status},
|
||||
update::{add_update, make_update, update_update},
|
||||
},
|
||||
monitor::update_cache_for_server,
|
||||
resource,
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<Deploy, User> for State {
|
||||
#[instrument(name = "Deploy", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
Deploy {
|
||||
deployment,
|
||||
stop_signal,
|
||||
stop_time,
|
||||
}: Deploy,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let mut deployment =
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if deployment.config.server_id.is_empty() {
|
||||
return Err(anyhow!("deployment has no server configured"));
|
||||
}
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
.deployment
|
||||
.get_or_insert_default(&deployment.id)
|
||||
.await;
|
||||
|
||||
// Will check to ensure deployment not already busy before updating, and return Err if so.
|
||||
// The returned guard will set the action state back to default when dropped.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.deploying = true)?;
|
||||
|
||||
let (server, status) =
|
||||
get_server_with_status(&deployment.config.server_id).await?;
|
||||
if status != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"cannot send action when server is unreachable or disabled"
|
||||
));
|
||||
}
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let version = match deployment.config.image {
|
||||
DeploymentImage::Build { build_id, version } => {
|
||||
let build = resource::get::<Build>(&build_id).await?;
|
||||
let image_name = get_image_name(&build);
|
||||
let version = if version.is_none() {
|
||||
build.config.version
|
||||
} else {
|
||||
version
|
||||
};
|
||||
// replace image with corresponding build image.
|
||||
deployment.config.image = DeploymentImage::Image {
|
||||
image: format!("{image_name}:{version}"),
|
||||
};
|
||||
// set docker account to match build docker account if it's not overridden by deployment
|
||||
if deployment.config.docker_account.is_empty() {
|
||||
deployment.config.docker_account =
|
||||
build.config.docker_account;
|
||||
}
|
||||
version
|
||||
}
|
||||
DeploymentImage::Image { .. } => Version::default(),
|
||||
};
|
||||
|
||||
let variables = get_global_variables().await?;
|
||||
let core_config = core_config();
|
||||
|
||||
// Interpolate variables into environment
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
for env in &mut deployment.config.environment {
|
||||
// first pass - global variables
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&env.value,
|
||||
&variables,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate global variables")?;
|
||||
global_replacers.extend(more_replacers);
|
||||
// second pass - core secrets
|
||||
let (res, more_replacers) = svi::interpolate_variables(
|
||||
&res,
|
||||
&core_config.secrets,
|
||||
svi::Interpolator::DoubleBrackets,
|
||||
false,
|
||||
)
|
||||
.context("failed to interpolate core secrets")?;
|
||||
secret_replacers.extend(more_replacers);
|
||||
|
||||
// set env value with the result
|
||||
env.value = res;
|
||||
}
|
||||
|
||||
let mut update =
|
||||
make_update(&deployment, Operation::DeployContainer, &user);
|
||||
update.in_progress();
|
||||
update.version = version;
|
||||
|
||||
// Show which variables were interpolated
|
||||
if !global_replacers.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate global variables",
|
||||
global_replacers
|
||||
.into_iter()
|
||||
.map(|(value, variable)| format!("<span class=\"text-muted-foreground\">{variable} =></span> {value}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
);
|
||||
}
|
||||
if !secret_replacers.is_empty() {
|
||||
update.push_simple_log(
|
||||
"interpolate core secrets",
|
||||
secret_replacers
|
||||
.iter()
|
||||
.map(|(_, variable)| format!("<span class=\"text-muted-foreground\">replaced:</span> {variable}"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n"),
|
||||
);
|
||||
}
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let docker_token = core_config
|
||||
.docker_accounts
|
||||
.get(&deployment.config.docker_account)
|
||||
.cloned();
|
||||
|
||||
match periphery
|
||||
.request(api::container::Deploy {
|
||||
deployment,
|
||||
stop_signal,
|
||||
stop_time,
|
||||
docker_token,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(log) => update.logs.push(log),
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"deploy container",
|
||||
serialize_error_pretty(&e),
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<StartContainer, User> for State {
|
||||
#[instrument(name = "StartContainer", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
StartContainer { deployment }: StartContainer,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
.deployment
|
||||
.get_or_insert_default(&deployment.id)
|
||||
.await;
|
||||
|
||||
// Will check to ensure deployment not already busy before updating, and return Err if so.
|
||||
// The returned guard will set the action state back to default when dropped.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.starting = true)?;
|
||||
|
||||
if deployment.config.server_id.is_empty() {
|
||||
return Err(anyhow!("deployment has no server configured"));
|
||||
}
|
||||
|
||||
let (server, status) =
|
||||
get_server_with_status(&deployment.config.server_id).await?;
|
||||
if status != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"cannot send action when server is unreachable or disabled"
|
||||
));
|
||||
}
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let start_ts = monitor_timestamp();
|
||||
|
||||
let mut update = Update {
|
||||
target: ResourceTarget::Deployment(deployment.id.clone()),
|
||||
operation: Operation::StartContainer,
|
||||
start_ts,
|
||||
status: UpdateStatus::InProgress,
|
||||
success: true,
|
||||
operator: user.id.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StartContainer {
|
||||
name: deployment.name.clone(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(log) => log,
|
||||
Err(e) => {
|
||||
Log::error("start container", serialize_error_pretty(&e))
|
||||
}
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update.finalize();
|
||||
update_cache_for_server(&server).await;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
// Stops a single deployment's container via the server's periphery
// agent, recording progress and the result as an Update document.
impl Resolve<StopContainer, User> for State {
  #[instrument(name = "StopContainer", skip(self, user))]
  async fn resolve(
    &self,
    StopContainer {
      deployment,
      signal,
      time,
    }: StopContainer,
    user: User,
  ) -> anyhow::Result<Update> {
    // Requires Execute-level permission on the target deployment.
    let deployment = resource::get_check_permissions::<Deployment>(
      &deployment,
      &user,
      PermissionLevel::Execute,
    )
    .await?;

    // get the action state for the deployment (or insert default).
    let action_state = action_states()
      .deployment
      .get_or_insert_default(&deployment.id)
      .await;

    // Will check to ensure deployment not already busy before updating, and return Err if so.
    // The returned guard will set the action state back to default when dropped.
    let _action_guard =
      action_state.update(|state| state.stopping = true)?;

    if deployment.config.server_id.is_empty() {
      return Err(anyhow!("deployment has no server configured"));
    }

    // Only send the action if the server is reachable and enabled.
    let (server, status) =
      get_server_with_status(&deployment.config.server_id).await?;
    if status != ServerState::Ok {
      return Err(anyhow!(
        "cannot send action when server is unreachable or disabled"
      ));
    }

    let periphery = periphery_client(&server)?;

    let mut update =
      make_update(&deployment, Operation::StopContainer, &user);

    // Persist the in-progress update first so its id can be attached.
    update.id = add_update(update.clone()).await?;

    // Caller-supplied signal / timeout take precedence; otherwise fall
    // back to the deployment's configured termination settings.
    let log = match periphery
      .request(api::container::StopContainer {
        name: deployment.name.clone(),
        signal: signal
          .unwrap_or(deployment.config.termination_signal)
          .into(),
        time: time
          .unwrap_or(deployment.config.termination_timeout)
          .into(),
      })
      .await
    {
      Ok(log) => log,
      Err(e) => {
        // Request failure becomes an error log on the update rather
        // than an Err return, so the update is still finalized.
        Log::error("stop container", serialize_error_pretty(&e))
      }
    };

    update.logs.push(log);
    update.finalize();
    // Refresh the cached container state for the server before
    // broadcasting the finalized update.
    update_cache_for_server(&server).await;
    update_update(update.clone()).await?;

    Ok(update)
  }
}
|
||||
|
||||
// Stops every deployment's container on a given server by fanning out
// to the StopContainer resolver, collecting per-deployment results.
impl Resolve<StopAllContainers, User> for State {
  #[instrument(name = "StopAllContainers", skip(self, user))]
  async fn resolve(
    &self,
    StopAllContainers { server }: StopAllContainers,
    user: User,
  ) -> anyhow::Result<Update> {
    let (server, status) = get_server_with_status(&server).await?;
    if status != ServerState::Ok {
      return Err(anyhow!(
        "cannot send action when server is unreachable or disabled"
      ));
    }

    // get the action state for the server (or insert default).
    let action_state = action_states()
      .server
      .get_or_insert_default(&server.id)
      .await;

    // Will check to ensure server not already busy before updating, and return Err if so.
    // The returned guard will set the action state back to default when dropped.
    let _action_guard = action_state
      .update(|state| state.stopping_containers = true)?;

    let deployments = find_collect(
      &db_client().await.deployments,
      doc! {
        "config.server_id": &server.id
      },
      None,
    )
    .await
    .context("failed to find deployments on server")?;

    let mut update =
      make_update(&server, Operation::StopAllContainers, &user);
    update.in_progress();
    update.id = add_update(update.clone()).await?;

    // Stop all deployments concurrently. Each inner StopContainer
    // performs its own permission check and busy-guard, so a busy
    // deployment only fails that one entry.
    let futures = deployments.iter().map(|deployment| async {
      (
        self
          .resolve(
            StopContainer {
              deployment: deployment.id.clone(),
              signal: None,
              time: None,
            },
            user.clone(),
          )
          .await,
        deployment.name.clone(),
        deployment.id.clone(),
      )
    });
    let results = join_all(futures).await;
    let deployment_names = deployments
      .iter()
      .map(|d| format!("{} ({})", d.name, d.id))
      .collect::<Vec<_>>()
      .join("\n");
    update.push_simple_log("stopping containers", deployment_names);
    // Only failures get a dedicated log entry here; successes are
    // already recorded by the individual StopContainer updates.
    for (res, name, id) in results {
      if let Err(e) = res {
        update.push_error_log(
          "stop container failure",
          format!(
            "failed to stop container {name} ({id})\n\n{}",
            serialize_error_pretty(&e)
          ),
        );
      }
    }

    update.finalize();
    update_update(update.clone()).await?;

    Ok(update)
  }
}
|
||||
|
||||
impl Resolve<RemoveContainer, User> for State {
|
||||
#[instrument(name = "RemoveContainer", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RemoveContainer {
|
||||
deployment,
|
||||
signal,
|
||||
time,
|
||||
}: RemoveContainer,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
.deployment
|
||||
.get_or_insert_default(&deployment.id)
|
||||
.await;
|
||||
|
||||
// Will check to ensure deployment not already busy before updating, and return Err if so.
|
||||
// The returned guard will set the action state back to default when dropped.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.removing = true)?;
|
||||
|
||||
if deployment.config.server_id.is_empty() {
|
||||
return Err(anyhow!("deployment has no server configured"));
|
||||
}
|
||||
|
||||
let (server, status) =
|
||||
get_server_with_status(&deployment.config.server_id).await?;
|
||||
if status != ServerState::Ok {
|
||||
return Err(anyhow!(
|
||||
"cannot send action when server is unreachable or disabled"
|
||||
));
|
||||
}
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let start_ts = monitor_timestamp();
|
||||
|
||||
let mut update = Update {
|
||||
target: ResourceTarget::Deployment(deployment.id.clone()),
|
||||
operation: Operation::RemoveContainer,
|
||||
start_ts,
|
||||
status: UpdateStatus::InProgress,
|
||||
success: true,
|
||||
operator: user.id.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RemoveContainer {
|
||||
name: deployment.name.clone(),
|
||||
signal: signal
|
||||
.unwrap_or(deployment.config.termination_signal)
|
||||
.into(),
|
||||
time: time
|
||||
.unwrap_or(deployment.config.termination_timeout)
|
||||
.into(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(log) => log,
|
||||
Err(e) => {
|
||||
Log::error("stop container", serialize_error_pretty(&e))
|
||||
}
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update.finalize();
|
||||
update_cache_for_server(&server).await;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
109
bin/core/src/api/execute/mod.rs
Normal file
109
bin/core/src/api/execute/mod.rs
Normal file
@@ -0,0 +1,109 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use monitor_client::{api::execute::*, entities::user::User};
|
||||
use resolver_api::{derive::Resolver, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{auth::auth_request, state::State};
|
||||
|
||||
mod build;
|
||||
mod deployment;
|
||||
mod procedure;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
|
||||
/// Tagged union of all requests accepted by the `/execute` endpoint.
/// Serialized as `{ "type": ..., "params": ... }` and dispatched by the
/// derived `Resolver` to the matching `Resolve` impl on `State`.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum ExecuteRequest {
  // ==== SERVER ====
  PruneContainers(PruneDockerContainers),
  PruneImages(PruneDockerImages),
  PruneNetworks(PruneDockerNetworks),

  // ==== DEPLOYMENT ====
  Deploy(Deploy),
  StartContainer(StartContainer),
  StopContainer(StopContainer),
  StopAllContainers(StopAllContainers),
  RemoveContainer(RemoveContainer),

  // ==== BUILD ====
  RunBuild(RunBuild),
  CancelBuild(CancelBuild),

  // ==== REPO ====
  CloneRepo(CloneRepo),
  PullRepo(PullRepo),

  // ==== PROCEDURE ====
  RunProcedure(RunProcedure),

  // ==== SERVER TEMPLATE ====
  LaunchServer(LaunchServer),
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
// Axum handler for POST `/execute`. Runs resolution in a spawned task
// so its outcome (including panics) is surfaced via the JoinHandle.
async fn handler(
  Extension(user): Extension<User>,
  Json(request): Json<ExecuteRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
  // Correlation id tying together all log lines for this request.
  let req_id = Uuid::new_v4();

  let res = tokio::spawn(task(req_id, request, user))
    .await
    .context("failure in spawned execute task");

  if let Err(e) = &res {
    warn!("/execute request {req_id} spawn error: {e:#}",);
  }

  // `res??`: first `?` propagates the join/spawn error, second the
  // task's own resolution error.
  Ok((TypedHeader(ContentType::json()), res??))
}
|
||||
|
||||
// Resolves a single ExecuteRequest, logging the requesting user, any
// error, and the total resolve time under the request's correlation id.
#[instrument(name = "ExecuteRequest", skip(user))]
async fn task(
  req_id: Uuid,
  request: ExecuteRequest,
  user: User,
) -> anyhow::Result<String> {
  info!(
    "/execute request {req_id} | user: {} ({})",
    user.username, user.id
  );
  let timer = Instant::now();

  // Flatten the resolver's two-variant error into anyhow: a
  // serialization failure gets context, an inner error passes through.
  let res =
    State
      .resolve_request(request, user)
      .await
      .map_err(|e| match e {
        resolver_api::Error::Serialization(e) => {
          anyhow!("{e:?}").context("response serialization error")
        }
        resolver_api::Error::Inner(e) => e,
      });

  if let Err(e) = &res {
    warn!("/execute request {req_id} error: {e:#}");
  }

  let elapsed = timer.elapsed();
  info!("/execute request {req_id} | resolve time: {elapsed:?}");

  res
}
|
||||
112
bin/core/src/api/execute/procedure.rs
Normal file
112
bin/core/src/api/execute/procedure.rs
Normal file
@@ -0,0 +1,112 @@
|
||||
use std::pin::Pin;
|
||||
|
||||
use monitor_client::{
|
||||
api::execute::RunProcedure,
|
||||
entities::{
|
||||
permission::PermissionLevel, procedure::Procedure,
|
||||
update::Update, user::User, Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use resolver_api::Resolve;
|
||||
use serror::serialize_error_pretty;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
procedure::execute_procedure,
|
||||
update::{add_update, make_update, update_update},
|
||||
},
|
||||
resource::{self, refresh_procedure_state_cache},
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<RunProcedure, User> for State {
  #[instrument(name = "RunProcedure", skip(self, user))]
  async fn resolve(
    &self,
    RunProcedure { procedure }: RunProcedure,
    user: User,
  ) -> anyhow::Result<Update> {
    // Delegates to a helper that returns a boxed future — presumably
    // so procedure execution can recurse (a procedure triggering other
    // executions) without an infinitely-sized async state machine.
    // TODO(review): confirm the recursion path in execute_procedure.
    resolve_inner(procedure, user).await
  }
}
|
||||
|
||||
// Runs a procedure end-to-end: permission check, busy-guard, update
// bookkeeping, execution, and finalization. Returns a boxed future so
// the type is nameable / usable from recursive contexts.
fn resolve_inner(
  procedure: String,
  user: User,
) -> Pin<
  Box<
    dyn std::future::Future<Output = anyhow::Result<Update>> + Send,
  >,
> {
  Box::pin(async move {
    let procedure = resource::get_check_permissions::<Procedure>(
      &procedure,
      &user,
      PermissionLevel::Execute,
    )
    .await?;

    // get the action state for the procedure (or insert default).
    let action_state = action_states()
      .procedure
      .get_or_insert_default(&procedure.id)
      .await;

    // This will set action state back to default when dropped.
    // Will also check to ensure procedure not already busy before updating.
    let _action_guard =
      action_state.update(|state| state.running = true)?;

    let mut update =
      make_update(&procedure, Operation::RunProcedure, &user);
    update.in_progress();
    update.push_simple_log(
      "execute procedure",
      format!("Executing procedure: {}", procedure.name),
    );

    update.id = add_update(update.clone()).await?;

    // Wrap in a Mutex so execute_procedure can append logs to the
    // shared update while it runs.
    let update = Mutex::new(update);

    let res = execute_procedure(&procedure, &update).await;

    // Execution is done — take the update back out of the Mutex.
    let mut update = update.into_inner();

    match res {
      Ok(_) => {
        update.push_simple_log(
          "execution ok",
          "the procedure has completed with no errors",
        );
      }
      Err(e) => update.push_error_log(
        "execution error",
        serialize_error_pretty(&e),
      ),
    }

    update.finalize();

    // Need to manually update the update before cache refresh,
    // and before broadcast with add_update.
    // The Err case of to_document should be unreachable,
    // but will fail to update cache in that case.
    if let Ok(update_doc) = to_document(&update) {
      let _ = update_one_by_id(
        &db_client().await.updates,
        &update.id,
        mungos::update::Update::Set(update_doc),
        None,
      )
      .await;
      refresh_procedure_state_cache().await;
    }

    update_update(update.clone()).await?;

    Ok(update)
  })
}
|
||||
216
bin/core/src/api/execute/repo.rs
Normal file
216
bin/core/src/api/execute/repo.rs
Normal file
@@ -0,0 +1,216 @@
|
||||
use anyhow::anyhow;
|
||||
use monitor_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
monitor_timestamp, optional_string,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
update::{Log, ResourceTarget, Update, UpdateStatus},
|
||||
user::User,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use serror::serialize_error_pretty;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::{
|
||||
periphery_client,
|
||||
update::{add_update, update_update},
|
||||
},
|
||||
resource::{self, refresh_repo_state_cache},
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
// Clones a repo onto its attached server via the periphery agent,
// recording the result as an Update document.
impl Resolve<CloneRepo, User> for State {
  #[instrument(name = "CloneRepo", skip(self, user))]
  async fn resolve(
    &self,
    CloneRepo { repo }: CloneRepo,
    user: User,
  ) -> anyhow::Result<Update> {
    // Requires Execute-level permission on the target repo.
    let repo = resource::get_check_permissions::<Repo>(
      &repo,
      &user,
      PermissionLevel::Execute,
    )
    .await?;

    // get the action state for the repo (or insert default).
    let action_state =
      action_states().repo.get_or_insert_default(&repo.id).await;

    // This will set action state back to default when dropped.
    // Will also check to ensure repo not already busy before updating.
    let _action_guard =
      action_state.update(|state| state.cloning = true)?;

    if repo.config.server_id.is_empty() {
      return Err(anyhow!("repo has no server attached"));
    }

    let server =
      resource::get::<Server>(&repo.config.server_id).await?;

    let periphery = periphery_client(&server)?;

    let start_ts = monitor_timestamp();

    let mut update = Update {
      operation: Operation::CloneRepo,
      target: ResourceTarget::Repo(repo.id.clone()),
      start_ts,
      status: UpdateStatus::InProgress,
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };

    update.id = add_update(update.clone()).await?;

    // Attach the configured github account's token, if one exists.
    let github_token = core_config()
      .github_accounts
      .get(&repo.config.github_account)
      .cloned();

    let logs = match periphery
      .request(api::git::CloneRepo {
        args: (&repo).into(),
        github_token,
      })
      .await
    {
      Ok(logs) => logs,
      Err(e) => {
        // Request failure becomes an error log entry rather than an
        // Err return, so the update is still finalized below.
        vec![Log::error("clone repo", serialize_error_pretty(&e))]
      }
    };

    update.logs.extend(logs);
    update.finalize();

    // A successful clone also refreshes the repo's last_pulled_at.
    if update.success {
      update_last_pulled(&repo.name).await;
    }

    handle_update_return(update).await
  }
}
|
||||
|
||||
// Pulls the latest changes for a repo on its attached server via the
// periphery agent, recording the result as an Update document.
impl Resolve<PullRepo, User> for State {
  #[instrument(name = "PullRepo", skip(self, user))]
  async fn resolve(
    &self,
    PullRepo { repo }: PullRepo,
    user: User,
  ) -> anyhow::Result<Update> {
    // Requires Execute-level permission on the target repo.
    let repo = resource::get_check_permissions::<Repo>(
      &repo,
      &user,
      PermissionLevel::Execute,
    )
    .await?;

    // get the action state for the repo (or insert default).
    let action_state =
      action_states().repo.get_or_insert_default(&repo.id).await;

    // This will set action state back to default when dropped.
    // Will also check to ensure repo not already busy before updating.
    let _action_guard =
      action_state.update(|state| state.pulling = true)?;

    if repo.config.server_id.is_empty() {
      return Err(anyhow!("repo has no server attached"));
    }

    let server =
      resource::get::<Server>(&repo.config.server_id).await?;

    let periphery = periphery_client(&server)?;

    let start_ts = monitor_timestamp();

    let mut update = Update {
      operation: Operation::PullRepo,
      target: ResourceTarget::Repo(repo.id.clone()),
      start_ts,
      status: UpdateStatus::InProgress,
      operator: user.id.clone(),
      success: true,
      ..Default::default()
    };

    update.id = add_update(update.clone()).await?;

    // Empty branch / commit strings are passed as None.
    let logs = match periphery
      .request(api::git::PullRepo {
        name: repo.name.clone(),
        branch: optional_string(&repo.config.branch),
        commit: optional_string(&repo.config.commit),
        on_pull: repo.config.on_pull.into_option(),
      })
      .await
    {
      Ok(logs) => logs,
      Err(e) => {
        // Request failure becomes an error log entry rather than an
        // Err return, so the update is still finalized below.
        vec![Log::error("pull repo", serialize_error_pretty(&e))]
      }
    };

    update.logs.extend(logs);

    update.finalize();

    // A successful pull refreshes the repo's last_pulled_at.
    if update.success {
      update_last_pulled(&repo.name).await;
    }

    handle_update_return(update).await
  }
}
|
||||
|
||||
// Persists a finalized update to the db, refreshes the repo state
// cache, then broadcasts the update. Returns the update for the caller
// to hand back to the client.
async fn handle_update_return(
  update: Update,
) -> anyhow::Result<Update> {
  // Need to manually update the update before cache refresh,
  // and before broadcast with add_update.
  // The Err case of to_document should be unreachable,
  // but will fail to update cache in that case.
  if let Ok(update_doc) = to_document(&update) {
    // Write errors are deliberately ignored (best-effort).
    let _ = update_one_by_id(
      &db_client().await.updates,
      &update.id,
      mungos::update::Update::Set(update_doc),
      None,
    )
    .await;
    refresh_repo_state_cache().await;
  }
  update_update(update.clone()).await?;
  Ok(update)
}
|
||||
|
||||
async fn update_last_pulled(repo_name: &str) {
|
||||
let res = db_client()
|
||||
.await
|
||||
.repos
|
||||
.update_one(
|
||||
doc! { "name": repo_name },
|
||||
doc! { "$set": { "info.last_pulled_at": monitor_timestamp() } },
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
if let Err(e) = res {
|
||||
warn!(
|
||||
"failed to update repo last_pulled_at | repo: {repo_name} | {e:#}",
|
||||
);
|
||||
}
|
||||
}
|
||||
190
bin/core/src/api/execute/server.rs
Normal file
190
bin/core/src/api/execute/server.rs
Normal file
@@ -0,0 +1,190 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
monitor_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
update::{Log, Update, UpdateStatus},
|
||||
user::User,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use serror::serialize_error_pretty;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
periphery_client,
|
||||
update::{add_update, make_update, update_update},
|
||||
},
|
||||
resource,
|
||||
state::{action_states, State},
|
||||
};
|
||||
|
||||
// Prunes stopped docker containers on the target server via the
// periphery agent, recording the result as an Update document.
impl Resolve<PruneDockerContainers, User> for State {
  #[instrument(name = "PruneDockerContainers", skip(self, user))]
  async fn resolve(
    &self,
    PruneDockerContainers { server }: PruneDockerContainers,
    user: User,
  ) -> anyhow::Result<Update> {
    // Requires Execute-level permission on the target server.
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Execute,
    )
    .await?;

    // get the action state for the server (or insert default).
    let action_state = action_states()
      .server
      .get_or_insert_default(&server.id)
      .await;

    // Will check to ensure server not already busy before updating, and return Err if so.
    // The returned guard will set the action state back to default when dropped.
    let _action_guard =
      action_state.update(|state| state.pruning_containers = true)?;

    let periphery = periphery_client(&server)?;

    let mut update =
      make_update(&server, Operation::PruneContainersServer, &user);
    update.in_progress();
    update.id = add_update(update.clone()).await?;

    // A failed request becomes an error log entry rather than an Err
    // return, so the update is still completed below.
    let log = match periphery
      .request(api::container::PruneContainers {})
      .await
      .context(format!(
        "failed to prune containers on server {}",
        server.name
      )) {
      Ok(log) => log,
      Err(e) => {
        Log::error("prune containers", serialize_error_pretty(&e))
      }
    };

    // Completion fields are set manually here (single log), rather
    // than via update.finalize().
    update.success = log.success;
    update.status = UpdateStatus::Complete;
    update.end_ts = Some(monitor_timestamp());
    update.logs.push(log);

    update_update(update.clone()).await?;

    Ok(update)
  }
}
|
||||
|
||||
impl Resolve<PruneDockerNetworks, User> for State {
|
||||
#[instrument(name = "PruneDockerNetworks", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
PruneDockerNetworks { server }: PruneDockerNetworks,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// get the action state for the server (or insert default).
|
||||
let action_state = action_states()
|
||||
.server
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
|
||||
// Will check to ensure server not already busy before updating, and return Err if so.
|
||||
// The returned guard will set the action state back to default when dropped.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.pruning_networks = true)?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::PruneNetworksServer, &user);
|
||||
update.in_progress();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::network::PruneNetworks {})
|
||||
.await
|
||||
.context(format!(
|
||||
"failed to prune networks on server {}",
|
||||
server.name
|
||||
)) {
|
||||
Ok(log) => log,
|
||||
Err(e) => {
|
||||
Log::error("prune networks", serialize_error_pretty(&e))
|
||||
}
|
||||
};
|
||||
|
||||
update.success = log.success;
|
||||
update.status = UpdateStatus::Complete;
|
||||
update.end_ts = Some(monitor_timestamp());
|
||||
update.logs.push(log);
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
// Prunes unused docker images on the target server via the periphery
// agent, recording the result as an Update document.
impl Resolve<PruneDockerImages, User> for State {
  #[instrument(name = "PruneDockerImages", skip(self, user))]
  async fn resolve(
    &self,
    PruneDockerImages { server }: PruneDockerImages,
    user: User,
  ) -> anyhow::Result<Update> {
    // Requires Execute-level permission on the target server.
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Execute,
    )
    .await?;

    // get the action state for the server (or insert default).
    let action_state = action_states()
      .server
      .get_or_insert_default(&server.id)
      .await;

    // Will check to ensure server not already busy before updating, and return Err if so.
    // The returned guard will set the action state back to default when dropped.
    let _action_guard =
      action_state.update(|state| state.pruning_images = true)?;

    let periphery = periphery_client(&server)?;

    let mut update =
      make_update(&server, Operation::PruneImagesServer, &user);
    update.in_progress();
    update.id = add_update(update.clone()).await?;

    // A failed request becomes an error log entry rather than an Err
    // return, so the update is still finalized below.
    let log =
      match periphery.request(api::build::PruneImages {}).await {
        Ok(log) => log,
        Err(e) => Log::error(
          "prune images",
          format!(
            "failed to prune images on server {} | {e:#?}",
            server.name
          ),
        ),
      };

    update.logs.push(log);

    update.finalize();

    update_update(update.clone()).await?;

    Ok(update)
  }
}
|
||||
115
bin/core/src/api/execute/server_template.rs
Normal file
115
bin/core/src/api/execute/server_template.rs
Normal file
@@ -0,0 +1,115 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::{execute::LaunchServer, write::CreateServer},
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
server::PartialServerConfig,
|
||||
server_template::{ServerTemplate, ServerTemplateConfig},
|
||||
update::Update,
|
||||
user::User,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
use serror::serialize_error_pretty;
|
||||
|
||||
use crate::{
|
||||
cloud::aws::launch_ec2_instance, helpers::update::{add_update, make_update, update_update}, resource, state::{db_client, State}
|
||||
};
|
||||
|
||||
// Launches a new cloud instance from a server template, then registers
// it as a Server resource, recording everything as an Update document.
impl Resolve<LaunchServer, User> for State {
  #[instrument(name = "LaunchServer", skip(self, user))]
  async fn resolve(
    &self,
    LaunchServer {
      name,
      server_template,
    }: LaunchServer,
    user: User,
  ) -> anyhow::Result<Update> {
    // validate name isn't already taken by another server
    if db_client()
      .await
      .servers
      .find_one(
        doc! {
          "name": &name
        },
        None,
      )
      .await
      .context("failed to query db for servers")?
      .is_some()
    {
      return Err(anyhow!("name is already taken"));
    }

    // Requires Execute-level permission on the server template.
    let template = resource::get_check_permissions::<ServerTemplate>(
      &server_template,
      &user,
      PermissionLevel::Execute,
    )
    .await?;

    let mut update =
      make_update(&template, Operation::LaunchServer, &user);
    update.in_progress();
    update.push_simple_log(
      "launching server",
      format!("{:#?}", template.config),
    );
    update.id = add_update(update.clone()).await?;

    // Launch the instance per provider config and build the partial
    // server config pointing at its periphery address.
    let config = match template.config {
      ServerTemplateConfig::Aws(config) => {
        let region = config.region.clone();
        let instance = launch_ec2_instance(&name, config).await;
        // Launch failure: finalize the update with the error and
        // return it successfully (the update itself carries failure).
        if let Err(e) = &instance {
          update.push_error_log(
            "launch server",
            format!("failed to launch aws instance\n\n{e:#?}"),
          );
          update.finalize();
          update_update(update.clone()).await?;
          return Ok(update);
        }
        let instance = instance.unwrap();
        update.push_simple_log(
          "launch server",
          format!(
            "successfully launched server {name} on ip {}",
            instance.ip
          ),
        );
        PartialServerConfig {
          address: format!("http://{}:8120", instance.ip).into(),
          region: region.into(),
          ..Default::default()
        }
      }
    };

    // Register the launched instance as a Server resource. Failure is
    // logged on the update rather than returned as Err.
    match self.resolve(CreateServer { name, config }, user).await {
      Ok(server) => {
        update.push_simple_log(
          "create server",
          format!("created server {} ({})", server.name, server.id),
        );
      }
      Err(e) => {
        update.push_error_log(
          "create server",
          format!(
            "failed to create server\n\n{}",
            serialize_error_pretty(&e)
          ),
        );
      }
    };

    update.finalize();
    update_update(update.clone()).await?;
    Ok(update)
  }
}
|
||||
4
bin/core/src/api/mod.rs
Normal file
4
bin/core/src/api/mod.rs
Normal file
@@ -0,0 +1,4 @@
|
||||
pub mod auth;
|
||||
pub mod execute;
|
||||
pub mod read;
|
||||
pub mod write;
|
||||
83
bin/core/src/api/read/alert.rs
Normal file
83
bin/core/src/api/read/alert.rs
Normal file
@@ -0,0 +1,83 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
|
||||
},
|
||||
entities::{update::ResourceTargetVariant, user::User},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
const NUM_ALERTS_PER_PAGE: u64 = 100;
|
||||
|
||||
// Lists alerts, newest first, paged by NUM_ALERTS_PER_PAGE. Non-admin
// users only see alerts for resources they have permissions on.
impl Resolve<ListAlerts, User> for State {
  async fn resolve(
    &self,
    ListAlerts { query, page }: ListAlerts,
    user: User,
  ) -> anyhow::Result<ListAlertsResponse> {
    let mut query = query.unwrap_or_default();
    // Restrict non-admins to alerts targeting their visible servers
    // and deployments.
    if !user.admin {
      let server_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Server,
      )
      .await?;
      let deployment_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Deployment,
      )
      .await?;
      query.extend(doc! {
        "$or": [
          { "target.type": "Server", "target.id": { "$in": &server_ids } },
          { "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
        ]
      });
    }

    // Sort newest first; skip past the earlier pages.
    let alerts = find_collect(
      &db_client().await.alerts,
      query,
      FindOptions::builder()
        .sort(doc! { "ts": -1 })
        .limit(NUM_ALERTS_PER_PAGE as i64)
        .skip(page * NUM_ALERTS_PER_PAGE)
        .build(),
    )
    .await
    .context("failed to get alerts from db")?;

    // A partially-filled page means there is nothing after it.
    let next_page = if alerts.len() < NUM_ALERTS_PER_PAGE as usize {
      None
    } else {
      Some((page + 1) as i64)
    };

    let res = ListAlertsResponse { next_page, alerts };

    Ok(res)
  }
}
|
||||
|
||||
impl Resolve<GetAlert, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetAlert { id }: GetAlert,
|
||||
_: User,
|
||||
) -> anyhow::Result<GetAlertResponse> {
|
||||
find_one_by_id(&db_client().await.alerts, &id)
|
||||
.await
|
||||
.context("failed to query db for alert")?
|
||||
.context("no alert found with given id")
|
||||
}
|
||||
}
|
||||
80
bin/core/src/api/read/alerter.rs
Normal file
80
bin/core/src/api/read/alerter.rs
Normal file
@@ -0,0 +1,80 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
alerter::{Alerter, AlerterListItem},
|
||||
permission::PermissionLevel,
|
||||
update::ResourceTargetVariant,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetAlerter, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetAlerter { alerter }: GetAlerter,
|
||||
user: User,
|
||||
) -> anyhow::Result<Alerter> {
|
||||
resource::get_check_permissions::<Alerter>(
|
||||
&alerter,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListAlerters, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListAlerters { query }: ListAlerters,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<AlerterListItem>> {
|
||||
resource::list_for_user::<Alerter>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetAlertersSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetAlertersSummary {}: GetAlertersSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetAlertersSummaryResponse> {
|
||||
let query = if user.admin {
|
||||
None
|
||||
} else {
|
||||
let ids = get_resource_ids_for_non_admin(
|
||||
&user.id,
|
||||
ResourceTargetVariant::Alerter,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.flat_map(|id| ObjectId::from_str(&id))
|
||||
.collect::<Vec<_>>();
|
||||
let query = doc! {
|
||||
"_id": { "$in": ids }
|
||||
};
|
||||
Some(query)
|
||||
};
|
||||
let total = db_client()
|
||||
.await
|
||||
.alerters
|
||||
.count_documents(query, None)
|
||||
.await
|
||||
.context("failed to count all alerter documents")?;
|
||||
let res = GetAlertersSummaryResponse {
|
||||
total: total as u32,
|
||||
};
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
298
bin/core/src/api/read/build.rs
Normal file
298
bin/core/src/api/read/build.rs
Normal file
@@ -0,0 +1,298 @@
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
str::FromStr,
|
||||
sync::OnceLock,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use futures::TryStreamExt;
|
||||
use monitor_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
build::{Build, BuildActionState, BuildListItem, BuildState},
|
||||
permission::PermissionLevel,
|
||||
update::{ResourceTargetVariant, UpdateStatus},
|
||||
user::User,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
bson::{doc, oid::ObjectId},
|
||||
options::FindOptions,
|
||||
},
|
||||
};
|
||||
use resolver_api::{Resolve, ResolveToString};
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{action_states, build_state_cache, db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetBuild, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetBuild { build }: GetBuild,
|
||||
user: User,
|
||||
) -> anyhow::Result<Build> {
|
||||
resource::get_check_permissions::<Build>(
|
||||
&build,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListBuilds, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListBuilds { query }: ListBuilds,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<BuildListItem>> {
|
||||
resource::list_for_user::<Build>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetBuildActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetBuildActionState { build }: GetBuildActionState,
|
||||
user: User,
|
||||
) -> anyhow::Result<BuildActionState> {
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&build,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.build
|
||||
.get(&build.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?;
|
||||
Ok(action_state)
|
||||
}
|
||||
}
|
||||
|
||||
// Aggregate counts of builds by state (ok / failed / unknown / building)
// over all builds visible to the caller.
impl Resolve<GetBuildsSummary, User> for State {
  async fn resolve(
    &self,
    GetBuildsSummary {}: GetBuildsSummary,
    user: User,
  ) -> anyhow::Result<GetBuildsSummaryResponse> {
    // Admins see all builds; others only those they have permissions on.
    let query = if user.admin {
      None
    } else {
      let ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Build,
      )
      .await?
      .into_iter()
      // flat_map over Result keeps only the ids that parse as ObjectIds.
      .flat_map(|id| ObjectId::from_str(&id))
      .collect::<Vec<_>>();
      let query = doc! {
        "_id": { "$in": ids }
      };
      Some(query)
    };

    let builds = find_collect(&db_client().await.builds, query, None)
      .await
      .context("failed to find all build documents")?;
    let mut res = GetBuildsSummaryResponse::default();

    let cache = build_state_cache();
    let action_states = action_states();

    for build in builds {
      res.total += 1;

      // The "building" signal comes from the live action states (checked
      // first via the guard); the terminal states come from the state cache.
      match (
        cache.get(&build.id).await.unwrap_or_default(),
        action_states
          .build
          .get(&build.id)
          .await
          .unwrap_or_default()
          .get()?,
      ) {
        (_, action_states) if action_states.building => {
          res.building += 1;
        }
        (BuildState::Ok, _) => res.ok += 1,
        (BuildState::Failed, _) => res.failed += 1,
        (BuildState::Unknown, _) => res.unknown += 1,
        // will never come off the cache in the building state, since that comes from action states
        (BuildState::Building, _) => unreachable!(),
      }
    }

    Ok(res)
  }
}
|
||||
|
||||
/// One day in milliseconds; the bucket width for build stats.
const ONE_DAY_MS: i64 = 86400000;

// Produce per-day build counts / durations for a 30-day window.
// `page` 0 is the most recent 30 days, `page` 1 the 30 days before that, etc.
impl Resolve<GetBuildMonthlyStats, User> for State {
  async fn resolve(
    &self,
    GetBuildMonthlyStats { page }: GetBuildMonthlyStats,
    _: User,
  ) -> anyhow::Result<GetBuildMonthlyStatsResponse> {
    let curr_ts = unix_timestamp_ms() as i64;
    // Round the current time up to the next day boundary so the newest
    // (partial) day is included in the window.
    let next_day = curr_ts - curr_ts % ONE_DAY_MS + ONE_DAY_MS;

    // [open_ts, close_ts) is a 30-day half-open window shifted back by `page`.
    let close_ts = next_day - page as i64 * 30 * ONE_DAY_MS;
    let open_ts = close_ts - 30 * ONE_DAY_MS;

    // Stream only RunBuild updates that started inside the window.
    let mut build_updates = db_client()
      .await
      .updates
      .find(
        doc! {
          "start_ts": {
            "$gte": open_ts,
            "$lt": close_ts
          },
          "operation": Operation::RunBuild.to_string(),
        },
        None,
      )
      .await
      .context("failed to get updates cursor")?;

    let mut days = HashMap::<i64, BuildStatsDay>::with_capacity(32);

    let mut curr = open_ts;

    // Pre-seed every day in the window so days with zero builds still
    // appear in the response.
    while curr < close_ts {
      let stats = BuildStatsDay {
        ts: curr as f64,
        ..Default::default()
      };
      days.insert(curr, stats);
      curr += ONE_DAY_MS;
    }

    // Fold each finished build into its day bucket (unfinished builds,
    // i.e. those without an end_ts, are skipped).
    while let Some(update) = build_updates.try_next().await? {
      if let Some(end_ts) = update.end_ts {
        // Truncate start_ts down to its day boundary to pick the bucket.
        let day = update.start_ts - update.start_ts % ONE_DAY_MS;
        let entry = days.entry(day).or_default();
        entry.count += 1.0;
        entry.time += ms_to_hour(end_ts - update.start_ts);
      }
    }

    Ok(GetBuildMonthlyStatsResponse::new(
      days.into_values().collect(),
    ))
  }
}
|
||||
|
||||
/// Number of milliseconds in one hour, as f64 for the division below.
const MS_TO_HOUR_DIVISOR: f64 = 1000.0 * 60.0 * 60.0;

/// Convert a millisecond duration into fractional hours.
fn ms_to_hour(duration: i64) -> f64 {
  let ms = duration as f64;
  ms / MS_TO_HOUR_DIVISOR
}
|
||||
|
||||
// List the versions produced by successful runs of one build, newest first,
// optionally filtered on exact major / minor / patch components.
impl Resolve<GetBuildVersions, User> for State {
  async fn resolve(
    &self,
    GetBuildVersions {
      build,
      major,
      minor,
      patch,
    }: GetBuildVersions,
    user: User,
  ) -> anyhow::Result<Vec<BuildVersionResponseItem>> {
    let build = resource::get_check_permissions::<Build>(
      &build,
      &user,
      PermissionLevel::Read,
    )
    .await?;

    // Only completed, successful RunBuild updates for this build count
    // as published versions.
    let mut filter = doc! {
      "target": {
        "type": "Build",
        "id": build.id
      },
      "operation": Operation::RunBuild.to_string(),
      "status": UpdateStatus::Complete.to_string(),
      "success": true
    };
    // Each provided component narrows the filter to an exact match.
    if let Some(major) = major {
      filter.insert("version.major", major);
    }
    if let Some(minor) = minor {
      filter.insert("version.minor", minor);
    }
    if let Some(patch) = patch {
      filter.insert("version.patch", patch);
    }

    // Sort by _id descending (mongo ids are time-ordered → newest first).
    let versions = find_collect(
      &db_client().await.updates,
      filter,
      FindOptions::builder()
        .sort(doc! { "_id": -1 })
        .build(),
    )
    .await
    .context("failed to pull versions from mongo")?
    .into_iter()
    .map(|u| (u.version, u.start_ts))
    // Drop updates that carry no version at all.
    .filter(|(v, _)| !v.is_none())
    .map(|(version, ts)| BuildVersionResponseItem { version, ts })
    .collect();
    Ok(versions)
  }
}
|
||||
|
||||
fn docker_organizations() -> &'static String {
|
||||
static DOCKER_ORGANIZATIONS: OnceLock<String> = OnceLock::new();
|
||||
DOCKER_ORGANIZATIONS.get_or_init(|| {
|
||||
serde_json::to_string(&core_config().docker_organizations)
|
||||
.expect("failed to serialize docker organizations")
|
||||
})
|
||||
}
|
||||
|
||||
impl ResolveToString<ListDockerOrganizations, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
ListDockerOrganizations {}: ListDockerOrganizations,
|
||||
_: User,
|
||||
) -> anyhow::Result<String> {
|
||||
Ok(docker_organizations().clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListCommonBuildExtraArgs, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListCommonBuildExtraArgs { query }: ListCommonBuildExtraArgs,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListCommonBuildExtraArgsResponse> {
|
||||
let builds = resource::list_full_for_user::<Build>(query, &user)
|
||||
.await
|
||||
.context("failed to get resources matching query")?;
|
||||
|
||||
// first collect with guaranteed uniqueness
|
||||
let mut res = HashSet::<String>::new();
|
||||
|
||||
for build in builds {
|
||||
for extra_arg in build.config.extra_args {
|
||||
res.insert(extra_arg);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res.into_iter().collect())
|
||||
}
|
||||
}
|
||||
130
bin/core/src/api/read/builder.rs
Normal file
130
bin/core/src/api/read/builder.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
use std::{collections::HashSet, str::FromStr};
|
||||
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::read::{self, *},
|
||||
entities::{
|
||||
builder::{Builder, BuilderConfig, BuilderListItem},
|
||||
permission::PermissionLevel,
|
||||
update::ResourceTargetVariant,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetBuilder, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetBuilder { builder }: GetBuilder,
|
||||
user: User,
|
||||
) -> anyhow::Result<Builder> {
|
||||
resource::get_check_permissions::<Builder>(
|
||||
&builder,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListBuilders, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListBuilders { query }: ListBuilders,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<BuilderListItem>> {
|
||||
resource::list_for_user::<Builder>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetBuildersSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetBuildersSummary {}: GetBuildersSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetBuildersSummaryResponse> {
|
||||
let query = if user.admin {
|
||||
None
|
||||
} else {
|
||||
let ids = get_resource_ids_for_non_admin(
|
||||
&user.id,
|
||||
ResourceTargetVariant::Builder,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.flat_map(|id| ObjectId::from_str(&id))
|
||||
.collect::<Vec<_>>();
|
||||
let query = doc! {
|
||||
"_id": { "$in": ids }
|
||||
};
|
||||
Some(query)
|
||||
};
|
||||
let total = db_client()
|
||||
.await
|
||||
.builders
|
||||
.count_documents(query, None)
|
||||
.await
|
||||
.context("failed to count all builder documents")?;
|
||||
let res = GetBuildersSummaryResponse {
|
||||
total: total as u32,
|
||||
};
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
// Report the github / docker accounts usable with a builder: the accounts
// configured on the builder itself (or on its backing server), merged with
// the globally configured core accounts, deduplicated and sorted.
impl Resolve<GetBuilderAvailableAccounts, User> for State {
  async fn resolve(
    &self,
    GetBuilderAvailableAccounts { builder }: GetBuilderAvailableAccounts,
    user: User,
  ) -> anyhow::Result<GetBuilderAvailableAccountsResponse> {
    let builder = resource::get_check_permissions::<Builder>(
      &builder,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    let (github, docker) = match builder.config {
      // AWS builders declare their accounts directly in config.
      BuilderConfig::Aws(config) => {
        (config.github_accounts, config.docker_accounts)
      }
      // Server builders delegate to the accounts available on that server
      // (permission check on the server happens inside the inner resolve).
      BuilderConfig::Server(config) => {
        let res = self
          .resolve(
            read::GetAvailableAccounts {
              server: config.server_id,
            },
            user,
          )
          .await?;
        (res.github, res.docker)
      }
    };

    // Merge with core-config accounts via HashSet to deduplicate,
    // then sort for a stable response ordering.
    let mut github_set = HashSet::<String>::new();

    github_set.extend(core_config().github_accounts.keys().cloned());
    github_set.extend(github);

    let mut github = github_set.into_iter().collect::<Vec<_>>();
    github.sort();

    let mut docker_set = HashSet::<String>::new();

    docker_set.extend(core_config().docker_accounts.keys().cloned());
    docker_set.extend(docker);

    let mut docker = docker_set.into_iter().collect::<Vec<_>>();
    docker.sort();

    Ok(GetBuilderAvailableAccountsResponse { github, docker })
  }
}
|
||||
271
bin/core/src/api/read/deployment.rs
Normal file
271
bin/core/src/api/read/deployment.rs
Normal file
@@ -0,0 +1,271 @@
|
||||
use std::{cmp, collections::HashSet, str::FromStr};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
deployment::{
|
||||
Deployment, DeploymentActionState, DeploymentConfig,
|
||||
DeploymentListItem, DeploymentState, DockerContainerStats,
|
||||
},
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
update::{Log, ResourceTargetVariant},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
periphery_client, query::get_resource_ids_for_non_admin,
|
||||
},
|
||||
resource,
|
||||
state::{action_states, db_client, deployment_status_cache, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetDeployment, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDeployment { deployment }: GetDeployment,
|
||||
user: User,
|
||||
) -> anyhow::Result<Deployment> {
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListDeployments, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListDeployments { query }: ListDeployments,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<DeploymentListItem>> {
|
||||
resource::list_for_user::<Deployment>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetDeploymentContainer, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDeploymentContainer { deployment }: GetDeploymentContainer,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetDeploymentContainerResponse> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let status = deployment_status_cache()
|
||||
.get(&deployment.id)
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
let response = GetDeploymentContainerResponse {
|
||||
state: status.curr.state,
|
||||
container: status.curr.container.clone(),
|
||||
};
|
||||
Ok(response)
|
||||
}
|
||||
}
|
||||
|
||||
const MAX_LOG_LENGTH: u64 = 5000;
|
||||
|
||||
impl Resolve<GetLog, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetLog { deployment, tail }: GetLog,
|
||||
user: User,
|
||||
) -> anyhow::Result<Log> {
|
||||
let Deployment {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
..
|
||||
} = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
if server_id.is_empty() {
|
||||
return Ok(Log::default());
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
periphery_client(&server)?
|
||||
.request(api::container::GetContainerLog {
|
||||
name,
|
||||
tail: cmp::min(tail, MAX_LOG_LENGTH),
|
||||
})
|
||||
.await
|
||||
.context("failed at call to periphery")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<SearchLog, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
SearchLog {
|
||||
deployment,
|
||||
terms,
|
||||
combinator,
|
||||
}: SearchLog,
|
||||
user: User,
|
||||
) -> anyhow::Result<Log> {
|
||||
let Deployment {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
..
|
||||
} = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
if server_id.is_empty() {
|
||||
return Ok(Log::default());
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
periphery_client(&server)?
|
||||
.request(api::container::GetContainerLogSearch {
|
||||
name,
|
||||
terms,
|
||||
combinator,
|
||||
})
|
||||
.await
|
||||
.context("failed at call to periphery")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetDeploymentStats, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDeploymentStats { deployment }: GetDeploymentStats,
|
||||
user: User,
|
||||
) -> anyhow::Result<DockerContainerStats> {
|
||||
let Deployment {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
..
|
||||
} = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
if server_id.is_empty() {
|
||||
return Err(anyhow!("deployment has no server attached"));
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
periphery_client(&server)?
|
||||
.request(api::container::GetContainerStats { name })
|
||||
.await
|
||||
.context("failed to get stats from periphery")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetDeploymentActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDeploymentActionState { deployment }: GetDeploymentActionState,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeploymentActionState> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.deployment
|
||||
.get(&deployment.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?;
|
||||
Ok(action_state)
|
||||
}
|
||||
}
|
||||
|
||||
// Aggregate counts of deployments by container state (running / stopped /
// not deployed / unknown) over all deployments visible to the caller.
impl Resolve<GetDeploymentsSummary, User> for State {
  async fn resolve(
    &self,
    GetDeploymentsSummary {}: GetDeploymentsSummary,
    user: User,
  ) -> anyhow::Result<GetDeploymentsSummaryResponse> {
    // Admins see all deployments; others only those they have permissions on.
    let query = if user.admin {
      None
    } else {
      let ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Deployment,
      )
      .await?
      .into_iter()
      // flat_map over Result keeps only the ids that parse as ObjectIds.
      .flat_map(|id| ObjectId::from_str(&id))
      .collect::<Vec<_>>();
      let query = doc! {
        "_id": { "$in": ids }
      };
      Some(query)
    };

    let deployments =
      find_collect(&db_client().await.deployments, query, None)
        .await
        .context("failed to find all deployment documents")?;
    let mut res = GetDeploymentsSummaryResponse::default();
    // States come from the cache maintained by the monitoring loop.
    let status_cache = deployment_status_cache();
    for deployment in deployments {
      res.total += 1;
      let status =
        status_cache.get(&deployment.id).await.unwrap_or_default();
      match status.curr.state {
        DeploymentState::Running => {
          res.running += 1;
        }
        DeploymentState::Unknown => {
          res.unknown += 1;
        }
        DeploymentState::NotDeployed => {
          res.not_deployed += 1;
        }
        // Every remaining state (exited, paused, restarting, ...) is
        // reported under the "stopped" bucket.
        _ => {
          res.stopped += 1;
        }
      }
    }
    Ok(res)
  }
}
|
||||
|
||||
impl Resolve<ListCommonDeploymentExtraArgs, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListCommonDeploymentExtraArgs { query }: ListCommonDeploymentExtraArgs,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListCommonDeploymentExtraArgsResponse> {
|
||||
let deployments =
|
||||
resource::list_full_for_user::<Deployment>(query, &user)
|
||||
.await
|
||||
.context("failed to get resources matching query")?;
|
||||
|
||||
// first collect with guaranteed uniqueness
|
||||
let mut res = HashSet::<String>::new();
|
||||
|
||||
for deployment in deployments {
|
||||
for extra_arg in deployment.config.extra_args {
|
||||
res.insert(extra_arg);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res.into_iter().collect())
|
||||
}
|
||||
}
|
||||
216
bin/core/src/api/read/mod.rs
Normal file
216
bin/core/src/api/read/mod.rs
Normal file
@@ -0,0 +1,216 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::anyhow;
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use monitor_client::{api::read::*, entities::user::User};
|
||||
use resolver_api::{derive::Resolver, Resolve, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{auth::auth_request, config::core_config, state::State};
|
||||
|
||||
mod alert;
|
||||
mod alerter;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod permission;
|
||||
mod procedure;
|
||||
mod repo;
|
||||
mod search;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod tag;
|
||||
mod toml;
|
||||
mod update;
|
||||
mod user;
|
||||
mod user_group;
|
||||
mod variable;
|
||||
|
||||
/// Tagged union of every read-only API request, dispatched by the
/// `Resolver` derive to the matching `Resolve` impl on `State`.
/// Serialized as `{ "type": <variant>, "params": <payload> }`.
/// Variants marked `#[to_string_resolver]` return pre-serialized JSON
/// strings (via `ResolveToString`) instead of serializing per request.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum ReadRequest {
  GetVersion(GetVersion),
  GetCoreInfo(GetCoreInfo),

  // ==== USER ====
  ListUsers(ListUsers),
  GetUsername(GetUsername),
  ListApiKeys(ListApiKeys),
  ListApiKeysForServiceUser(ListApiKeysForServiceUser),
  ListPermissions(ListPermissions),
  GetPermissionLevel(GetPermissionLevel),
  ListUserTargetPermissions(ListUserTargetPermissions),

  // ==== USER GROUP ====
  GetUserGroup(GetUserGroup),
  ListUserGroups(ListUserGroups),

  // ==== SEARCH ====
  FindResources(FindResources),

  // ==== PROCEDURE ====
  GetProceduresSummary(GetProceduresSummary),
  GetProcedure(GetProcedure),
  GetProcedureActionState(GetProcedureActionState),
  ListProcedures(ListProcedures),

  // ==== SERVER TEMPLATE ====
  GetServerTemplate(GetServerTemplate),
  ListServerTemplates(ListServerTemplates),
  GetServerTemplatesSummary(GetServerTemplatesSummary),

  // ==== SERVER ====
  GetServersSummary(GetServersSummary),
  GetServer(GetServer),
  ListServers(ListServers),
  GetServerState(GetServerState),
  GetPeripheryVersion(GetPeripheryVersion),
  GetDockerContainers(GetDockerContainers),
  GetDockerImages(GetDockerImages),
  GetDockerNetworks(GetDockerNetworks),
  GetServerActionState(GetServerActionState),
  GetHistoricalServerStats(GetHistoricalServerStats),
  GetAvailableAccounts(GetAvailableAccounts),
  GetAvailableSecrets(GetAvailableSecrets),

  // ==== DEPLOYMENT ====
  GetDeploymentsSummary(GetDeploymentsSummary),
  GetDeployment(GetDeployment),
  ListDeployments(ListDeployments),
  GetDeploymentContainer(GetDeploymentContainer),
  GetDeploymentActionState(GetDeploymentActionState),
  GetDeploymentStats(GetDeploymentStats),
  GetLog(GetLog),
  SearchLog(SearchLog),
  ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),

  // ==== BUILD ====
  GetBuildsSummary(GetBuildsSummary),
  GetBuild(GetBuild),
  ListBuilds(ListBuilds),
  GetBuildActionState(GetBuildActionState),
  GetBuildMonthlyStats(GetBuildMonthlyStats),
  GetBuildVersions(GetBuildVersions),
  ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
  #[to_string_resolver]
  ListDockerOrganizations(ListDockerOrganizations),

  // ==== REPO ====
  GetReposSummary(GetReposSummary),
  GetRepo(GetRepo),
  ListRepos(ListRepos),
  GetRepoActionState(GetRepoActionState),

  // ==== BUILDER ====
  GetBuildersSummary(GetBuildersSummary),
  GetBuilder(GetBuilder),
  ListBuilders(ListBuilders),
  GetBuilderAvailableAccounts(GetBuilderAvailableAccounts),

  // ==== ALERTER ====
  GetAlertersSummary(GetAlertersSummary),
  GetAlerter(GetAlerter),
  ListAlerters(ListAlerters),

  // ==== TOML ====
  ExportAllResourcesToToml(ExportAllResourcesToToml),
  ExportResourcesToToml(ExportResourcesToToml),

  // ==== TAG ====
  GetTag(GetTag),
  ListTags(ListTags),

  // ==== UPDATE ====
  GetUpdate(GetUpdate),
  ListUpdates(ListUpdates),

  // ==== ALERT ====
  ListAlerts(ListAlerts),
  GetAlert(GetAlert),

  // ==== SERVER STATS ====
  #[to_string_resolver]
  GetSystemInformation(GetSystemInformation),
  #[to_string_resolver]
  GetSystemStats(GetSystemStats),
  #[to_string_resolver]
  GetSystemProcesses(GetSystemProcesses),

  // ==== VARIABLE ====
  GetVariable(GetVariable),
  ListVariables(ListVariables),
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
// Axum handler for all /read requests: logs the request, dispatches it to
// the matching Resolve impl via the derived resolver, logs timing / errors,
// and returns the resolver's JSON string as the response body.
#[instrument(name = "ReadHandler", level = "debug", skip(user))]
async fn handler(
  Extension(user): Extension<User>,
  Json(request): Json<ReadRequest>,
) -> serror::Result<(TypedHeader<ContentType>, String)> {
  let timer = Instant::now();
  // Per-request uuid to correlate the log lines below.
  let req_id = Uuid::new_v4();
  debug!(
    "/read request {req_id} | user: {} ({})",
    user.username, user.id
  );
  let res =
    State
      .resolve_request(request, user)
      .await
      // Flatten the resolver's two error cases into a single anyhow error.
      .map_err(|e| match e {
        resolver_api::Error::Serialization(e) => {
          anyhow!("{e:?}").context("response serialization error")
        }
        resolver_api::Error::Inner(e) => e,
      });
  // Log the failure here, but still propagate it via `res?` below.
  if let Err(e) = &res {
    warn!("/read request {req_id} error: {e:#}");
  }
  let elapsed = timer.elapsed();
  debug!("/read request {req_id} | resolve time: {elapsed:?}");
  // Body is already serialized JSON; just attach the content-type header.
  Ok((TypedHeader(ContentType::json()), res?))
}
|
||||
|
||||
impl Resolve<GetVersion, User> for State {
|
||||
#[instrument(name = "GetVersion", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetVersion {}: GetVersion,
|
||||
_: User,
|
||||
) -> anyhow::Result<GetVersionResponse> {
|
||||
Ok(GetVersionResponse {
|
||||
version: env!("CARGO_PKG_VERSION").to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetCoreInfo, User> for State {
|
||||
#[instrument(name = "GetCoreInfo", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetCoreInfo {}: GetCoreInfo,
|
||||
_: User,
|
||||
) -> anyhow::Result<GetCoreInfoResponse> {
|
||||
let config = core_config();
|
||||
Ok(GetCoreInfoResponse {
|
||||
title: config.title.clone(),
|
||||
monitoring_interval: config.monitoring_interval,
|
||||
github_webhook_base_url: config
|
||||
.github_webhook_base_url
|
||||
.clone()
|
||||
.unwrap_or_else(|| config.host.clone()),
|
||||
})
|
||||
}
|
||||
}
|
||||
72
bin/core/src/api/read/permission.rs
Normal file
72
bin/core/src/api/read/permission.rs
Normal file
@@ -0,0 +1,72 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetPermissionLevel, GetPermissionLevelResponse, ListPermissions,
|
||||
ListPermissionsResponse, ListUserTargetPermissions,
|
||||
ListUserTargetPermissionsResponse,
|
||||
},
|
||||
entities::{permission::PermissionLevel, user::User},
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_user_permission_on_resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<ListPermissions, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListPermissions {}: ListPermissions,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListPermissionsResponse> {
|
||||
find_collect(
|
||||
&db_client().await.permissions,
|
||||
doc! {
|
||||
"user_target.type": "User",
|
||||
"user_target.id": &user.id
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to query db for permissions")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetPermissionLevel, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetPermissionLevel { target }: GetPermissionLevel,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetPermissionLevelResponse> {
|
||||
if user.admin {
|
||||
return Ok(PermissionLevel::Write);
|
||||
}
|
||||
let (variant, id) = target.extract_variant_id();
|
||||
get_user_permission_on_resource(&user.id, variant, id).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListUserTargetPermissions, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListUserTargetPermissions { user_target }: ListUserTargetPermissions,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListUserTargetPermissionsResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("this method is admin only"));
|
||||
}
|
||||
let (variant, id) = user_target.extract_variant_id();
|
||||
find_collect(
|
||||
&db_client().await.permissions,
|
||||
doc! {
|
||||
"user_target.type": variant.as_ref(),
|
||||
"user_target.id": id
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to query db for permissions")
|
||||
}
|
||||
}
|
||||
135
bin/core/src/api/read/procedure.rs
Normal file
135
bin/core/src/api/read/procedure.rs
Normal file
@@ -0,0 +1,135 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetProcedure, GetProcedureActionState,
|
||||
GetProcedureActionStateResponse, GetProcedureResponse,
|
||||
GetProceduresSummary, GetProceduresSummaryResponse,
|
||||
ListProcedures, ListProceduresResponse,
|
||||
},
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
procedure::{Procedure, ProcedureState},
|
||||
update::ResourceTargetVariant,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{action_states, db_client, procedure_state_cache, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetProcedure, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetProcedure { procedure }: GetProcedure,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetProcedureResponse> {
|
||||
resource::get_check_permissions::<Procedure>(
|
||||
&procedure,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListProcedures, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListProcedures { query }: ListProcedures,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListProceduresResponse> {
|
||||
resource::list_for_user::<Procedure>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetProceduresSummary, User> for State {
  /// Aggregates counts of procedures by state (ok / failed / unknown /
  /// running) across every procedure the caller can see.
  async fn resolve(
    &self,
    GetProceduresSummary {}: GetProceduresSummary,
    user: User,
  ) -> anyhow::Result<GetProceduresSummaryResponse> {
    // Admins scan all procedures (no filter); other users are limited
    // to the procedure ids they hold permissions on.
    let query = if user.admin {
      None
    } else {
      // flat_map drops ids that fail ObjectId parsing instead of erroring.
      let ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Procedure,
      )
      .await?
      .into_iter()
      .flat_map(|id| ObjectId::from_str(&id))
      .collect::<Vec<_>>();
      let query = doc! {
        "_id": { "$in": ids }
      };
      Some(query)
    };

    let procedures =
      find_collect(&db_client().await.procedures, query, None)
        .await
        .context("failed to find all procedure documents")?;

    let mut res = GetProceduresSummaryResponse::default();

    let cache = procedure_state_cache();
    let action_states = action_states();

    for procedure in procedures {
      res.total += 1;

      // The running flag comes from live action state; the terminal
      // states (Ok/Failed/Unknown) come from the procedure state cache.
      match (
        cache.get(&procedure.id).await.unwrap_or_default(),
        action_states
          .procedure
          .get(&procedure.id)
          .await
          .unwrap_or_default()
          .get()?,
      ) {
        // A currently-running procedure is counted as running
        // regardless of its cached terminal state.
        (_, action_states) if action_states.running => {
          res.running += 1;
        }
        (ProcedureState::Ok, _) => res.ok += 1,
        (ProcedureState::Failed, _) => res.failed += 1,
        (ProcedureState::Unknown, _) => res.unknown += 1,
        // will never come off the cache in the running state, since that comes from action states
        (ProcedureState::Running, _) => unreachable!(),
      }
    }

    Ok(res)
  }
}
|
||||
|
||||
impl Resolve<GetProcedureActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetProcedureActionState { procedure }: GetProcedureActionState,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetProcedureActionStateResponse> {
|
||||
let procedure = resource::get_check_permissions::<Procedure>(
|
||||
&procedure,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.procedure
|
||||
.get(&procedure.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?;
|
||||
Ok(action_state)
|
||||
}
|
||||
}
|
||||
133
bin/core/src/api/read/repo.rs
Normal file
133
bin/core/src/api/read/repo.rs
Normal file
@@ -0,0 +1,133 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
repo::{Repo, RepoActionState, RepoListItem, RepoState},
|
||||
update::ResourceTargetVariant,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{action_states, db_client, repo_state_cache, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetRepo, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetRepo { repo }: GetRepo,
|
||||
user: User,
|
||||
) -> anyhow::Result<Repo> {
|
||||
resource::get_check_permissions::<Repo>(
|
||||
&repo,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListRepos, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListRepos { query }: ListRepos,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<RepoListItem>> {
|
||||
resource::list_for_user::<Repo>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetRepoActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetRepoActionState { repo }: GetRepoActionState,
|
||||
user: User,
|
||||
) -> anyhow::Result<RepoActionState> {
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&repo,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.repo
|
||||
.get(&repo.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?;
|
||||
Ok(action_state)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetReposSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetReposSummary {}: GetReposSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetReposSummaryResponse> {
|
||||
let query = if user.admin {
|
||||
None
|
||||
} else {
|
||||
let ids = get_resource_ids_for_non_admin(
|
||||
&user.id,
|
||||
ResourceTargetVariant::Alerter,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.flat_map(|id| ObjectId::from_str(&id))
|
||||
.collect::<Vec<_>>();
|
||||
let query = doc! {
|
||||
"_id": { "$in": ids }
|
||||
};
|
||||
Some(query)
|
||||
};
|
||||
|
||||
let repos = find_collect(&db_client().await.repos, query, None)
|
||||
.await
|
||||
.context("failed to find all repo documents")?;
|
||||
let mut res = GetReposSummaryResponse::default();
|
||||
|
||||
let cache = repo_state_cache();
|
||||
let action_states = action_states();
|
||||
|
||||
for repo in repos {
|
||||
res.total += 1;
|
||||
|
||||
match (
|
||||
cache.get(&repo.id).await.unwrap_or_default(),
|
||||
action_states
|
||||
.repo
|
||||
.get(&repo.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?,
|
||||
) {
|
||||
(_, action_states) if action_states.cloning => {
|
||||
res.cloning += 1;
|
||||
}
|
||||
(_, action_states) if action_states.pulling => {
|
||||
res.pulling += 1;
|
||||
}
|
||||
(RepoState::Ok, _) => res.ok += 1,
|
||||
(RepoState::Failed, _) => res.failed += 1,
|
||||
(RepoState::Unknown, _) => res.unknown += 1,
|
||||
// will never come off the cache in the building state, since that comes from action states
|
||||
(RepoState::Cloning, _) | (RepoState::Pulling, _) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
83
bin/core/src/api/read/search.rs
Normal file
83
bin/core/src/api/read/search.rs
Normal file
@@ -0,0 +1,83 @@
|
||||
use monitor_client::{
|
||||
api::read::{FindResources, FindResourcesResponse},
|
||||
entities::{
|
||||
build::Build, deployment::Deployment, procedure::Procedure,
|
||||
repo::Repo, server::Server, update::ResourceTargetVariant,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
// Resource variants searchable through FindResources. System, Builder
// and Alerter are deliberately excluded (filtered again below).
const FIND_RESOURCE_TYPES: [ResourceTargetVariant; 5] = [
  ResourceTargetVariant::Server,
  ResourceTargetVariant::Build,
  ResourceTargetVariant::Deployment,
  ResourceTargetVariant::Repo,
  ResourceTargetVariant::Procedure,
];

impl Resolve<FindResources, User> for State {
  /// Runs the same mongo `query` against each requested resource
  /// collection and gathers the per-type results into one response.
  /// An empty `resources` list means "search all supported types".
  async fn resolve(
    &self,
    FindResources { query, resources }: FindResources,
    user: User,
  ) -> anyhow::Result<FindResourcesResponse> {
    let mut res = FindResourcesResponse::default();
    let resource_types = if resources.is_empty() {
      FIND_RESOURCE_TYPES.to_vec()
    } else {
      // Silently drop unsupported variants rather than erroring.
      resources
        .into_iter()
        .filter(|r| {
          !matches!(
            r,
            ResourceTargetVariant::System
              | ResourceTargetVariant::Builder
              | ResourceTargetVariant::Alerter
          )
        })
        .collect()
    };
    // Each arm applies the caller's permission filtering via
    // list_for_user_using_document; query is cloned per collection.
    for resource_type in resource_types {
      match resource_type {
        ResourceTargetVariant::Server => {
          res.servers = resource::list_for_user_using_document::<
            Server,
          >(query.clone(), &user)
          .await?;
        }
        ResourceTargetVariant::Deployment => {
          res.deployments = resource::list_for_user_using_document::<
            Deployment,
          >(query.clone(), &user)
          .await?;
        }
        ResourceTargetVariant::Build => {
          res.builds =
            resource::list_for_user_using_document::<Build>(
              query.clone(),
              &user,
            )
            .await?;
        }
        ResourceTargetVariant::Repo => {
          res.repos = resource::list_for_user_using_document::<Repo>(
            query.clone(),
            &user,
          )
          .await?;
        }
        ResourceTargetVariant::Procedure => {
          res.procedures = resource::list_for_user_using_document::<
            Procedure,
          >(query.clone(), &user)
          .await?;
        }
        // Unsupported variants were filtered above; ignore defensively.
        _ => {}
      }
    }
    Ok(res)
  }
}
|
||||
434
bin/core/src/api/read/server.rs
Normal file
434
bin/core/src/api/read/server.rs
Normal file
@@ -0,0 +1,434 @@
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
sync::{Arc, OnceLock},
|
||||
};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_timing_util::{
|
||||
get_timelength_in_ms, unix_timestamp_ms, FIFTEEN_SECONDS_MS,
|
||||
};
|
||||
use monitor_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
deployment::ContainerSummary,
|
||||
permission::PermissionLevel,
|
||||
server::{
|
||||
docker_image::ImageSummary, docker_network::DockerNetwork,
|
||||
Server, ServerActionState, ServerListItem, ServerState,
|
||||
},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use periphery_client::api::{self, GetAccountsResponse};
|
||||
use resolver_api::{Resolve, ResolveToString};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::periphery_client,
|
||||
resource,
|
||||
state::{action_states, db_client, server_status_cache, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetServersSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetServersSummary {}: GetServersSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetServersSummaryResponse> {
|
||||
let servers =
|
||||
resource::list_for_user::<Server>(Default::default(), &user)
|
||||
.await?;
|
||||
let mut res = GetServersSummaryResponse::default();
|
||||
for server in servers {
|
||||
res.total += 1;
|
||||
match server.info.state {
|
||||
ServerState::Ok => {
|
||||
res.healthy += 1;
|
||||
}
|
||||
ServerState::NotOk => {
|
||||
res.unhealthy += 1;
|
||||
}
|
||||
ServerState::Disabled => {
|
||||
res.disabled += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetPeripheryVersion, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
req: GetPeripheryVersion,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetPeripheryVersionResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&req.server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let version = server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| s.version.clone())
|
||||
.unwrap_or(String::from("unknown"));
|
||||
Ok(GetPeripheryVersionResponse { version })
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetServer, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
req: GetServer,
|
||||
user: User,
|
||||
) -> anyhow::Result<Server> {
|
||||
resource::get_check_permissions::<Server>(
|
||||
&req.server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListServers, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListServers { query }: ListServers,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<ServerListItem>> {
|
||||
resource::list_for_user::<Server>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetServerState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetServerState { server }: GetServerState,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetServerStateResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let status = server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.ok_or(anyhow!("did not find cached status for server"))?;
|
||||
let response = GetServerStateResponse {
|
||||
status: status.state,
|
||||
};
|
||||
Ok(response)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetServerActionState, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetServerActionState { server }: GetServerActionState,
|
||||
user: User,
|
||||
) -> anyhow::Result<ServerActionState> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.server
|
||||
.get(&server.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?;
|
||||
Ok(action_state)
|
||||
}
|
||||
}
|
||||
|
||||
// This protects the peripheries from spam requests
const SYSTEM_INFO_EXPIRY: u128 = FIFTEEN_SECONDS_MS;
// server id -> (serialized system info, expiry timestamp in unix ms).
type SystemInfoCache = Mutex<HashMap<String, Arc<(String, u128)>>>;
/// Lazily-initialized process-wide cache of periphery system info
/// responses, keyed by server id.
fn system_info_cache() -> &'static SystemInfoCache {
  static SYSTEM_INFO_CACHE: OnceLock<SystemInfoCache> =
    OnceLock::new();
  SYSTEM_INFO_CACHE.get_or_init(Default::default)
}
|
||||
|
||||
impl ResolveToString<GetSystemInformation, User> for State {
  /// Returns the server's system information as a JSON string,
  /// served from a short-lived cache to avoid hammering the periphery.
  async fn resolve_to_string(
    &self,
    GetSystemInformation { server }: GetSystemInformation,
    user: User,
  ) -> anyhow::Result<String> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read,
    )
    .await?;

    // NOTE(review): the mutex is held across the periphery request on
    // a cache miss, serializing all concurrent callers of this
    // endpoint — presumably intentional to prevent a request
    // stampede on the periphery; confirm.
    let mut lock = system_info_cache().lock().await;
    let res = match lock.get(&server.id) {
      // Cache hit that has not yet expired: serve the cached JSON.
      Some(cached) if cached.1 > unix_timestamp_ms() => {
        cached.0.clone()
      }
      // Miss or expired: fetch fresh info and cache it with a new expiry.
      _ => {
        let stats = periphery_client(&server)?
          .request(api::stats::GetSystemInformation {})
          .await?;
        let res = serde_json::to_string(&stats)?;
        lock.insert(
          server.id,
          (res.clone(), unix_timestamp_ms() + SYSTEM_INFO_EXPIRY)
            .into(),
        );
        res
      }
    };
    Ok(res)
  }
}
|
||||
|
||||
impl ResolveToString<GetSystemStats, User> for State {
|
||||
async fn resolve_to_string(
|
||||
&self,
|
||||
GetSystemStats { server }: GetSystemStats,
|
||||
user: User,
|
||||
) -> anyhow::Result<String> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let status =
|
||||
server_status_cache().get(&server.id).await.with_context(
|
||||
|| format!("did not find status for server at {}", server.id),
|
||||
)?;
|
||||
let stats = status
|
||||
.stats
|
||||
.as_ref()
|
||||
.context("server stats not available")?;
|
||||
let stats = serde_json::to_string(&stats)?;
|
||||
Ok(stats)
|
||||
}
|
||||
}
|
||||
|
||||
// This protects the peripheries from spam requests
const PROCESSES_EXPIRY: u128 = FIFTEEN_SECONDS_MS;
// server id -> (serialized process list, expiry timestamp in unix ms).
type ProcessesCache = Mutex<HashMap<String, Arc<(String, u128)>>>;
/// Lazily-initialized process-wide cache of periphery process-list
/// responses, keyed by server id.
fn processes_cache() -> &'static ProcessesCache {
  static PROCESSES_CACHE: OnceLock<ProcessesCache> = OnceLock::new();
  PROCESSES_CACHE.get_or_init(Default::default)
}
|
||||
|
||||
impl ResolveToString<GetSystemProcesses, User> for State {
  /// Returns the server's process list as a JSON string, served from a
  /// short-lived cache to avoid hammering the periphery.
  async fn resolve_to_string(
    &self,
    GetSystemProcesses { server }: GetSystemProcesses,
    user: User,
  ) -> anyhow::Result<String> {
    let server = resource::get_check_permissions::<Server>(
      &server,
      &user,
      PermissionLevel::Read,
    )
    .await?;
    // NOTE(review): mirrors GetSystemInformation — the mutex is held
    // across the periphery request on a miss, serializing concurrent
    // callers; presumably intentional stampede protection — confirm.
    let mut lock = processes_cache().lock().await;
    let res = match lock.get(&server.id) {
      // Cache hit that has not yet expired: serve the cached JSON.
      Some(cached) if cached.1 > unix_timestamp_ms() => {
        cached.0.clone()
      }
      // Miss or expired: fetch fresh data and cache with a new expiry.
      _ => {
        let stats = periphery_client(&server)?
          .request(api::stats::GetSystemProcesses {})
          .await?;
        let res = serde_json::to_string(&stats)?;
        lock.insert(
          server.id,
          (res.clone(), unix_timestamp_ms() + PROCESSES_EXPIRY)
            .into(),
        );
        res
      }
    };
    Ok(res)
  }
}
|
||||
|
||||
const STATS_PER_PAGE: i64 = 500;
|
||||
|
||||
impl Resolve<GetHistoricalServerStats, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetHistoricalServerStats {
|
||||
server,
|
||||
granularity,
|
||||
page,
|
||||
}: GetHistoricalServerStats,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetHistoricalServerStatsResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let granularity =
|
||||
get_timelength_in_ms(granularity.to_string().parse().unwrap())
|
||||
as i64;
|
||||
let mut ts_vec = Vec::<i64>::new();
|
||||
let curr_ts = unix_timestamp_ms() as i64;
|
||||
let mut curr_ts = curr_ts
|
||||
- curr_ts % granularity
|
||||
- granularity * STATS_PER_PAGE * page as i64;
|
||||
for _ in 0..STATS_PER_PAGE {
|
||||
ts_vec.push(curr_ts);
|
||||
curr_ts -= granularity;
|
||||
}
|
||||
|
||||
let stats = find_collect(
|
||||
&db_client().await.stats,
|
||||
doc! {
|
||||
"sid": server.id,
|
||||
"ts": { "$in": ts_vec },
|
||||
},
|
||||
FindOptions::builder()
|
||||
.sort(doc! { "ts": -1 })
|
||||
.skip(page as u64 * STATS_PER_PAGE as u64)
|
||||
.limit(STATS_PER_PAGE)
|
||||
.build(),
|
||||
)
|
||||
.await
|
||||
.context("failed to pull stats from db")?;
|
||||
let next_page = if stats.len() == STATS_PER_PAGE as usize {
|
||||
Some(page + 1)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let res = GetHistoricalServerStatsResponse { stats, next_page };
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetDockerImages, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDockerImages { server }: GetDockerImages,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<ImageSummary>> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
periphery_client(&server)?
|
||||
.request(api::build::GetImageList {})
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetDockerNetworks, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDockerNetworks { server }: GetDockerNetworks,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<DockerNetwork>> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
periphery_client(&server)?
|
||||
.request(api::network::GetNetworkList {})
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetDockerContainers, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetDockerContainers { server }: GetDockerContainers,
|
||||
user: User,
|
||||
) -> anyhow::Result<Vec<ContainerSummary>> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
periphery_client(&server)?
|
||||
.request(api::container::GetContainerList {})
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetAvailableAccounts, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetAvailableAccounts { server }: GetAvailableAccounts,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetAvailableAccountsResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let GetAccountsResponse { github, docker } =
|
||||
periphery_client(&server)?
|
||||
.request(api::GetAccounts {})
|
||||
.await
|
||||
.context("failed to get accounts from periphery")?;
|
||||
|
||||
let mut github_set = HashSet::<String>::new();
|
||||
|
||||
github_set.extend(core_config().github_accounts.keys().cloned());
|
||||
github_set.extend(github);
|
||||
|
||||
let mut github = github_set.into_iter().collect::<Vec<_>>();
|
||||
github.sort();
|
||||
|
||||
let mut docker_set = HashSet::<String>::new();
|
||||
|
||||
docker_set.extend(core_config().docker_accounts.keys().cloned());
|
||||
docker_set.extend(docker);
|
||||
|
||||
let mut docker = docker_set.into_iter().collect::<Vec<_>>();
|
||||
docker.sort();
|
||||
|
||||
let res = GetAvailableAccountsResponse { github, docker };
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetAvailableSecrets, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetAvailableSecrets { server }: GetAvailableSecrets,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetAvailableSecretsResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let secrets = periphery_client(&server)?
|
||||
.request(api::GetSecrets {})
|
||||
.await
|
||||
.context("failed to get accounts from periphery")?;
|
||||
Ok(secrets)
|
||||
}
|
||||
}
|
||||
80
bin/core/src/api/read/server_template.rs
Normal file
80
bin/core/src/api/read/server_template.rs
Normal file
@@ -0,0 +1,80 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetServerTemplate, GetServerTemplateResponse,
|
||||
GetServerTemplatesSummary, GetServerTemplatesSummaryResponse,
|
||||
ListServerTemplates, ListServerTemplatesResponse,
|
||||
},
|
||||
entities::{
|
||||
permission::PermissionLevel, server_template::ServerTemplate,
|
||||
update::ResourceTargetVariant, user::User,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_resource_ids_for_non_admin, resource, state::{db_client, State}
|
||||
};
|
||||
|
||||
impl Resolve<GetServerTemplate, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetServerTemplate { server_template }: GetServerTemplate,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetServerTemplateResponse> {
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
&server_template,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListServerTemplates, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListServerTemplates { query }: ListServerTemplates,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListServerTemplatesResponse> {
|
||||
resource::list_for_user::<ServerTemplate>(query, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<GetServerTemplatesSummary, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetServerTemplatesSummary {}: GetServerTemplatesSummary,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetServerTemplatesSummaryResponse> {
|
||||
let query = if user.admin {
|
||||
None
|
||||
} else {
|
||||
let ids = get_resource_ids_for_non_admin(
|
||||
&user.id,
|
||||
ResourceTargetVariant::ServerTemplate,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.flat_map(|id| ObjectId::from_str(&id))
|
||||
.collect::<Vec<_>>();
|
||||
let query = doc! {
|
||||
"_id": { "$in": ids }
|
||||
};
|
||||
Some(query)
|
||||
};
|
||||
let total = db_client()
|
||||
.await
|
||||
.builders
|
||||
.count_documents(query, None)
|
||||
.await
|
||||
.context("failed to count all builder documents")?;
|
||||
let res = GetServerTemplatesSummaryResponse {
|
||||
total: total as u32,
|
||||
};
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
34
bin/core/src/api/read/tag.rs
Normal file
34
bin/core/src/api/read/tag.rs
Normal file
@@ -0,0 +1,34 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::read::{GetTag, ListTags},
|
||||
entities::{tag::Tag, user::User},
|
||||
};
|
||||
use mungos::find::find_collect;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_tag,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetTag, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetTag { tag }: GetTag,
|
||||
_: User,
|
||||
) -> anyhow::Result<Tag> {
|
||||
get_tag(&tag).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListTags, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListTags { query }: ListTags,
|
||||
_: User,
|
||||
) -> anyhow::Result<Vec<Tag>> {
|
||||
find_collect(&db_client().await.tags, query, None)
|
||||
.await
|
||||
.context("failed to get tags from db")
|
||||
}
|
||||
}
|
||||
467
bin/core/src/api/read/toml.rs
Normal file
467
bin/core/src/api/read/toml.rs
Normal file
@@ -0,0 +1,467 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::{
|
||||
execute::Execution,
|
||||
read::{
|
||||
ExportAllResourcesToToml, ExportAllResourcesToTomlResponse,
|
||||
ExportResourcesToToml, ExportResourcesToTomlResponse,
|
||||
GetUserGroup, ListUserTargetPermissions,
|
||||
},
|
||||
},
|
||||
entities::{
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::{Builder, BuilderConfig},
|
||||
deployment::{Deployment, DeploymentImage},
|
||||
permission::{PermissionLevel, UserTarget},
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
resource::Resource,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
toml::{
|
||||
PermissionToml, ResourceToml, ResourcesToml, UserGroupToml,
|
||||
},
|
||||
update::ResourceTarget,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::find::find_collect;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_user_user_group_ids,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<ExportAllResourcesToToml, User> for State {
  /// Exports every resource the caller can see (plus their user
  /// groups) by collecting all visible targets and delegating to
  /// the ExportResourcesToToml resolver.
  async fn resolve(
    &self,
    ExportAllResourcesToToml {}: ExportAllResourcesToToml,
    user: User,
  ) -> anyhow::Result<ExportAllResourcesToTomlResponse> {
    let mut targets = Vec::<ResourceTarget>::new();

    // Gather one ResourceTarget per visible resource of each type.
    // list_for_user applies the caller's permission filtering.
    targets.extend(
      resource::list_for_user::<Alerter>(Default::default(), &user)
        .await?
        .into_iter()
        .map(|resource| ResourceTarget::Alerter(resource.id)),
    );
    targets.extend(
      resource::list_for_user::<Builder>(Default::default(), &user)
        .await?
        .into_iter()
        .map(|resource| ResourceTarget::Builder(resource.id)),
    );
    targets.extend(
      resource::list_for_user::<Server>(Default::default(), &user)
        .await?
        .into_iter()
        .map(|resource| ResourceTarget::Server(resource.id)),
    );
    targets.extend(
      resource::list_for_user::<Deployment>(
        Default::default(),
        &user,
      )
      .await?
      .into_iter()
      .map(|resource| ResourceTarget::Deployment(resource.id)),
    );
    targets.extend(
      resource::list_for_user::<Build>(Default::default(), &user)
        .await?
        .into_iter()
        .map(|resource| ResourceTarget::Build(resource.id)),
    );
    targets.extend(
      resource::list_for_user::<Repo>(Default::default(), &user)
        .await?
        .into_iter()
        .map(|resource| ResourceTarget::Repo(resource.id)),
    );
    targets.extend(
      resource::list_for_user::<Procedure>(Default::default(), &user)
        .await?
        .into_iter()
        .map(|resource| ResourceTarget::Procedure(resource.id)),
    );
    targets.extend(
      resource::list_for_user::<ServerTemplate>(
        Default::default(),
        &user,
      )
      .await?
      .into_iter()
      .map(|resource| ResourceTarget::ServerTemplate(resource.id)),
    );

    // Admins export every user group; others only the groups they
    // belong to.
    let user_groups = if user.admin {
      find_collect(&db_client().await.user_groups, None, None)
        .await
        .context("failed to query db for user groups")?
        .into_iter()
        .map(|user_group| user_group.id)
        .collect()
    } else {
      get_user_user_group_ids(&user.id).await?
    };

    // Delegate serialization to the targeted-export resolver.
    self
      .resolve(
        ExportResourcesToToml {
          targets,
          user_groups,
        },
        user,
      )
      .await
  }
}
|
||||
|
||||
impl Resolve<ExportResourcesToToml, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ExportResourcesToToml {
|
||||
targets,
|
||||
user_groups,
|
||||
}: ExportResourcesToToml,
|
||||
user: User,
|
||||
) -> anyhow::Result<ExportResourcesToTomlResponse> {
|
||||
let mut res = ResourcesToml::default();
|
||||
let names = ResourceNames::new()
|
||||
.await
|
||||
.context("failed to init resource name maps")?;
|
||||
for target in targets {
|
||||
match target {
|
||||
ResourceTarget::Alerter(id) => {
|
||||
let alerter = resource::get_check_permissions::<Alerter>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
res.alerters.push(convert_resource(alerter, &names.tags))
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
let template = resource::get_check_permissions::<
|
||||
ServerTemplate,
|
||||
>(
|
||||
&id, &user, PermissionLevel::Read
|
||||
)
|
||||
.await?;
|
||||
res
|
||||
.server_templates
|
||||
.push(convert_resource(template, &names.tags))
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
res.servers.push(convert_resource(server, &names.tags))
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
let mut builder =
|
||||
resource::get_check_permissions::<Builder>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
// replace server id of builder
|
||||
if let BuilderConfig::Server(config) = &mut builder.config {
|
||||
config.server_id.clone_from(
|
||||
names.servers.get(&id).unwrap_or(&String::new()),
|
||||
)
|
||||
}
|
||||
res.builders.push(convert_resource(builder, &names.tags))
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
let mut build = resource::get_check_permissions::<Build>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
// replace builder id of build
|
||||
build.config.builder_id.clone_from(
|
||||
names
|
||||
.builders
|
||||
.get(&build.config.builder_id)
|
||||
.unwrap_or(&String::new()),
|
||||
);
|
||||
res.builds.push(convert_resource(build, &names.tags))
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
let mut deployment = resource::get_check_permissions::<
|
||||
Deployment,
|
||||
>(
|
||||
&id, &user, PermissionLevel::Read
|
||||
)
|
||||
.await?;
|
||||
// replace deployment server with name
|
||||
deployment.config.server_id.clone_from(
|
||||
names
|
||||
.servers
|
||||
.get(&deployment.config.server_id)
|
||||
.unwrap_or(&String::new()),
|
||||
);
|
||||
// replace deployment build id with name
|
||||
if let DeploymentImage::Build { build_id, .. } =
|
||||
&mut deployment.config.image
|
||||
{
|
||||
build_id.clone_from(
|
||||
names.builds.get(build_id).unwrap_or(&String::new()),
|
||||
);
|
||||
}
|
||||
res
|
||||
.deployments
|
||||
.push(convert_resource(deployment, &names.tags))
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
// replace repo server with name
|
||||
repo.config.server_id.clone_from(
|
||||
names
|
||||
.servers
|
||||
.get(&repo.config.server_id)
|
||||
.unwrap_or(&String::new()),
|
||||
);
|
||||
res.repos.push(convert_resource(repo, &names.tags))
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
add_procedure(&id, &mut res, &user, &names)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("failed to add procedure {id}")
|
||||
})?;
|
||||
}
|
||||
ResourceTarget::System(_) => continue,
|
||||
};
|
||||
}
|
||||
|
||||
add_user_groups(user_groups, &mut res, &user)
|
||||
.await
|
||||
.context("failed to add user groups")?;
|
||||
|
||||
let toml = toml::to_string(&res)
|
||||
.context("failed to serialize resources to toml")?;
|
||||
|
||||
Ok(ExportResourcesToTomlResponse { toml })
|
||||
}
|
||||
}
|
||||
|
||||
async fn add_procedure(
|
||||
id: &str,
|
||||
res: &mut ResourcesToml,
|
||||
user: &User,
|
||||
names: &ResourceNames,
|
||||
) -> anyhow::Result<()> {
|
||||
let mut procedure = resource::get_check_permissions::<Procedure>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
for execution in &mut procedure.config.executions {
|
||||
match &mut execution.execution {
|
||||
Execution::RunProcedure(exec) => exec.procedure.clone_from(
|
||||
names
|
||||
.procedures
|
||||
.get(&exec.procedure)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::RunBuild(exec) => exec.build.clone_from(
|
||||
names.builds.get(&exec.build).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::Deploy(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::StartContainer(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::StopContainer(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::RemoveContainer(exec) => exec.deployment.clone_from(
|
||||
names
|
||||
.deployments
|
||||
.get(&exec.deployment)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::CloneRepo(exec) => exec.repo.clone_from(
|
||||
names.repos.get(&exec.repo).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PullRepo(exec) => exec.repo.clone_from(
|
||||
names.repos.get(&exec.repo).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::StopAllContainers(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PruneDockerNetworks(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PruneDockerImages(exec) => exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::PruneDockerContainers(exec) => {
|
||||
exec.server.clone_from(
|
||||
names.servers.get(&exec.server).unwrap_or(&String::new()),
|
||||
)
|
||||
}
|
||||
Execution::None(_) => continue,
|
||||
}
|
||||
}
|
||||
res
|
||||
.procedures
|
||||
.push(convert_resource(procedure, &names.tags));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Id -> display-name lookup tables, one per resource type (plus tags).
/// Used while exporting resources to toml, where serialized configs should
/// reference other resources by name rather than by database id.
struct ResourceNames {
  // tag id -> tag name
  tags: HashMap<String, String>,
  // server id -> server name
  servers: HashMap<String, String>,
  // builder id -> builder name
  builders: HashMap<String, String>,
  // build id -> build name
  builds: HashMap<String, String>,
  // repo id -> repo name
  repos: HashMap<String, String>,
  // deployment id -> deployment name
  deployments: HashMap<String, String>,
  // procedure id -> procedure name
  procedures: HashMap<String, String>,
}
|
||||
|
||||
impl ResourceNames {
|
||||
async fn new() -> anyhow::Result<ResourceNames> {
|
||||
let db = db_client().await;
|
||||
Ok(ResourceNames {
|
||||
tags: find_collect(&db.tags, None, None)
|
||||
.await
|
||||
.context("failed to get all tags")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
servers: find_collect(&db.servers, None, None)
|
||||
.await
|
||||
.context("failed to get all servers")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
builders: find_collect(&db.builders, None, None)
|
||||
.await
|
||||
.context("failed to get all builders")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
builds: find_collect(&db.builds, None, None)
|
||||
.await
|
||||
.context("failed to get all builds")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
repos: find_collect(&db.repos, None, None)
|
||||
.await
|
||||
.context("failed to get all repos")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
deployments: find_collect(&db.deployments, None, None)
|
||||
.await
|
||||
.context("failed to get all deployments")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
procedures: find_collect(&db.procedures, None, None)
|
||||
.await
|
||||
.context("failed to get all procedures")?
|
||||
.into_iter()
|
||||
.map(|t| (t.id, t.name))
|
||||
.collect::<HashMap<_, _>>(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async fn add_user_groups(
|
||||
user_groups: Vec<String>,
|
||||
res: &mut ResourcesToml,
|
||||
user: &User,
|
||||
) -> anyhow::Result<()> {
|
||||
let db = db_client().await;
|
||||
|
||||
let usernames = find_collect(&db.users, None, None)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|user| (user.id, user.username))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
for user_group in user_groups {
|
||||
let ug = State
|
||||
.resolve(GetUserGroup { user_group }, user.clone())
|
||||
.await?;
|
||||
// this method is admin only, but we already know user can see user group if above does not return Err
|
||||
let permissions = State
|
||||
.resolve(
|
||||
ListUserTargetPermissions {
|
||||
user_target: UserTarget::UserGroup(ug.id),
|
||||
},
|
||||
User {
|
||||
admin: true,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|permission| PermissionToml {
|
||||
target: permission.resource_target,
|
||||
level: permission.level,
|
||||
})
|
||||
.collect();
|
||||
res.user_groups.push(UserGroupToml {
|
||||
name: ug.name,
|
||||
users: ug
|
||||
.users
|
||||
.into_iter()
|
||||
.filter_map(|user_id| usernames.get(&user_id).cloned())
|
||||
.collect(),
|
||||
permissions,
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn convert_resource<Config, Info: Default, PartialConfig>(
|
||||
resource: Resource<Config, Info>,
|
||||
tag_names: &HashMap<String, String>,
|
||||
) -> ResourceToml<PartialConfig>
|
||||
where
|
||||
Config: Into<PartialConfig>,
|
||||
{
|
||||
ResourceToml {
|
||||
name: resource.name,
|
||||
tags: resource
|
||||
.tags
|
||||
.iter()
|
||||
.filter_map(|t| tag_names.get(t).cloned())
|
||||
.collect(),
|
||||
description: resource.description,
|
||||
config: resource.config.into(),
|
||||
}
|
||||
}
|
||||
242
bin/core/src/api/read/update.rs
Normal file
242
bin/core/src/api/read/update.rs
Normal file
@@ -0,0 +1,242 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
|
||||
entities::{
|
||||
alerter::Alerter,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
deployment::Deployment,
|
||||
permission::PermissionLevel,
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
update::{
|
||||
ResourceTarget, ResourceTargetVariant, Update, UpdateListItem,
|
||||
},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_resource_ids_for_non_admin,
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
const UPDATES_PER_PAGE: i64 = 100;
|
||||
|
||||
impl Resolve<ListUpdates, User> for State {
  // Paginated list of updates (operation history), newest first.
  // Admins see all updates matching `query`; non-admins are additionally
  // restricted to updates whose target is a resource they have access to.
  async fn resolve(
    &self,
    ListUpdates { query, page }: ListUpdates,
    user: User,
  ) -> anyhow::Result<ListUpdatesResponse> {
    let query = if user.admin {
      query
    } else {
      // Gather, per resource type, the ids this non-admin user can access.
      let server_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Server,
      )
      .await?;
      let deployment_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Deployment,
      )
      .await?;
      let build_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Build,
      )
      .await?;
      let repo_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Repo,
      )
      .await?;
      let procedure_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Procedure,
      )
      .await?;
      let builder_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Builder,
      )
      .await?;
      let alerter_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::Alerter,
      )
      .await?;
      let server_template_ids = get_resource_ids_for_non_admin(
        &user.id,
        ResourceTargetVariant::ServerTemplate,
      )
      .await?;

      // Restrict the caller's query to updates targeting accessible ids.
      let mut query = query.unwrap_or_default();
      query.extend(doc! {
        "$or": [
          { "target.type": "Server", "target.id": { "$in": &server_ids } },
          { "target.type": "Deployment", "target.id": { "$in": &deployment_ids } },
          { "target.type": "Build", "target.id": { "$in": &build_ids } },
          { "target.type": "Repo", "target.id": { "$in": &repo_ids } },
          { "target.type": "Procedure", "target.id": { "$in": &procedure_ids } },
          { "target.type": "Builder", "target.id": { "$in": &builder_ids } },
          { "target.type": "Alerter", "target.id": { "$in": &alerter_ids } },
          { "target.type": "ServerTemplate", "target.id": { "$in": &server_template_ids } },
        ]
      });
      query.into()
    };

    // user id -> username, to label each update with its operator's name.
    let usernames =
      find_collect(&db_client().await.users, None, None)
        .await
        .context("failed to pull users from db")?
        .into_iter()
        .map(|u| (u.id, u.username))
        .collect::<HashMap<_, _>>();

    // Page through updates sorted by start_ts descending.
    let updates = find_collect(
      &db_client().await.updates,
      query,
      FindOptions::builder()
        .sort(doc! { "start_ts": -1 })
        .skip(page as u64 * UPDATES_PER_PAGE as u64)
        .limit(UPDATES_PER_PAGE)
        .build(),
    )
    .await
    .context("failed to pull updates from db")?
    .into_iter()
    .map(|u| {
      // Service users keep their raw operator id as the display name;
      // other operators are resolved via the usernames map.
      let username = if User::is_service_user(&u.operator) {
        u.operator.clone()
      } else {
        usernames
          .get(&u.operator)
          .cloned()
          .unwrap_or("unknown".to_string())
      };
      UpdateListItem {
        username,
        id: u.id,
        operation: u.operation,
        start_ts: u.start_ts,
        success: u.success,
        operator: u.operator,
        target: u.target,
        status: u.status,
        version: u.version,
      }
    })
    .collect::<Vec<_>>();

    // A full page suggests more results may exist; short page ends paging.
    let next_page = if updates.len() == UPDATES_PER_PAGE as usize {
      Some(page + 1)
    } else {
      None
    };

    Ok(ListUpdatesResponse { updates, next_page })
  }
}
|
||||
|
||||
impl Resolve<GetUpdate, User> for State {
  // Fetch a single update by id. Admins may view any update; other users
  // must have at least Read permission on the update's target resource.
  async fn resolve(
    &self,
    GetUpdate { id }: GetUpdate,
    user: User,
  ) -> anyhow::Result<Update> {
    let update = find_one_by_id(&db_client().await.updates, &id)
      .await
      .context("failed to query to db")?
      .context("no update exists with given id")?;
    // Admins bypass the per-target permission check below.
    if user.admin {
      return Ok(update);
    }
    // Check Read permission against the concrete target resource type.
    // Each arm only validates access; the update itself is returned below.
    match &update.target {
      ResourceTarget::System(_) => {
        return Err(anyhow!(
          "user must be admin to view system updates"
        ))
      }
      ResourceTarget::Server(id) => {
        resource::get_check_permissions::<Server>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Deployment(id) => {
        resource::get_check_permissions::<Deployment>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Build(id) => {
        resource::get_check_permissions::<Build>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Repo(id) => {
        resource::get_check_permissions::<Repo>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Builder(id) => {
        resource::get_check_permissions::<Builder>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Alerter(id) => {
        resource::get_check_permissions::<Alerter>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::Procedure(id) => {
        resource::get_check_permissions::<Procedure>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
      ResourceTarget::ServerTemplate(id) => {
        resource::get_check_permissions::<ServerTemplate>(
          id,
          &user,
          PermissionLevel::Read,
        )
        .await?;
      }
    }
    Ok(update)
  }
}
|
||||
113
bin/core/src/api/read/user.rs
Normal file
113
bin/core/src/api/read/user.rs
Normal file
@@ -0,0 +1,113 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetUsername, GetUsernameResponse, ListApiKeys,
|
||||
ListApiKeysForServiceUser, ListApiKeysForServiceUserResponse,
|
||||
ListApiKeysResponse, ListUsers, ListUsersResponse,
|
||||
},
|
||||
entities::user::{User, UserConfig},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id, find::find_collect, mongodb::bson::doc,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::{db_client, State};
|
||||
|
||||
impl Resolve<GetUsername, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetUsername { user_id }: GetUsername,
|
||||
_: User,
|
||||
) -> anyhow::Result<GetUsernameResponse> {
|
||||
let user = find_one_by_id(&db_client().await.users, &user_id)
|
||||
.await
|
||||
.context("failed at mongo query for user")?
|
||||
.context("no user found with id")?;
|
||||
|
||||
let avatar = match user.config {
|
||||
UserConfig::Github { avatar, .. } => Some(avatar),
|
||||
UserConfig::Google { avatar, .. } => Some(avatar),
|
||||
_ => None,
|
||||
};
|
||||
|
||||
Ok(GetUsernameResponse {
|
||||
username: user.username,
|
||||
avatar,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListUsers, User> for State {
  // Admin-only: list every user, with sensitive fields sanitized out.
  async fn resolve(
    &self,
    ListUsers {}: ListUsers,
    user: User,
  ) -> anyhow::Result<ListUsersResponse> {
    if !user.admin {
      return Err(anyhow!("this route is only accessable by admins"));
    }
    let mut users =
      find_collect(&db_client().await.users, None, None)
        .await
        .context("failed to pull users from db")?;
    // Strip secrets / sensitive fields before returning to the client.
    users.iter_mut().for_each(|user| user.sanitize());
    Ok(users)
  }
}
|
||||
|
||||
impl Resolve<ListApiKeys, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListApiKeys {}: ListApiKeys,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListApiKeysResponse> {
|
||||
let api_keys = find_collect(
|
||||
&db_client().await.api_keys,
|
||||
doc! { "user_id": &user.id },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to query db for api keys")?
|
||||
.into_iter()
|
||||
.map(|mut api_keys| {
|
||||
api_keys.sanitize();
|
||||
api_keys
|
||||
})
|
||||
.collect();
|
||||
Ok(api_keys)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListApiKeysForServiceUser, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListApiKeysForServiceUser { user_id }: ListApiKeysForServiceUser,
|
||||
admin: User,
|
||||
) -> anyhow::Result<ListApiKeysForServiceUserResponse> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This method is admin only."));
|
||||
}
|
||||
let user = find_one_by_id(&db_client().await.users, &user_id)
|
||||
.await
|
||||
.context("failed to query db for users")?
|
||||
.context("user at id not found")?;
|
||||
let UserConfig::Service { .. } = user.config else {
|
||||
return Err(anyhow!("Given user is not service user"));
|
||||
};
|
||||
let api_keys = find_collect(
|
||||
&db_client().await.api_keys,
|
||||
doc! { "user_id": user_id },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to query db for api keys")?
|
||||
.into_iter()
|
||||
.map(|mut api_keys| {
|
||||
api_keys.sanitize();
|
||||
api_keys
|
||||
})
|
||||
.collect();
|
||||
Ok(api_keys)
|
||||
}
|
||||
}
|
||||
58
bin/core/src/api/read/user_group.rs
Normal file
58
bin/core/src/api/read/user_group.rs
Normal file
@@ -0,0 +1,58 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetUserGroup, GetUserGroupResponse, ListUserGroups,
|
||||
ListUserGroupsResponse,
|
||||
},
|
||||
entities::user::User,
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, oid::ObjectId, Document},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::{db_client, State};
|
||||
|
||||
impl Resolve<GetUserGroup, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
GetUserGroup { user_group }: GetUserGroup,
|
||||
user: User,
|
||||
) -> anyhow::Result<GetUserGroupResponse> {
|
||||
let mut filter = match ObjectId::from_str(&user_group) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": &user_group },
|
||||
};
|
||||
// Don't allow non admin users to get UserGroups they aren't a part of.
|
||||
if !user.admin {
|
||||
// Filter for only UserGroups which contain the users id
|
||||
filter.insert("users", &user.id);
|
||||
}
|
||||
db_client()
|
||||
.await
|
||||
.user_groups
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for user groups")?
|
||||
.context("no UserGroup found with given name or id")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ListUserGroups, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListUserGroups {}: ListUserGroups,
|
||||
user: User,
|
||||
) -> anyhow::Result<ListUserGroupsResponse> {
|
||||
let mut filter = Document::new();
|
||||
if !user.admin {
|
||||
filter.insert("users", &user.id);
|
||||
}
|
||||
find_collect(&db_client().await.user_groups, filter, None)
|
||||
.await
|
||||
.context("failed to query db for UserGroups")
|
||||
}
|
||||
}
|
||||
43
bin/core/src/api/read/variable.rs
Normal file
43
bin/core/src/api/read/variable.rs
Normal file
@@ -0,0 +1,43 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::read::{
|
||||
GetVariable, GetVariableResponse, ListVariables,
|
||||
ListVariablesResponse,
|
||||
},
|
||||
entities::user::User,
|
||||
};
|
||||
use mungos::find::find_collect;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_variable,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<GetVariable, User> for State {
  // Fetch a single variable by name; delegates entirely to the
  // query helper. No permission gating beyond authentication.
  async fn resolve(
    &self,
    GetVariable { name }: GetVariable,
    _: User,
  ) -> anyhow::Result<GetVariableResponse> {
    get_variable(&name).await
  }
}
|
||||
|
||||
impl Resolve<ListVariables, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
ListVariables {}: ListVariables,
|
||||
_: User,
|
||||
) -> anyhow::Result<ListVariablesResponse> {
|
||||
let variables =
|
||||
find_collect(&db_client().await.variables, None, None)
|
||||
.await
|
||||
.context("failed to query db for variables")?;
|
||||
Ok(ListVariablesResponse {
|
||||
variables,
|
||||
secrets: core_config().secrets.keys().cloned().collect(),
|
||||
})
|
||||
}
|
||||
}
|
||||
64
bin/core/src/api/write/alerter.rs
Normal file
64
bin/core/src/api/write/alerter.rs
Normal file
@@ -0,0 +1,64 @@
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CopyAlerter, CreateAlerter, DeleteAlerter, UpdateAlerter,
|
||||
},
|
||||
entities::{
|
||||
alerter::Alerter, permission::PermissionLevel, user::User,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
impl Resolve<CreateAlerter, User> for State {
  // Create a new Alerter; permission / validation handled by resource::create.
  #[instrument(name = "CreateAlerter", skip(self, user))]
  async fn resolve(
    &self,
    CreateAlerter { name, config }: CreateAlerter,
    user: User,
  ) -> anyhow::Result<Alerter> {
    resource::create::<Alerter>(&name, config, &user).await
  }
}
|
||||
|
||||
impl Resolve<CopyAlerter, User> for State {
  // Create a new Alerter named `name` with the config of the Alerter at `id`.
  // Requires Write permission on the source alerter.
  #[instrument(name = "CopyAlerter", skip(self, user))]
  async fn resolve(
    &self,
    CopyAlerter { name, id }: CopyAlerter,
    user: User,
  ) -> anyhow::Result<Alerter> {
    let Alerter {
      config,
      ..
    } = resource::get_check_permissions::<Alerter>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    // Config is converted into its partial form for create.
    resource::create::<Alerter>(&name, config.into(), &user).await
  }
}
|
||||
|
||||
impl Resolve<DeleteAlerter, User> for State {
  // Delete the Alerter at `id`; permission checks handled by resource::delete.
  #[instrument(name = "DeleteAlerter", skip(self, user))]
  async fn resolve(
    &self,
    DeleteAlerter { id }: DeleteAlerter,
    user: User,
  ) -> anyhow::Result<Alerter> {
    resource::delete::<Alerter>(&id, &user).await
  }
}
|
||||
|
||||
impl Resolve<UpdateAlerter, User> for State {
  // Apply a partial config update to the Alerter at `id`;
  // permission checks handled by resource::update.
  #[instrument(name = "UpdateAlerter", skip(self, user))]
  async fn resolve(
    &self,
    UpdateAlerter { id, config }: UpdateAlerter,
    user: User,
  ) -> anyhow::Result<Alerter> {
    resource::update::<Alerter>(&id, config, &user).await
  }
}
|
||||
147
bin/core/src/api/write/api_key.rs
Normal file
147
bin/core/src/api/write/api_key.rs
Normal file
@@ -0,0 +1,147 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
api_key::ApiKey,
|
||||
monitor_timestamp,
|
||||
user::{User, UserConfig},
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::find_one_by_id, mongodb::bson::doc};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
auth::random_string,
|
||||
helpers::query::get_user,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
const SECRET_LENGTH: usize = 40;
|
||||
const BCRYPT_COST: u32 = 10;
|
||||
|
||||
impl Resolve<CreateApiKey, User> for State {
  // Create a new api key for the calling user. Only the bcrypt hash of the
  // secret is stored; the plaintext secret is returned exactly once here.
  #[instrument(
    name = "CreateApiKey",
    level = "debug",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    CreateApiKey { name, expires }: CreateApiKey,
    user: User,
  ) -> anyhow::Result<CreateApiKeyResponse> {
    // Re-fetch the user from the db rather than trusting the request user.
    let user = get_user(&user.id).await?;

    // Key and secret carry distinguishing prefixes ("K-" / "S-").
    let key = format!("K-{}", random_string(SECRET_LENGTH));
    let secret = format!("S-{}", random_string(SECRET_LENGTH));
    let secret_hash = bcrypt::hash(&secret, BCRYPT_COST)
      .context("failed at hashing secret string")?;

    let api_key = ApiKey {
      name,
      key: key.clone(),
      // Store only the hash, never the plaintext secret.
      secret: secret_hash,
      user_id: user.id.clone(),
      created_at: monitor_timestamp(),
      expires,
    };
    db_client()
      .await
      .api_keys
      .insert_one(api_key, None)
      .await
      .context("failed to create api key on db")?;
    Ok(CreateApiKeyResponse { key, secret })
  }
}
|
||||
|
||||
impl Resolve<DeleteApiKey, User> for State {
  // Delete one of the calling user's api keys. Users may only delete
  // keys that belong to them.
  #[instrument(
    name = "DeleteApiKey",
    level = "debug",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    DeleteApiKey { key }: DeleteApiKey,
    user: User,
  ) -> anyhow::Result<DeleteApiKeyResponse> {
    let client = db_client().await;
    // Look the key up first to verify ownership before deleting.
    let key = client
      .api_keys
      .find_one(doc! { "key": &key }, None)
      .await
      .context("failed at db query")?
      .context("no api key with key found")?;
    if user.id != key.user_id {
      return Err(anyhow!("api key does not belong to user"));
    }
    client
      .api_keys
      .delete_one(doc! { "key": key.key }, None)
      .await
      .context("failed to delete api key from db")?;
    Ok(DeleteApiKeyResponse {})
  }
}
|
||||
|
||||
impl Resolve<CreateApiKeyForServiceUser, User> for State {
  // Admin-only: create an api key on behalf of a service user by
  // delegating to CreateApiKey resolved as that service user.
  #[instrument(name = "CreateApiKeyForServiceUser", skip(self, user))]
  async fn resolve(
    &self,
    CreateApiKeyForServiceUser {
      user_id,
      name,
      expires,
    }: CreateApiKeyForServiceUser,
    user: User,
  ) -> anyhow::Result<CreateApiKeyForServiceUserResponse> {
    if !user.admin {
      return Err(anyhow!("user not admin"));
    }
    let service_user =
      find_one_by_id(&db_client().await.users, &user_id)
        .await
        .context("failed to query db for user")?
        .context("no user found with id")?;
    // Only service users may have keys created for them this way.
    let UserConfig::Service { .. } = &service_user.config else {
      return Err(anyhow!("user is not service user"));
    };
    self
      .resolve(CreateApiKey { name, expires }, service_user)
      .await
  }
}
|
||||
|
||||
impl Resolve<DeleteApiKeyForServiceUser, User> for State {
|
||||
#[instrument(name = "DeleteApiKeyForServiceUser", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteApiKeyForServiceUser { key }: DeleteApiKeyForServiceUser,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteApiKeyForServiceUserResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("user not admin"));
|
||||
}
|
||||
let db = db_client().await;
|
||||
let api_key = db
|
||||
.api_keys
|
||||
.find_one(doc! { "key": &key }, None)
|
||||
.await
|
||||
.context("failed to query db for api key")?
|
||||
.context("did not find matching api key")?;
|
||||
let service_user =
|
||||
find_one_by_id(&db_client().await.users, &api_key.user_id)
|
||||
.await
|
||||
.context("failed to query db for user")?
|
||||
.context("no user found with id")?;
|
||||
let UserConfig::Service { .. } = &service_user.config else {
|
||||
return Err(anyhow!("user is not service user"));
|
||||
};
|
||||
db.api_keys
|
||||
.delete_one(doc! { "key": key }, None)
|
||||
.await
|
||||
.context("failed to delete api key on db")?;
|
||||
Ok(DeleteApiKeyForServiceUserResponse {})
|
||||
}
|
||||
}
|
||||
60
bin/core/src/api/write/build.rs
Normal file
60
bin/core/src/api/write/build.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{build::Build, permission::PermissionLevel, user::User},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
impl Resolve<CreateBuild, User> for State {
  // Create a new Build; permission / validation handled by resource::create.
  #[instrument(name = "CreateBuild", skip(self, user))]
  async fn resolve(
    &self,
    CreateBuild { name, config }: CreateBuild,
    user: User,
  ) -> anyhow::Result<Build> {
    resource::create::<Build>(&name, config, &user).await
  }
}
|
||||
|
||||
impl Resolve<CopyBuild, User> for State {
  // Create a new Build named `name` with the config of the Build at `id`.
  // Requires Write permission on the source build.
  #[instrument(name = "CopyBuild", skip(self, user))]
  async fn resolve(
    &self,
    CopyBuild { name, id }: CopyBuild,
    user: User,
  ) -> anyhow::Result<Build> {
    let Build {
      config,
      ..
    } = resource::get_check_permissions::<Build>(
      &id,
      &user,
      PermissionLevel::Write,
    )
    .await?;
    // Config is converted into its partial form for create.
    resource::create::<Build>(&name, config.into(), &user).await
  }
}
|
||||
|
||||
impl Resolve<DeleteBuild, User> for State {
  // Delete the Build at `id`; permission checks handled by resource::delete.
  #[instrument(name = "DeleteBuild", skip(self, user))]
  async fn resolve(
    &self,
    DeleteBuild { id }: DeleteBuild,
    user: User,
  ) -> anyhow::Result<Build> {
    resource::delete::<Build>(&id, &user).await
  }
}
|
||||
|
||||
impl Resolve<UpdateBuild, User> for State {
  // Apply a partial config update to the Build at `id`;
  // permission checks handled by resource::update.
  #[instrument(name = "UpdateBuild", skip(self, user))]
  async fn resolve(
    &self,
    UpdateBuild { id, config }: UpdateBuild,
    user: User,
  ) -> anyhow::Result<Build> {
    resource::update::<Build>(&id, config, &user).await
  }
}
|
||||
59
bin/core/src/api/write/builder.rs
Normal file
59
bin/core/src/api/write/builder.rs
Normal file
@@ -0,0 +1,59 @@
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
builder::Builder, permission::PermissionLevel, user::User,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
impl Resolve<CreateBuilder, User> for State {
  // Create a new Builder; permission / validation handled by resource::create.
  #[instrument(name = "CreateBuilder", skip(self, user))]
  async fn resolve(
    &self,
    CreateBuilder { name, config }: CreateBuilder,
    user: User,
  ) -> anyhow::Result<Builder> {
    resource::create::<Builder>(&name, config, &user).await
  }
}
|
||||
|
||||
impl Resolve<CopyBuilder, User> for State {
|
||||
#[instrument(name = "CopyBuilder", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CopyBuilder { name, id }: CopyBuilder,
|
||||
user: User,
|
||||
) -> anyhow::Result<Builder> {
|
||||
let Builder { config, .. } = resource::get_check_permissions::<
|
||||
Builder,
|
||||
>(
|
||||
&id, &user, PermissionLevel::Write
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Builder>(&name, config.into(), &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteBuilder, User> for State {
  // Delete the Builder at `id`; permission checks handled by resource::delete.
  #[instrument(name = "DeleteBuilder", skip(self, user))]
  async fn resolve(
    &self,
    DeleteBuilder { id }: DeleteBuilder,
    user: User,
  ) -> anyhow::Result<Builder> {
    resource::delete::<Builder>(&id, &user).await
  }
}
|
||||
|
||||
impl Resolve<UpdateBuilder, User> for State {
  // Apply a partial config update to the Builder at `id`;
  // permission checks handled by resource::update.
  #[instrument(name = "UpdateBuilder", skip(self, user))]
  async fn resolve(
    &self,
    UpdateBuilder { id, config }: UpdateBuilder,
    user: User,
  ) -> anyhow::Result<Builder> {
    resource::update::<Builder>(&id, config, &user).await
  }
}
|
||||
155
bin/core/src/api/write/deployment.rs
Normal file
155
bin/core/src/api/write/deployment.rs
Normal file
@@ -0,0 +1,155 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
deployment::{Deployment, DeploymentState},
|
||||
monitor_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
to_monitor_name,
|
||||
update::Update,
|
||||
user::User,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
periphery_client,
|
||||
query::get_deployment_state,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
resource,
|
||||
state::{action_states, db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<CreateDeployment, User> for State {
  // Create a new Deployment; permission / validation handled by
  // resource::create.
  #[instrument(name = "CreateDeployment", skip(self, user))]
  async fn resolve(
    &self,
    CreateDeployment { name, config }: CreateDeployment,
    user: User,
  ) -> anyhow::Result<Deployment> {
    resource::create::<Deployment>(&name, config, &user).await
  }
}
|
||||
|
||||
impl Resolve<CopyDeployment, User> for State {
|
||||
#[instrument(name = "CopyDeployment", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CopyDeployment { name, id }: CopyDeployment,
|
||||
user: User,
|
||||
) -> anyhow::Result<Deployment> {
|
||||
let Deployment { config, .. } =
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Deployment>(&name, config.into(), &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteDeployment, User> for State {
|
||||
#[instrument(name = "DeleteDeployment", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteDeployment { id }: DeleteDeployment,
|
||||
user: User,
|
||||
) -> anyhow::Result<Deployment> {
|
||||
resource::delete::<Deployment>(&id, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateDeployment, User> for State {
|
||||
#[instrument(name = "UpdateDeployment", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateDeployment { id, config }: UpdateDeployment,
|
||||
user: User,
|
||||
) -> anyhow::Result<Deployment> {
|
||||
resource::update::<Deployment>(&id, config, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<RenameDeployment, User> for State {
|
||||
#[instrument(name = "RenameDeployment", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RenameDeployment { id, name }: RenameDeployment,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// get the action state for the deployment (or insert default).
|
||||
let action_state = action_states()
|
||||
.deployment
|
||||
.get_or_insert_default(&deployment.id)
|
||||
.await;
|
||||
|
||||
// Will check to ensure deployment not already busy before updating, and return Err if so.
|
||||
// The returned guard will set the action state back to default when dropped.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.renaming = true)?;
|
||||
|
||||
let name = to_monitor_name(&name);
|
||||
|
||||
let container_state = get_deployment_state(&deployment).await?;
|
||||
|
||||
if container_state == DeploymentState::Unknown {
|
||||
return Err(anyhow!(
|
||||
"cannot rename deployment when container status is unknown"
|
||||
));
|
||||
}
|
||||
|
||||
let mut update =
|
||||
make_update(&deployment, Operation::RenameDeployment, &user);
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().await.deployments,
|
||||
&deployment.id,
|
||||
mungos::update::Update::Set(
|
||||
doc! { "name": &name, "updated_at": monitor_timestamp() },
|
||||
),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to update deployment name on db")?;
|
||||
|
||||
if container_state != DeploymentState::NotDeployed {
|
||||
let server =
|
||||
resource::get::<Server>(&deployment.config.server_id).await?;
|
||||
let log = periphery_client(&server)?
|
||||
.request(api::container::RenameContainer {
|
||||
curr_name: deployment.name.clone(),
|
||||
new_name: name.clone(),
|
||||
})
|
||||
.await
|
||||
.context("failed to rename container on server")?;
|
||||
update.logs.push(log);
|
||||
}
|
||||
|
||||
update.push_simple_log(
|
||||
"rename deployment",
|
||||
format!(
|
||||
"renamed deployment from {} to {}",
|
||||
deployment.name, name
|
||||
),
|
||||
);
|
||||
update.finalize();
|
||||
|
||||
add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
98
bin/core/src/api/write/description.rs
Normal file
98
bin/core/src/api/write/description.rs
Normal file
@@ -0,0 +1,98 @@
|
||||
use anyhow::anyhow;
|
||||
use monitor_client::{
|
||||
api::write::{UpdateDescription, UpdateDescriptionResponse},
|
||||
entities::{
|
||||
alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, procedure::Procedure, repo::Repo,
|
||||
server::Server, server_template::ServerTemplate,
|
||||
update::ResourceTarget, user::User,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
impl Resolve<UpdateDescription, User> for State {
|
||||
#[instrument(name = "UpdateDescription", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateDescription {
|
||||
target,
|
||||
description,
|
||||
}: UpdateDescription,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateDescriptionResponse> {
|
||||
match target {
|
||||
ResourceTarget::System(_) => {
|
||||
return Err(anyhow!(
|
||||
"cannot update description of System resource target"
|
||||
))
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
resource::update_description::<Server>(
|
||||
&id,
|
||||
&description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
resource::update_description::<Deployment>(
|
||||
&id,
|
||||
&description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
resource::update_description::<Build>(
|
||||
&id,
|
||||
&description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
resource::update_description::<Repo>(
|
||||
&id,
|
||||
&description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
resource::update_description::<Builder>(
|
||||
&id,
|
||||
&description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
resource::update_description::<Alerter>(
|
||||
&id,
|
||||
&description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
resource::update_description::<Procedure>(
|
||||
&id,
|
||||
&description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
resource::update_description::<ServerTemplate>(
|
||||
&id,
|
||||
&description,
|
||||
&user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
Ok(UpdateDescriptionResponse {})
|
||||
}
|
||||
}
|
||||
183
bin/core/src/api/write/mod.rs
Normal file
183
bin/core/src/api/write/mod.rs
Normal file
@@ -0,0 +1,183 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{middleware, routing::post, Extension, Router};
|
||||
use axum_extra::{headers::ContentType, TypedHeader};
|
||||
use monitor_client::{api::write::*, entities::user::User};
|
||||
use resolver_api::{derive::Resolver, Resolver};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{auth::auth_request, state::State};
|
||||
|
||||
mod alerter;
|
||||
mod api_key;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod description;
|
||||
mod permissions;
|
||||
mod procedure;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod tag;
|
||||
mod user;
|
||||
mod user_group;
|
||||
mod variable;
|
||||
|
||||
/// The full set of write (mutating) API requests.
///
/// Serialized as `{ "type": <variant name>, "params": <variant payload> }`
/// (see the serde `tag`/`content` attributes). The `Resolver` derive
/// dispatches each variant to its `Resolve<_, User>` impl on `State`;
/// `#[typeshare]` exports the type for client codegen.
#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, Resolver)]
#[resolver_target(State)]
#[resolver_args(User)]
#[serde(tag = "type", content = "params")]
enum WriteRequest {
  // ==== API KEY ====
  CreateApiKey(CreateApiKey),
  DeleteApiKey(DeleteApiKey),
  CreateApiKeyForServiceUser(CreateApiKeyForServiceUser),
  DeleteApiKeyForServiceUser(DeleteApiKeyForServiceUser),

  // ==== USER ====
  PushRecentlyViewed(PushRecentlyViewed),
  SetLastSeenUpdate(SetLastSeenUpdate),
  CreateServiceUser(CreateServiceUser),
  UpdateServiceUserDescription(UpdateServiceUserDescription),

  // ==== USER GROUP ====
  CreateUserGroup(CreateUserGroup),
  RenameUserGroup(RenameUserGroup),
  DeleteUserGroup(DeleteUserGroup),
  AddUserToUserGroup(AddUserToUserGroup),
  RemoveUserFromUserGroup(RemoveUserFromUserGroup),
  SetUsersInUserGroup(SetUsersInUserGroup),

  // ==== PERMISSIONS ====
  UpdateUserBasePermissions(UpdateUserBasePermissions),
  UpdatePermissionOnTarget(UpdatePermissionOnTarget),

  // ==== DESCRIPTION ====
  UpdateDescription(UpdateDescription),

  // ==== SERVER ====
  CreateServer(CreateServer),
  DeleteServer(DeleteServer),
  UpdateServer(UpdateServer),
  RenameServer(RenameServer),
  CreateNetwork(CreateNetwork),
  DeleteNetwork(DeleteNetwork),

  // ==== DEPLOYMENT ====
  CreateDeployment(CreateDeployment),
  CopyDeployment(CopyDeployment),
  DeleteDeployment(DeleteDeployment),
  UpdateDeployment(UpdateDeployment),
  RenameDeployment(RenameDeployment),

  // ==== BUILD ====
  CreateBuild(CreateBuild),
  CopyBuild(CopyBuild),
  DeleteBuild(DeleteBuild),
  UpdateBuild(UpdateBuild),

  // ==== BUILDER ====
  CreateBuilder(CreateBuilder),
  CopyBuilder(CopyBuilder),
  DeleteBuilder(DeleteBuilder),
  UpdateBuilder(UpdateBuilder),

  // ==== SERVER TEMPLATE ====
  CreateServerTemplate(CreateServerTemplate),
  CopyServerTemplate(CopyServerTemplate),
  DeleteServerTemplate(DeleteServerTemplate),
  UpdateServerTemplate(UpdateServerTemplate),

  // ==== REPO ====
  CreateRepo(CreateRepo),
  CopyRepo(CopyRepo),
  DeleteRepo(DeleteRepo),
  UpdateRepo(UpdateRepo),

  // ==== ALERTER ====
  CreateAlerter(CreateAlerter),
  CopyAlerter(CopyAlerter),
  DeleteAlerter(DeleteAlerter),
  UpdateAlerter(UpdateAlerter),

  // ==== PROCEDURE ====
  CreateProcedure(CreateProcedure),
  CopyProcedure(CopyProcedure),
  DeleteProcedure(DeleteProcedure),
  UpdateProcedure(UpdateProcedure),

  // ==== TAG ====
  CreateTag(CreateTag),
  DeleteTag(DeleteTag),
  RenameTag(RenameTag),
  UpdateTagsOnResource(UpdateTagsOnResource),

  // ==== VARIABLE ====
  CreateVariable(CreateVariable),
  UpdateVariableValue(UpdateVariableValue),
  UpdateVariableDescription(UpdateVariableDescription),
  DeleteVariable(DeleteVariable),
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<WriteRequest>,
|
||||
) -> serror::Result<(TypedHeader<ContentType>, String)> {
|
||||
let req_id = Uuid::new_v4();
|
||||
|
||||
let res = tokio::spawn(task(req_id, request, user))
|
||||
.await
|
||||
.context("failure in spawned task");
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!("/write request {req_id} spawn error: {e:#}");
|
||||
}
|
||||
|
||||
Ok((TypedHeader(ContentType::json()), res??))
|
||||
}
|
||||
|
||||
#[instrument(name = "WriteRequest", skip(user))]
|
||||
async fn task(
|
||||
req_id: Uuid,
|
||||
request: WriteRequest,
|
||||
user: User,
|
||||
) -> anyhow::Result<String> {
|
||||
info!(
|
||||
"/write request {req_id} | user: {} ({})",
|
||||
user.username, user.id
|
||||
);
|
||||
|
||||
let timer = Instant::now();
|
||||
|
||||
let res =
|
||||
State
|
||||
.resolve_request(request, user)
|
||||
.await
|
||||
.map_err(|e| match e {
|
||||
resolver_api::Error::Serialization(e) => {
|
||||
anyhow!("{e:?}").context("response serialization error")
|
||||
}
|
||||
resolver_api::Error::Inner(e) => e,
|
||||
});
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!("/write request {req_id} error: {e:#}");
|
||||
}
|
||||
|
||||
let elapsed = timer.elapsed();
|
||||
info!("/write request {req_id} | resolve time: {elapsed:?}");
|
||||
|
||||
res
|
||||
}
|
||||
308
bin/core/src/api/write/permissions.rs
Normal file
308
bin/core/src/api/write/permissions.rs
Normal file
@@ -0,0 +1,308 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
UpdatePermissionOnTarget, UpdatePermissionOnTargetResponse,
|
||||
UpdateUserBasePermissions, UpdateUserBasePermissionsResponse,
|
||||
},
|
||||
entities::{
|
||||
permission::{UserTarget, UserTargetVariant},
|
||||
update::{ResourceTarget, ResourceTargetVariant},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{find_one_by_id, update_one_by_id},
|
||||
mongodb::{
|
||||
bson::{doc, oid::ObjectId, Document},
|
||||
options::UpdateOptions,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_user,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<UpdateUserBasePermissions, User> for State {
|
||||
#[instrument(name = "UpdateUserBasePermissions", skip(self, admin))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateUserBasePermissions {
|
||||
user_id,
|
||||
enabled,
|
||||
create_servers,
|
||||
create_builds,
|
||||
}: UpdateUserBasePermissions,
|
||||
admin: User,
|
||||
) -> anyhow::Result<UpdateUserBasePermissionsResponse> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only"));
|
||||
}
|
||||
let user = find_one_by_id(&db_client().await.users, &user_id)
|
||||
.await
|
||||
.context("failed to query mongo for user")?
|
||||
.context("did not find user with given id")?;
|
||||
if user.admin {
|
||||
return Err(anyhow!(
|
||||
"cannot use this method to update other admins permissions"
|
||||
));
|
||||
}
|
||||
let mut update_doc = Document::new();
|
||||
if let Some(enabled) = enabled {
|
||||
update_doc.insert("enabled", enabled);
|
||||
}
|
||||
if let Some(create_servers) = create_servers {
|
||||
update_doc.insert("create_server_permissions", create_servers);
|
||||
}
|
||||
if let Some(create_builds) = create_builds {
|
||||
update_doc.insert("create_build_permissions", create_builds);
|
||||
}
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().await.users,
|
||||
&user_id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(UpdateUserBasePermissionsResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdatePermissionOnTarget, User> for State {
|
||||
#[instrument(name = "UpdatePermissionOnTarget", skip(self, admin))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdatePermissionOnTarget {
|
||||
user_target,
|
||||
resource_target,
|
||||
permission,
|
||||
}: UpdatePermissionOnTarget,
|
||||
admin: User,
|
||||
) -> anyhow::Result<UpdatePermissionOnTargetResponse> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only"));
|
||||
}
|
||||
|
||||
// Some extra checks if user target is an actual User
|
||||
if let UserTarget::User(user_id) = &user_target {
|
||||
let user = get_user(user_id).await?;
|
||||
if user.admin {
|
||||
return Err(anyhow!(
|
||||
"cannot use this method to update other admins permissions"
|
||||
));
|
||||
}
|
||||
if !user.enabled {
|
||||
return Err(anyhow!("user not enabled"));
|
||||
}
|
||||
}
|
||||
|
||||
let (user_target_variant, user_target_id) =
|
||||
extract_user_target_with_validation(&user_target).await?;
|
||||
let (resource_variant, resource_id) =
|
||||
extract_resource_target_with_validation(&resource_target)
|
||||
.await?;
|
||||
|
||||
let (user_target_variant, resource_variant) =
|
||||
(user_target_variant.as_ref(), resource_variant.as_ref());
|
||||
|
||||
db_client()
|
||||
.await
|
||||
.permissions
|
||||
.update_one(
|
||||
doc! {
|
||||
"user_target.type": user_target_variant,
|
||||
"user_target.id": &user_target_id,
|
||||
"resource_target.type": resource_variant,
|
||||
"resource_target.id": &resource_id
|
||||
},
|
||||
doc! {
|
||||
"$set": {
|
||||
"user_target.type": user_target_variant,
|
||||
"user_target.id": user_target_id,
|
||||
"resource_target.type": resource_variant,
|
||||
"resource_target.id": resource_id,
|
||||
"level": permission.as_ref(),
|
||||
}
|
||||
},
|
||||
UpdateOptions::builder().upsert(true).build(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(UpdatePermissionOnTargetResponse {})
|
||||
}
|
||||
}
|
||||
|
||||
/// checks if inner id is actually a `name`, and replaces it with id if so.
|
||||
async fn extract_user_target_with_validation(
|
||||
user_target: &UserTarget,
|
||||
) -> anyhow::Result<(UserTargetVariant, String)> {
|
||||
match user_target {
|
||||
UserTarget::User(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "username": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.users
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for users")?
|
||||
.context("no matching user found")?
|
||||
.id;
|
||||
Ok((UserTargetVariant::User, id))
|
||||
}
|
||||
UserTarget::UserGroup(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.user_groups
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for user_groups")?
|
||||
.context("no matching user_group found")?
|
||||
.id;
|
||||
Ok((UserTargetVariant::UserGroup, id))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// checks if inner id is actually a `name`, and replaces it with id if so.
|
||||
async fn extract_resource_target_with_validation(
|
||||
resource_target: &ResourceTarget,
|
||||
) -> anyhow::Result<(ResourceTargetVariant, String)> {
|
||||
match resource_target {
|
||||
ResourceTarget::System(_) => {
|
||||
let res = resource_target.extract_variant_id();
|
||||
Ok((res.0, res.1.clone()))
|
||||
}
|
||||
ResourceTarget::Build(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.builds
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for builds")?
|
||||
.context("no matching build found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Build, id))
|
||||
}
|
||||
ResourceTarget::Builder(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.builders
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for builders")?
|
||||
.context("no matching builder found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Builder, id))
|
||||
}
|
||||
ResourceTarget::Deployment(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.deployments
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for deployments")?
|
||||
.context("no matching deployment found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Deployment, id))
|
||||
}
|
||||
ResourceTarget::Server(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.servers
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for servers")?
|
||||
.context("no matching server found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Server, id))
|
||||
}
|
||||
ResourceTarget::Repo(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.repos
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for repos")?
|
||||
.context("no matching repo found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Repo, id))
|
||||
}
|
||||
ResourceTarget::Alerter(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.alerters
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for alerters")?
|
||||
.context("no matching alerter found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Alerter, id))
|
||||
}
|
||||
ResourceTarget::Procedure(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.procedures
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for procedures")?
|
||||
.context("no matching procedure found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Procedure, id))
|
||||
}
|
||||
ResourceTarget::ServerTemplate(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.await
|
||||
.server_templates
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for server templates")?
|
||||
.context("no matching server template found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::ServerTemplate, id))
|
||||
}
|
||||
}
|
||||
}
|
||||
60
bin/core/src/api/write/procedure.rs
Normal file
60
bin/core/src/api/write/procedure.rs
Normal file
@@ -0,0 +1,60 @@
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
permission::PermissionLevel, procedure::Procedure, user::User,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
impl Resolve<CreateProcedure, User> for State {
|
||||
#[instrument(name = "CreateProcedure", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateProcedure { name, config }: CreateProcedure,
|
||||
user: User,
|
||||
) -> anyhow::Result<CreateProcedureResponse> {
|
||||
resource::create::<Procedure>(&name, config, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CopyProcedure, User> for State {
|
||||
#[instrument(name = "CopyProcedure", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CopyProcedure { name, id }: CopyProcedure,
|
||||
user: User,
|
||||
) -> anyhow::Result<CopyProcedureResponse> {
|
||||
let Procedure { config, .. } =
|
||||
resource::get_check_permissions::<Procedure>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Procedure>(&name, config.into(), &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateProcedure, User> for State {
|
||||
#[instrument(name = "UpdateProcedure", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateProcedure { id, config }: UpdateProcedure,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateProcedureResponse> {
|
||||
resource::update::<Procedure>(&id, config, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteProcedure, User> for State {
|
||||
#[instrument(name = "DeleteProcedure", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteProcedure { id }: DeleteProcedure,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteProcedureResponse> {
|
||||
resource::delete::<Procedure>(&id, &user).await
|
||||
}
|
||||
}
|
||||
58
bin/core/src/api/write/repo.rs
Normal file
58
bin/core/src/api/write/repo.rs
Normal file
@@ -0,0 +1,58 @@
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{permission::PermissionLevel, repo::Repo, user::User},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
impl Resolve<CreateRepo, User> for State {
|
||||
#[instrument(name = "CreateRepo", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateRepo { name, config }: CreateRepo,
|
||||
user: User,
|
||||
) -> anyhow::Result<Repo> {
|
||||
resource::create::<Repo>(&name, config, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CopyRepo, User> for State {
|
||||
#[instrument(name = "CopyRepo", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CopyRepo { name, id }: CopyRepo,
|
||||
user: User,
|
||||
) -> anyhow::Result<Repo> {
|
||||
let Repo { config, .. } =
|
||||
resource::get_check_permissions::<Repo>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Repo>(&name, config.into(), &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteRepo, User> for State {
|
||||
#[instrument(name = "DeleteRepo", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteRepo { id }: DeleteRepo,
|
||||
user: User,
|
||||
) -> anyhow::Result<Repo> {
|
||||
resource::delete::<Repo>(&id, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateRepo, User> for State {
|
||||
#[instrument(name = "UpdateRepo", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateRepo { id, config }: UpdateRepo,
|
||||
user: User,
|
||||
) -> anyhow::Result<Repo> {
|
||||
resource::update::<Repo>(&id, config, &user).await
|
||||
}
|
||||
}
|
||||
161
bin/core/src/api/write/server.rs
Normal file
161
bin/core/src/api/write/server.rs
Normal file
@@ -0,0 +1,161 @@
|
||||
use anyhow::Context;
|
||||
use monitor_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
monitor_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
update::{Update, UpdateStatus},
|
||||
user::User,
|
||||
Operation,
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use serror::serialize_error_pretty;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
periphery_client,
|
||||
update::{add_update, make_update, update_update},
|
||||
},
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<CreateServer, User> for State {
|
||||
#[instrument(name = "CreateServer", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateServer { name, config }: CreateServer,
|
||||
user: User,
|
||||
) -> anyhow::Result<Server> {
|
||||
resource::create::<Server>(&name, config, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteServer, User> for State {
|
||||
#[instrument(name = "DeleteServer", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteServer { id }: DeleteServer,
|
||||
user: User,
|
||||
) -> anyhow::Result<Server> {
|
||||
resource::delete::<Server>(&id, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateServer, User> for State {
|
||||
#[instrument(name = "UpdateServer", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateServer { id, config }: UpdateServer,
|
||||
user: User,
|
||||
) -> anyhow::Result<Server> {
|
||||
resource::update::<Server>(&id, config, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<RenameServer, User> for State {
|
||||
#[instrument(name = "RenameServer", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
RenameServer { id, name }: RenameServer,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
let mut update =
|
||||
make_update(&server, Operation::RenameServer, &user);
|
||||
|
||||
update_one_by_id(&db_client().await.servers, &id, mungos::update::Update::Set(doc! { "name": &name, "updated_at": monitor_timestamp() }), None)
|
||||
.await
|
||||
.context("failed to update server on db. this name may already be taken.")?;
|
||||
update.push_simple_log(
|
||||
"rename server",
|
||||
format!("renamed server {id} from {} to {name}", server.name),
|
||||
);
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CreateNetwork, User> for State {
|
||||
#[instrument(name = "CreateNetwork", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateNetwork { server, name }: CreateNetwork,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::CreateNetwork, &user);
|
||||
update.status = UpdateStatus::InProgress;
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
match periphery
|
||||
.request(api::network::CreateNetwork { name, driver: None })
|
||||
.await
|
||||
{
|
||||
Ok(log) => update.logs.push(log),
|
||||
Err(e) => update
|
||||
.push_error_log("create network", serialize_error_pretty(&e)),
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteNetwork, User> for State {
|
||||
#[instrument(name = "DeleteNetwork", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteNetwork { server, name }: DeleteNetwork,
|
||||
user: User,
|
||||
) -> anyhow::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::DeleteNetwork, &user);
|
||||
update.status = UpdateStatus::InProgress;
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
match periphery
|
||||
.request(api::network::DeleteNetwork { name })
|
||||
.await
|
||||
{
|
||||
Ok(log) => update.logs.push(log),
|
||||
Err(e) => update
|
||||
.push_error_log("delete network", serialize_error_pretty(&e)),
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
61
bin/core/src/api/write/server_template.rs
Normal file
61
bin/core/src/api/write/server_template.rs
Normal file
@@ -0,0 +1,61 @@
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CopyServerTemplate, CreateServerTemplate, DeleteServerTemplate,
|
||||
UpdateServerTemplate,
|
||||
},
|
||||
entities::{
|
||||
permission::PermissionLevel, server_template::ServerTemplate,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{resource, state::State};
|
||||
|
||||
impl Resolve<CreateServerTemplate, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
CreateServerTemplate { name, config }: CreateServerTemplate,
|
||||
user: User,
|
||||
) -> anyhow::Result<ServerTemplate> {
|
||||
resource::create::<ServerTemplate>(&name, config, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<CopyServerTemplate, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
CopyServerTemplate { name, id }: CopyServerTemplate,
|
||||
user: User,
|
||||
) -> anyhow::Result<ServerTemplate> {
|
||||
let ServerTemplate { config, .. } =
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::create::<ServerTemplate>(&name, config.into(), &user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteServerTemplate, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteServerTemplate { id }: DeleteServerTemplate,
|
||||
user: User,
|
||||
) -> anyhow::Result<ServerTemplate> {
|
||||
resource::delete::<ServerTemplate>(&id, &user).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateServerTemplate, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateServerTemplate { id, config }: UpdateServerTemplate,
|
||||
user: User,
|
||||
) -> anyhow::Result<ServerTemplate> {
|
||||
resource::update::<ServerTemplate>(&id, config, &user).await
|
||||
}
|
||||
}
|
||||
197
bin/core/src/api/write/tag.rs
Normal file
197
bin/core/src/api/write/tag.rs
Normal file
@@ -0,0 +1,197 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateTag, DeleteTag, RenameTag, UpdateTagsOnResource,
|
||||
UpdateTagsOnResourceResponse,
|
||||
},
|
||||
entities::{
|
||||
alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, permission::PermissionLevel,
|
||||
procedure::Procedure, repo::Repo, server::Server,
|
||||
server_template::ServerTemplate, tag::Tag,
|
||||
update::ResourceTarget, user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{delete_one_by_id, update_one_by_id},
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::{get_tag, get_tag_check_owner},
|
||||
resource,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<CreateTag, User> for State {
  #[instrument(name = "CreateTag", skip(self, user))]
  async fn resolve(
    &self,
    CreateTag { name }: CreateTag,
    user: User,
  ) -> anyhow::Result<Tag> {
    // Tags are looked up by id-or-name elsewhere, so a name that parses
    // as a valid ObjectId would be ambiguous — reject it up front.
    if ObjectId::from_str(&name).is_ok() {
      return Err(anyhow!("tag name cannot be ObjectId"));
    }

    // Build the tag with an empty id; the real id comes from the insert.
    let mut tag = Tag {
      id: Default::default(),
      name,
      owner: user.id.clone(),
    };

    // Insert, then copy the db-generated ObjectId back onto the tag
    // before returning it to the caller.
    tag.id = db_client()
      .await
      .tags
      .insert_one(&tag, None)
      .await
      .context("failed to create tag on db")?
      .inserted_id
      .as_object_id()
      .context("inserted_id is not ObjectId")?
      .to_string();

    Ok(tag)
  }
}
|
||||
|
||||
impl Resolve<RenameTag, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
RenameTag { id, name }: RenameTag,
|
||||
user: User,
|
||||
) -> anyhow::Result<Tag> {
|
||||
if ObjectId::from_str(&name).is_ok() {
|
||||
return Err(anyhow!("tag name cannot be ObjectId"));
|
||||
}
|
||||
|
||||
get_tag_check_owner(&id, &user).await?;
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().await.tags,
|
||||
&id,
|
||||
doc! { "$set": { "name": name } },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to rename tag on db")?;
|
||||
|
||||
get_tag(&id).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteTag, User> for State {
  #[instrument(name = "DeleteTag", skip(self, user))]
  async fn resolve(
    &self,
    DeleteTag { id }: DeleteTag,
    user: User,
  ) -> anyhow::Result<Tag> {
    // Ownership check first — only the tag's owner may delete it.
    let tag = get_tag_check_owner(&id, &user).await?;

    // Strip the tag from every taggable resource type concurrently.
    // try_join! short-circuits: if any removal fails, the tag document
    // itself is NOT deleted below.
    tokio::try_join!(
      resource::remove_tag_from_all::<Server>(&id),
      resource::remove_tag_from_all::<Deployment>(&id),
      resource::remove_tag_from_all::<Build>(&id),
      resource::remove_tag_from_all::<Repo>(&id),
      resource::remove_tag_from_all::<Builder>(&id),
      resource::remove_tag_from_all::<Alerter>(&id),
      resource::remove_tag_from_all::<Procedure>(&id),
      resource::remove_tag_from_all::<ServerTemplate>(&id),
    )?;

    delete_one_by_id(&db_client().await.tags, &id, None).await?;

    // Return the (pre-delete) tag document for the caller.
    Ok(tag)
  }
}
|
||||
|
||||
impl Resolve<UpdateTagsOnResource, User> for State {
|
||||
#[instrument(name = "UpdateTagsOnResource", skip(self, user))]
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateTagsOnResource { target, tags }: UpdateTagsOnResource,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateTagsOnResourceResponse> {
|
||||
match target {
|
||||
ResourceTarget::System(_) => return Err(anyhow!("")),
|
||||
ResourceTarget::Build(id) => {
|
||||
resource::get_check_permissions::<Build>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::update_tags::<Build>(&id, tags, user).await?;
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
resource::get_check_permissions::<Builder>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::update_tags::<Builder>(&id, tags, user).await?
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::update_tags::<Deployment>(&id, tags, user).await?
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
resource::get_check_permissions::<Server>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::update_tags::<Server>(&id, tags, user).await?
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
resource::get_check_permissions::<Repo>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::update_tags::<Repo>(&id, tags, user).await?
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
resource::get_check_permissions::<Alerter>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::update_tags::<Alerter>(&id, tags, user).await?
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
resource::get_check_permissions::<Procedure>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::update_tags::<Procedure>(&id, tags, user).await?
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
&id,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::update_tags::<ServerTemplate>(&id, tags, user)
|
||||
.await?
|
||||
}
|
||||
};
|
||||
Ok(UpdateTagsOnResourceResponse {})
|
||||
}
|
||||
}
|
||||
187
bin/core/src/api/write/user.rs
Normal file
187
bin/core/src/api/write/user.rs
Normal file
@@ -0,0 +1,187 @@
|
||||
use std::{collections::VecDeque, str::FromStr};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateServiceUser, CreateServiceUserResponse, PushRecentlyViewed,
|
||||
PushRecentlyViewedResponse, SetLastSeenUpdate,
|
||||
SetLastSeenUpdateResponse, UpdateServiceUserDescription,
|
||||
UpdateServiceUserDescriptionResponse,
|
||||
},
|
||||
entities::{
|
||||
monitor_timestamp,
|
||||
update::ResourceTarget,
|
||||
user::{User, UserConfig},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{doc, oid::ObjectId, to_bson},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_user,
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
const RECENTLY_VIEWED_MAX: usize = 10;
|
||||
|
||||
impl Resolve<PushRecentlyViewed, User> for State {
  #[instrument(name = "PushRecentlyViewed", skip(self, user))]
  async fn resolve(
    &self,
    PushRecentlyViewed { resource }: PushRecentlyViewed,
    user: User,
  ) -> anyhow::Result<PushRecentlyViewedResponse> {
    // Re-fetch the user from the db so the recents lists reflect the
    // current stored state rather than whatever the auth layer cached.
    let user = get_user(&user.id).await?;

    // Map the target to its per-type recents list and db field name.
    let (recents, id, field) = match resource {
      ResourceTarget::Server(id) => {
        (user.recent_servers, id, "recent_servers")
      }
      ResourceTarget::Deployment(id) => {
        (user.recent_deployments, id, "recent_deployments")
      }
      ResourceTarget::Build(id) => {
        (user.recent_builds, id, "recent_builds")
      }
      ResourceTarget::Repo(id) => {
        (user.recent_repos, id, "recent_repos")
      }
      ResourceTarget::Procedure(id) => {
        (user.recent_procedures, id, "recent_procedures")
      }
      // Other target types track no recents — silently no-op.
      _ => return Ok(PushRecentlyViewedResponse {}),
    };

    // Drop any existing occurrence of the id, keep at most MAX - 1
    // older entries, then push the new id to the front (MRU order).
    let mut recents = recents
      .into_iter()
      .filter(|_id| !id.eq(_id))
      .take(RECENTLY_VIEWED_MAX - 1)
      .collect::<VecDeque<_>>();
    recents.push_front(id);
    let update = doc! { field: to_bson(&recents)? };

    update_one_by_id(
      &db_client().await.users,
      &user.id,
      mungos::update::Update::Set(update),
      None,
    )
    .await
    .with_context(|| format!("failed to update {field}"))?;

    Ok(PushRecentlyViewedResponse {})
  }
}
|
||||
|
||||
impl Resolve<SetLastSeenUpdate, User> for State {
  #[instrument(name = "SetLastSeenUpdate", skip(self, user))]
  async fn resolve(
    &self,
    SetLastSeenUpdate {}: SetLastSeenUpdate,
    user: User,
  ) -> anyhow::Result<SetLastSeenUpdateResponse> {
    // Stamp the user's last_update_view with the current time —
    // presumably used to compute "unseen updates" badges; confirm
    // against the reader of this field.
    update_one_by_id(
      &db_client().await.users,
      &user.id,
      mungos::update::Update::Set(doc! {
        "last_update_view": monitor_timestamp()
      }),
      None,
    )
    .await
    .context("failed to update user last_update_view")?;
    Ok(SetLastSeenUpdateResponse {})
  }
}
|
||||
|
||||
impl Resolve<CreateServiceUser, User> for State {
  #[instrument(name = "CreateServiceUser", skip(self, user))]
  async fn resolve(
    &self,
    CreateServiceUser {
      username,
      description,
    }: CreateServiceUser,
    user: User,
  ) -> anyhow::Result<CreateServiceUserResponse> {
    // Admin-only operation.
    if !user.admin {
      return Err(anyhow!("user not admin"));
    }
    // Usernames double as lookup keys alongside ids elsewhere, so a
    // username that parses as an ObjectId would be ambiguous.
    if ObjectId::from_str(&username).is_ok() {
      return Err(anyhow!("username cannot be valid ObjectId"));
    }
    let config = UserConfig::Service { description };
    // Service users start enabled but with no admin / create rights.
    let mut user = User {
      id: Default::default(),
      username,
      config,
      enabled: true,
      admin: false,
      create_server_permissions: false,
      create_build_permissions: false,
      last_update_view: 0,
      recent_servers: Vec::new(),
      recent_deployments: Vec::new(),
      recent_builds: Vec::new(),
      recent_repos: Vec::new(),
      recent_procedures: Vec::new(),
      updated_at: monitor_timestamp(),
    };
    // Insert and write the db-generated id back before returning.
    user.id = db_client()
      .await
      .users
      .insert_one(&user, None)
      .await
      .context("failed to create service user on db")?
      .inserted_id
      .as_object_id()
      .context("inserted id is not object id")?
      .to_string();
    Ok(user)
  }
}
|
||||
|
||||
impl Resolve<UpdateServiceUserDescription, User> for State {
  #[instrument(
    name = "UpdateServiceUserDescription",
    skip(self, user)
  )]
  async fn resolve(
    &self,
    UpdateServiceUserDescription {
      username,
      description,
    }: UpdateServiceUserDescription,
    user: User,
  ) -> anyhow::Result<UpdateServiceUserDescriptionResponse> {
    // Admin-only operation.
    if !user.admin {
      return Err(anyhow!("user not admin"));
    }
    let db = db_client().await;
    // Look the target user up by username and verify it really is a
    // service user before mutating anything.
    let service_user = db
      .users
      .find_one(doc! { "username": &username }, None)
      .await
      .context("failed to query db for user")?
      .context("no user with given username")?;
    let UserConfig::Service { .. } = &service_user.config else {
      return Err(anyhow!("user is not service user"));
    };
    db.users
      .update_one(
        doc! { "username": &username },
        doc! { "$set": { "config.data.description": description } },
        None,
      )
      .await
      .context("failed to update user on db")?;
    // Re-read so the caller gets the post-update document.
    db.users
      .find_one(doc! { "username": &username }, None)
      .await
      .context("failed to query db for user")?
      .context("user with username not found")
  }
}
|
||||
245
bin/core/src/api/write/user_group.rs
Normal file
245
bin/core/src/api/write/user_group.rs
Normal file
@@ -0,0 +1,245 @@
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
AddUserToUserGroup, CreateUserGroup, DeleteUserGroup,
|
||||
RemoveUserFromUserGroup, RenameUserGroup, SetUsersInUserGroup,
|
||||
},
|
||||
entities::{monitor_timestamp, user::User, user_group::UserGroup},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
|
||||
find::find_collect,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::{db_client, State};
|
||||
|
||||
impl Resolve<CreateUserGroup, User> for State {
  async fn resolve(
    &self,
    CreateUserGroup { name }: CreateUserGroup,
    admin: User,
  ) -> anyhow::Result<UserGroup> {
    // Admin-only operation.
    if !admin.admin {
      return Err(anyhow!("This call is admin-only"));
    }
    // New groups start with no members.
    let user_group = UserGroup {
      id: Default::default(),
      users: Default::default(),
      updated_at: monitor_timestamp(),
      name,
    };
    let db = db_client().await;
    let id = db
      .user_groups
      .insert_one(user_group, None)
      .await
      .context("failed to create UserGroup on db")?
      .inserted_id
      .as_object_id()
      .context("inserted id is not ObjectId")?
      .to_string();
    // Re-read by the generated id so the returned document carries it.
    find_one_by_id(&db.user_groups, &id)
      .await
      .context("failed to query db for user groups")?
      .context("user group at id not found")
  }
}
|
||||
|
||||
impl Resolve<RenameUserGroup, User> for State {
  async fn resolve(
    &self,
    RenameUserGroup { id, name }: RenameUserGroup,
    admin: User,
  ) -> anyhow::Result<UserGroup> {
    // Admin-only operation.
    if !admin.admin {
      return Err(anyhow!("This call is admin-only"));
    }
    let db = db_client().await;
    update_one_by_id(
      &db.user_groups,
      &id,
      doc! { "$set": { "name": name } },
      None,
    )
    .await
    .context("failed to rename UserGroup on db")?;
    // Return the post-rename document.
    find_one_by_id(&db.user_groups, &id)
      .await
      .context("failed to query db for UserGroups")?
      .context("no user group with given id")
  }
}
|
||||
|
||||
impl Resolve<DeleteUserGroup, User> for State {
  async fn resolve(
    &self,
    DeleteUserGroup { id }: DeleteUserGroup,
    admin: User,
  ) -> anyhow::Result<UserGroup> {
    // Admin-only operation.
    if !admin.admin {
      return Err(anyhow!("This call is admin-only"));
    }

    let db = db_client().await;

    // Fetch the group first so it can be returned after deletion.
    let ug = find_one_by_id(&db.user_groups, &id)
      .await
      .context("failed to query db for UserGroups")?
      .context("no UserGroup found with given id")?;

    delete_one_by_id(&db.user_groups, &id, None)
      .await
      .context("failed to delete UserGroup from db")?;

    // Best-effort cascade: drop all permissions granted to this group.
    // If this fails the group itself is already gone (see message).
    db.permissions
      .delete_many(doc! {
        "user_target.type": "UserGroup",
        "user_target.id": id,
      }, None)
      .await
      .context("failed to clean up UserGroups permissions. User Group has been deleted")?;

    Ok(ug)
  }
}
|
||||
|
||||
impl Resolve<AddUserToUserGroup, User> for State {
  async fn resolve(
    &self,
    AddUserToUserGroup { user_group, user }: AddUserToUserGroup,
    admin: User,
  ) -> anyhow::Result<UserGroup> {
    // Admin-only operation.
    if !admin.admin {
      return Err(anyhow!("This call is admin-only"));
    }

    let db = db_client().await;

    // `user` may be either an id or a username — build the filter
    // accordingly and resolve it to a concrete user document.
    let filter = match ObjectId::from_str(&user) {
      Ok(id) => doc! { "_id": id },
      Err(_) => doc! { "username": &user },
    };
    let user = db
      .users
      .find_one(filter, None)
      .await
      .context("failed to query mongo for users")?
      .context("no matching user found")?;

    // Likewise, `user_group` may be an id or a name.
    let filter = match ObjectId::from_str(&user_group) {
      Ok(id) => doc! { "_id": id },
      Err(_) => doc! { "name": &user_group },
    };
    // $addToSet keeps membership idempotent (no duplicate entries).
    db.user_groups
      .update_one(
        filter.clone(),
        doc! { "$addToSet": { "users": &user.id } },
        None,
      )
      .await
      .context("failed to add user to group on db")?;
    // Return the post-update group document.
    db.user_groups
      .find_one(filter, None)
      .await
      .context("failed to query db for UserGroups")?
      .context("no user group with given id")
  }
}
|
||||
|
||||
impl Resolve<RemoveUserFromUserGroup, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
RemoveUserFromUserGroup {
|
||||
user_group,
|
||||
user,
|
||||
}: RemoveUserFromUserGroup,
|
||||
admin: User,
|
||||
) -> anyhow::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only"));
|
||||
}
|
||||
|
||||
let db = db_client().await;
|
||||
|
||||
let filter = match ObjectId::from_str(&user) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "username": &user },
|
||||
};
|
||||
let user = db
|
||||
.users
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query mongo for users")?
|
||||
.context("no matching user found")?;
|
||||
|
||||
let filter = match ObjectId::from_str(&user_group) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": &user_group },
|
||||
};
|
||||
db.user_groups
|
||||
.update_one(
|
||||
filter.clone(),
|
||||
doc! { "$pull": { "users": &user.id } },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to add user to group on db")?;
|
||||
db.user_groups
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for UserGroups")?
|
||||
.context("no user group with given id")
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<SetUsersInUserGroup, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
SetUsersInUserGroup { user_group, users }: SetUsersInUserGroup,
|
||||
admin: User,
|
||||
) -> anyhow::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("This call is admin-only"));
|
||||
}
|
||||
|
||||
let db = db_client().await;
|
||||
|
||||
let all_users = find_collect(&db.users, None, None)
|
||||
.await
|
||||
.context("failed to query db for users")?
|
||||
.into_iter()
|
||||
.map(|u| (u.username, u.id))
|
||||
.collect::<HashMap<_, _>>();
|
||||
|
||||
// Make sure all users are user ids
|
||||
let users = users
|
||||
.into_iter()
|
||||
.filter_map(|user| match ObjectId::from_str(&user) {
|
||||
Ok(_) => Some(user),
|
||||
Err(_) => all_users.get(&user).cloned(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let filter = match ObjectId::from_str(&user_group) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": &user_group },
|
||||
};
|
||||
db.user_groups
|
||||
.update_one(
|
||||
filter.clone(),
|
||||
doc! { "$set": { "users": users } },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to add user to group on db")?;
|
||||
db.user_groups
|
||||
.find_one(filter, None)
|
||||
.await
|
||||
.context("failed to query db for UserGroups")?
|
||||
.context("no user group with given id")
|
||||
}
|
||||
}
|
||||
169
bin/core/src/api/write/variable.rs
Normal file
169
bin/core/src/api/write/variable.rs
Normal file
@@ -0,0 +1,169 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::{
|
||||
api::write::{
|
||||
CreateVariable, CreateVariableResponse, DeleteVariable,
|
||||
DeleteVariableResponse, UpdateVariableDescription,
|
||||
UpdateVariableDescriptionResponse, UpdateVariableValue,
|
||||
UpdateVariableValueResponse,
|
||||
},
|
||||
entities::{
|
||||
update::ResourceTarget, user::User, variable::Variable, Operation,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
query::get_variable,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
state::{db_client, State},
|
||||
};
|
||||
|
||||
impl Resolve<CreateVariable, User> for State {
  async fn resolve(
    &self,
    CreateVariable {
      name,
      value,
      description,
    }: CreateVariable,
    user: User,
  ) -> anyhow::Result<CreateVariableResponse> {
    // Admin-only operation.
    if !user.admin {
      return Err(anyhow!("only admins can create variables"));
    }

    let variable = Variable {
      name,
      value,
      description,
    };

    db_client()
      .await
      .variables
      .insert_one(&variable, None)
      .await
      .context("failed to create variable on db")?;

    // Record the creation in the system update/audit log.
    let mut update = make_update(
      ResourceTarget::system(),
      Operation::CreateVariable,
      &user,
    );

    // NOTE: logs the full Variable debug repr, including its value.
    update
      .push_simple_log("create variable", format!("{variable:#?}"));
    update.finalize();

    add_update(update).await?;

    // Re-read so the caller gets the stored document.
    get_variable(&variable.name).await
  }
}
|
||||
|
||||
impl Resolve<UpdateVariableValue, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateVariableValue { name, value }: UpdateVariableValue,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateVariableValueResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can create variables"));
|
||||
}
|
||||
|
||||
let variable = get_variable(&name).await?;
|
||||
|
||||
if value == variable.value {
|
||||
return Err(anyhow!("no change"));
|
||||
}
|
||||
|
||||
db_client()
|
||||
.await
|
||||
.variables
|
||||
.update_one(
|
||||
doc! { "name": &name },
|
||||
doc! { "$set": { "value": &value } },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to update variable value on db")?;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::UpdateVariableValue,
|
||||
&user,
|
||||
);
|
||||
|
||||
update.push_simple_log(
|
||||
"update variable value",
|
||||
format!(
|
||||
"<span class=\"text-muted-foreground\">variable</span>: '{name}'\n<span class=\"text-muted-foreground\">from</span>: <span class=\"text-red-500\">{}</span>\n<span class=\"text-muted-foreground\">to</span>: <span class=\"text-green-500\">{value}</span>",
|
||||
variable.value
|
||||
),
|
||||
);
|
||||
update.finalize();
|
||||
|
||||
add_update(update).await?;
|
||||
|
||||
get_variable(&name).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<UpdateVariableDescription, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
UpdateVariableDescription { name, description }: UpdateVariableDescription,
|
||||
user: User,
|
||||
) -> anyhow::Result<UpdateVariableDescriptionResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can create variables"));
|
||||
}
|
||||
db_client()
|
||||
.await
|
||||
.variables
|
||||
.update_one(
|
||||
doc! { "name": &name },
|
||||
doc! { "$set": { "description": &description } },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to update variable description on db")?;
|
||||
get_variable(&name).await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<DeleteVariable, User> for State {
|
||||
async fn resolve(
|
||||
&self,
|
||||
DeleteVariable { name }: DeleteVariable,
|
||||
user: User,
|
||||
) -> anyhow::Result<DeleteVariableResponse> {
|
||||
if !user.admin {
|
||||
return Err(anyhow!("only admins can create variables"));
|
||||
}
|
||||
let variable = get_variable(&name).await?;
|
||||
db_client()
|
||||
.await
|
||||
.variables
|
||||
.delete_one(doc! { "name": &name }, None)
|
||||
.await
|
||||
.context("failed to delete variable on db")?;
|
||||
|
||||
let mut update = make_update(
|
||||
ResourceTarget::system(),
|
||||
Operation::DeleteVariable,
|
||||
&user,
|
||||
);
|
||||
|
||||
update
|
||||
.push_simple_log("delete variable", format!("{variable:#?}"));
|
||||
update.finalize();
|
||||
|
||||
add_update(update).await?;
|
||||
|
||||
Ok(variable)
|
||||
}
|
||||
}
|
||||
229
bin/core/src/auth/github/client.rs
Normal file
229
bin/core/src/auth/github/client.rs
Normal file
@@ -0,0 +1,229 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::entities::config::core::{
|
||||
CoreConfig, OauthCredentials,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use serde::{de::DeserializeOwned, Deserialize, Serialize};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
auth::{random_string, STATE_PREFIX_LENGTH},
|
||||
config::core_config,
|
||||
};
|
||||
|
||||
/// Lazily-initialized global GitHub OAuth client.
/// Returns `&None` when github oauth is disabled or misconfigured
/// (see `GithubOauthClient::new`).
pub fn github_oauth_client() -> &'static Option<GithubOauthClient> {
  static GITHUB_OAUTH_CLIENT: OnceLock<Option<GithubOauthClient>> =
    OnceLock::new();
  GITHUB_OAUTH_CLIENT
    .get_or_init(|| GithubOauthClient::new(core_config()))
}
|
||||
|
||||
/// Client for the GitHub OAuth "web application flow":
/// builds login redirect URLs, validates returned `state` values,
/// and exchanges codes for access tokens.
pub struct GithubOauthClient {
  http: reqwest::Client,
  client_id: String,
  client_secret: String,
  // Full callback URL registered with the GitHub OAuth app.
  redirect_uri: String,
  scopes: String,
  // Outstanding one-time-use `state` values issued to login redirects.
  states: Mutex<Vec<String>>,
  user_agent: String,
}
|
||||
|
||||
impl GithubOauthClient {
  /// Build the client from core config.
  /// Returns `None` (with a warning) when oauth is disabled or any of
  /// host / client id / client secret is missing.
  pub fn new(
    CoreConfig {
      github_oauth:
        OauthCredentials {
          enabled,
          id,
          secret,
        },
      host,
      ..
    }: &CoreConfig,
  ) -> Option<GithubOauthClient> {
    if !enabled {
      return None;
    }
    if host.is_empty() {
      warn!("github oauth is enabled, but 'config.host' is not configured");
      return None;
    }
    if id.is_empty() {
      warn!("github oauth is enabled, but 'config.github_oauth.id' is not configured");
      return None;
    }
    if secret.is_empty() {
      warn!("github oauth is enabled, but 'config.github_oauth.secret' is not configured");
      return None;
    }
    GithubOauthClient {
      http: reqwest::Client::new(),
      client_id: id.clone(),
      client_secret: secret.clone(),
      redirect_uri: format!("{host}/auth/github/callback"),
      // NOTE(review): user_agent and scopes are default-empty here;
      // GitHub's API generally rejects requests without a User-Agent —
      // verify this works against api.github.com.
      user_agent: Default::default(),
      scopes: Default::default(),
      states: Default::default(),
    }
    .into()
  }

  /// Build the GitHub authorize URL for a login attempt.
  /// The generated `state` (random prefix + optional post-login redirect)
  /// is remembered so the callback can validate it via `check_state`.
  #[instrument(level = "debug", skip(self))]
  pub async fn get_login_redirect_url(
    &self,
    redirect: Option<String>,
  ) -> String {
    let state_prefix = random_string(STATE_PREFIX_LENGTH);
    let state = match redirect {
      Some(redirect) => format!("{state_prefix}{redirect}"),
      None => state_prefix,
    };
    let redirect_url = format!(
      "https://github.com/login/oauth/authorize?state={state}&client_id={}&redirect_uri={}&scope={}",
      self.client_id, self.redirect_uri, self.scopes
    );
    let mut states = self.states.lock().await;
    states.push(state);
    redirect_url
  }

  /// Consume-and-check a callback `state`: returns true iff it was
  /// previously issued, removing it so it cannot be replayed.
  #[instrument(level = "debug", skip(self))]
  pub async fn check_state(&self, state: &str) -> bool {
    let mut contained = false;
    self.states.lock().await.retain(|s| {
      if s.as_str() == state {
        contained = true;
        false
      } else {
        true
      }
    });
    contained
  }

  /// Exchange the callback `code` for an access token.
  #[instrument(level = "debug", skip(self))]
  pub async fn get_access_token(
    &self,
    code: &str,
  ) -> anyhow::Result<AccessTokenResponse> {
    self
      .post::<(), _>(
        "https://github.com/login/oauth/access_token",
        &[
          ("client_id", self.client_id.as_str()),
          ("client_secret", self.client_secret.as_str()),
          ("redirect_uri", self.redirect_uri.as_str()),
          ("code", code),
        ],
        None,
        None,
      )
      .await
      .context("failed to get github access token using code")
  }

  /// Fetch the authenticated GitHub user profile for an access token.
  #[instrument(level = "debug", skip(self))]
  pub async fn get_github_user(
    &self,
    token: &str,
  ) -> anyhow::Result<GithubUserResponse> {
    self
      .get("https://api.github.com/user", &[], Some(token))
      .await
      .context("failed to get github user using access token")
  }

  /// Generic GET helper: JSON-decodes a 200 response into `R`,
  /// otherwise surfaces status + body text as an error.
  #[instrument(level = "debug", skip(self))]
  async fn get<R: DeserializeOwned>(
    &self,
    endpoint: &str,
    query: &[(&str, &str)],
    bearer_token: Option<&str>,
  ) -> anyhow::Result<R> {
    let mut req = self
      .http
      .get(endpoint)
      .query(query)
      .header("User-Agent", &self.user_agent);

    if let Some(bearer_token) = bearer_token {
      req =
        req.header("Authorization", format!("Bearer {bearer_token}"));
    }

    let res = req.send().await.context("failed to reach github")?;

    let status = res.status();

    if status == StatusCode::OK {
      let body = res
        .json()
        .await
        .context("failed to parse body into expected type")?;
      Ok(body)
    } else {
      let text = res.text().await.context(format!(
        "status: {status} | failed to get response text"
      ))?;
      Err(anyhow!("status: {status} | text: {text}"))
    }
  }

  /// Generic POST helper: like `get`, with an optional JSON body and
  /// an `Accept: application/json` header (GitHub's token endpoint
  /// returns form-encoded data without it).
  async fn post<B: Serialize, R: DeserializeOwned>(
    &self,
    endpoint: &str,
    query: &[(&str, &str)],
    body: Option<&B>,
    bearer_token: Option<&str>,
  ) -> anyhow::Result<R> {
    let mut req = self
      .http
      .post(endpoint)
      .query(query)
      .header("Accept", "application/json")
      .header("User-Agent", &self.user_agent);

    if let Some(body) = body {
      req = req.json(body);
    }

    if let Some(bearer_token) = bearer_token {
      req =
        req.header("Authorization", format!("Bearer {bearer_token}"));
    }

    let res = req.send().await.context("failed to reach github")?;

    let status = res.status();

    if status == StatusCode::OK {
      let body = res
        .json()
        .await
        .context("failed to parse POST body into expected type")?;
      Ok(body)
    } else {
      let text = res.text().await.with_context(|| format!(
        "method: POST | status: {status} | failed to get response text"
      ))?;
      Err(anyhow!("method: POST | status: {status} | text: {text}"))
    }
  }
}
|
||||
|
||||
/// Subset of GitHub's access-token-exchange response that we consume.
#[derive(Deserialize)]
pub struct AccessTokenResponse {
  pub access_token: String,
  pub scope: String,
  pub token_type: String,
}
|
||||
|
||||
/// Subset of GitHub's `/user` response that we consume.
#[derive(Deserialize)]
pub struct GithubUserResponse {
  pub login: String,
  // Numeric GitHub user id; stored stringified as `github_id` on our User.
  pub id: u128,
  pub avatar_url: String,
  // Email can be absent when the user keeps it private.
  pub email: Option<String>,
}
|
||||
122
bin/core/src/auth/github/mod.rs
Normal file
122
bin/core/src/auth/github/mod.rs
Normal file
@@ -0,0 +1,122 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use axum::{
|
||||
extract::Query, response::Redirect, routing::get, Router,
|
||||
};
|
||||
use monitor_client::entities::{
|
||||
monitor_timestamp,
|
||||
user::{User, UserConfig},
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
use serror::AddStatusCode;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
state::{db_client, jwt_client},
|
||||
};
|
||||
|
||||
use self::client::github_oauth_client;
|
||||
|
||||
use super::{RedirectQuery, STATE_PREFIX_LENGTH};
|
||||
|
||||
pub mod client;
|
||||
|
||||
/// Routes for the GitHub OAuth flow:
/// - GET /login: redirect the browser to GitHub's authorize page.
/// - GET /callback: handle GitHub's redirect back (code + state).
pub fn router() -> Router {
  Router::new()
    .route(
      "/login",
      get(|Query(query): Query<RedirectQuery>| async {
        Redirect::to(
          &github_oauth_client()
            .as_ref()
            // OK: the router is only mounted in case that the client is populated
            .unwrap()
            .get_login_redirect_url(query.redirect)
            .await,
        )
      }),
    )
    .route(
      "/callback",
      get(|query| async {
        // Any callback failure is surfaced as 401 Unauthorized.
        callback(query).await.status_code(StatusCode::UNAUTHORIZED)
      }),
    )
}
|
||||
|
||||
/// Query params GitHub appends to the OAuth callback redirect:
/// the `state` we issued at login time and the one-time `code`
/// exchanged for an access token.
#[derive(Debug, Deserialize)]
struct CallbackQuery {
  state: String,
  code: String,
}
|
||||
|
||||
/// Completes the Github oauth flow: validates state, exchanges the
/// code for an access token, looks up (or creates) the matching user,
/// then redirects the browser with a short-lived exchange token.
#[instrument(name = "GithubCallback", level = "debug")]
async fn callback(
  Query(query): Query<CallbackQuery>,
) -> anyhow::Result<Redirect> {
  // OK: the route is only mounted when the client is populated.
  let client = github_oauth_client().as_ref().unwrap();
  // CSRF guard: the state must have been issued by our /login route.
  if !client.check_state(&query.state).await {
    return Err(anyhow!("state mismatch"));
  }
  let token = client.get_access_token(&query.code).await?;
  let github_user =
    client.get_github_user(&token.access_token).await?;
  let github_id = github_user.id.to_string();
  let db_client = db_client().await;
  let user = db_client
    .users
    .find_one(doc! { "config.data.github_id": &github_id }, None)
    .await
    .context("failed at find user query from mongo")?;
  let jwt = match user {
    Some(user) => jwt_client()
      .generate(user.id)
      .context("failed to generate jwt")?,
    None => {
      // First login for this github account: create the user.
      let ts = monitor_timestamp();
      // The very first user is auto-enabled with full permissions.
      let no_users_exist =
        db_client.users.find_one(None, None).await?.is_none();
      let user = User {
        id: Default::default(),
        username: github_user.login,
        enabled: no_users_exist,
        admin: no_users_exist,
        create_server_permissions: no_users_exist,
        create_build_permissions: no_users_exist,
        updated_at: ts,
        last_update_view: 0,
        recent_servers: Vec::new(),
        recent_deployments: Vec::new(),
        recent_builds: Vec::new(),
        recent_repos: Vec::new(),
        recent_procedures: Vec::new(),
        config: UserConfig::Github {
          github_id,
          avatar: github_user.avatar_url,
        },
      };
      let user_id = db_client
        .users
        .insert_one(user, None)
        .await
        .context("failed to create user on mongo")?
        .inserted_id
        .as_object_id()
        .context("inserted_id is not ObjectId")?
        .to_string();
      jwt_client()
        .generate(user_id)
        .context("failed to generate jwt")?
    }
  };
  // Hand the browser a one-minute exchange token rather than the jwt.
  let exchange_token = jwt_client().create_exchange_token(jwt).await;
  // Everything after the random prefix is the caller's redirect url.
  let redirect = &query.state[STATE_PREFIX_LENGTH..];
  let redirect_url = if redirect.is_empty() {
    format!("{}?token={exchange_token}", core_config().host)
  } else {
    // Append the token with '&' if the url already has a query string.
    let splitter = if redirect.contains('?') { '&' } else { '?' };
    format!("{}{splitter}token={exchange_token}", redirect)
  };
  Ok(Redirect::to(&redirect_url))
}
|
||||
198
bin/core/src/auth/google/client.rs
Normal file
198
bin/core/src/auth/google/client.rs
Normal file
@@ -0,0 +1,198 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use jwt::Token;
|
||||
use monitor_client::entities::config::core::{CoreConfig, OauthCredentials};
|
||||
use reqwest::StatusCode;
|
||||
use serde::{de::DeserializeOwned, Deserialize};
|
||||
use serde_json::Value;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
auth::{random_string, STATE_PREFIX_LENGTH},
|
||||
config::core_config,
|
||||
};
|
||||
|
||||
pub fn google_oauth_client() -> &'static Option<GoogleOauthClient> {
|
||||
static GOOGLE_OAUTH_CLIENT: OnceLock<Option<GoogleOauthClient>> =
|
||||
OnceLock::new();
|
||||
GOOGLE_OAUTH_CLIENT
|
||||
.get_or_init(|| GoogleOauthClient::new(core_config()))
|
||||
}
|
||||
|
||||
/// Client implementing the google oauth authorization-code flow.
pub struct GoogleOauthClient {
  // Reused reqwest client for token / api calls.
  http: reqwest::Client,
  client_id: String,
  client_secret: String,
  // Where google sends the browser back after consent.
  redirect_uri: String,
  // Url-encoded, space-separated scope list.
  scopes: String,
  // Outstanding `state` values issued by /login, awaiting callback.
  states: Mutex<Vec<String>>,
  user_agent: String,
}
|
||||
|
||||
impl GoogleOauthClient {
|
||||
pub fn new(
|
||||
CoreConfig {
|
||||
google_oauth:
|
||||
OauthCredentials {
|
||||
enabled,
|
||||
id,
|
||||
secret,
|
||||
},
|
||||
host,
|
||||
..
|
||||
}: &CoreConfig,
|
||||
) -> Option<GoogleOauthClient> {
|
||||
if !enabled {
|
||||
return None;
|
||||
}
|
||||
if host.is_empty() {
|
||||
warn!("google oauth is enabled, but 'config.host' is not configured");
|
||||
return None;
|
||||
}
|
||||
if id.is_empty() {
|
||||
warn!("google oauth is enabled, but 'config.google_oauth.id' is not configured");
|
||||
return None;
|
||||
}
|
||||
if secret.is_empty() {
|
||||
warn!("google oauth is enabled, but 'config.google_oauth.secret' is not configured");
|
||||
return None;
|
||||
}
|
||||
let scopes = urlencoding::encode(
|
||||
&[
|
||||
"https://www.googleapis.com/auth/userinfo.profile",
|
||||
"https://www.googleapis.com/auth/userinfo.email",
|
||||
]
|
||||
.join(" "),
|
||||
)
|
||||
.to_string();
|
||||
GoogleOauthClient {
|
||||
http: Default::default(),
|
||||
client_id: id.clone(),
|
||||
client_secret: secret.clone(),
|
||||
redirect_uri: format!("{host}/auth/google/callback"),
|
||||
user_agent: String::from("monitor"),
|
||||
states: Default::default(),
|
||||
scopes,
|
||||
}
|
||||
.into()
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_login_redirect_url(
|
||||
&self,
|
||||
redirect: Option<String>,
|
||||
) -> String {
|
||||
let state_prefix = random_string(STATE_PREFIX_LENGTH);
|
||||
let state = match redirect {
|
||||
Some(redirect) => format!("{state_prefix}{redirect}"),
|
||||
None => state_prefix,
|
||||
};
|
||||
let redirect_url = format!(
|
||||
"https://accounts.google.com/o/oauth2/v2/auth?response_type=code&state={state}&client_id={}&redirect_uri={}&scope={}",
|
||||
self.client_id, self.redirect_uri, self.scopes
|
||||
);
|
||||
let mut states = self.states.lock().await;
|
||||
states.push(state);
|
||||
redirect_url
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn check_state(&self, state: &str) -> bool {
|
||||
let mut contained = false;
|
||||
self.states.lock().await.retain(|s| {
|
||||
if s.as_str() == state {
|
||||
contained = true;
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
});
|
||||
contained
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_access_token(
|
||||
&self,
|
||||
code: &str,
|
||||
) -> anyhow::Result<AccessTokenResponse> {
|
||||
self
|
||||
.post::<_>(
|
||||
"https://oauth2.googleapis.com/token",
|
||||
&[
|
||||
("client_id", self.client_id.as_str()),
|
||||
("client_secret", self.client_secret.as_str()),
|
||||
("redirect_uri", self.redirect_uri.as_str()),
|
||||
("code", code),
|
||||
("grant_type", "authorization_code"),
|
||||
],
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to get google access token using code")
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub fn get_google_user(
|
||||
&self,
|
||||
id_token: &str,
|
||||
) -> anyhow::Result<GoogleUser> {
|
||||
let t: Token<Value, GoogleUser, jwt::Unverified> =
|
||||
Token::parse_unverified(id_token)
|
||||
.context("failed to parse id_token")?;
|
||||
Ok(t.claims().to_owned())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
async fn post<R: DeserializeOwned>(
|
||||
&self,
|
||||
endpoint: &str,
|
||||
body: &[(&str, &str)],
|
||||
bearer_token: Option<&str>,
|
||||
) -> anyhow::Result<R> {
|
||||
let mut req = self
|
||||
.http
|
||||
.post(endpoint)
|
||||
.form(body)
|
||||
.header("Accept", "application/json")
|
||||
.header("User-Agent", &self.user_agent);
|
||||
|
||||
if let Some(bearer_token) = bearer_token {
|
||||
req =
|
||||
req.header("Authorization", format!("Bearer {bearer_token}"));
|
||||
}
|
||||
|
||||
let res = req.send().await.context("failed to reach google")?;
|
||||
|
||||
let status = res.status();
|
||||
|
||||
if status == StatusCode::OK {
|
||||
let body = res
|
||||
.json()
|
||||
.await
|
||||
.context("failed to parse POST body into expected type")?;
|
||||
Ok(body)
|
||||
} else {
|
||||
let text = res.text().await.context(format!(
|
||||
"method: POST | status: {status} | failed to get response text"
|
||||
))?;
|
||||
Err(anyhow!("method: POST | status: {status} | text: {text}"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Response body of google's token endpoint.
#[derive(Deserialize)]
pub struct AccessTokenResponse {
  /// Token for calling google APIs on the user's behalf.
  pub access_token: String,
  /// JWT carrying the user's identity claims.
  pub id_token: String,
  /// Space-separated scopes granted to the token.
  pub scope: String,
  /// Token type as reported by google.
  pub token_type: String,
}
|
||||
|
||||
/// Identity claims extracted from google's `id_token`.
#[derive(Deserialize, Clone)]
pub struct GoogleUser {
  /// Google's stable subject identifier for the account.
  #[serde(rename = "sub")]
  pub id: String,
  pub email: String,
  /// Url of the user's profile picture.
  pub picture: String,
}
|
||||
137
bin/core/src/auth/google/mod.rs
Normal file
137
bin/core/src/auth/google/mod.rs
Normal file
@@ -0,0 +1,137 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use axum::{
|
||||
extract::Query, response::Redirect, routing::get, Router,
|
||||
};
|
||||
use monitor_client::entities::user::{User, UserConfig};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
use serror::AddStatusCode;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
state::{db_client, jwt_client},
|
||||
};
|
||||
|
||||
use self::client::google_oauth_client;
|
||||
|
||||
use super::{RedirectQuery, STATE_PREFIX_LENGTH};
|
||||
|
||||
pub mod client;
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route(
|
||||
"/login",
|
||||
get(|Query(query): Query<RedirectQuery>| async move {
|
||||
Redirect::to(
|
||||
&google_oauth_client()
|
||||
.as_ref()
|
||||
// OK: its not mounted unless the client is populated
|
||||
.unwrap()
|
||||
.get_login_redirect_url(query.redirect)
|
||||
.await,
|
||||
)
|
||||
}),
|
||||
)
|
||||
.route(
|
||||
"/callback",
|
||||
get(|query| async {
|
||||
callback(query).await.status_code(StatusCode::UNAUTHORIZED)
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
/// Query params google sends to the `/callback` route.
/// All fields optional: google sends `error` instead of
/// `state`/`code` when the user denies consent.
#[derive(Debug, Deserialize)]
struct CallbackQuery {
  state: Option<String>,
  code: Option<String>,
  error: Option<String>,
}
|
||||
|
||||
#[instrument(name = "GoogleCallback", level = "debug")]
|
||||
async fn callback(
|
||||
Query(query): Query<CallbackQuery>,
|
||||
) -> anyhow::Result<Redirect> {
|
||||
// Safe: the method is only called after the client is_some
|
||||
let client = google_oauth_client().as_ref().unwrap();
|
||||
if let Some(error) = query.error {
|
||||
return Err(anyhow!("auth error from google: {error}"));
|
||||
}
|
||||
let state = query
|
||||
.state
|
||||
.context("callback query does not contain state")?;
|
||||
if !client.check_state(&state).await {
|
||||
return Err(anyhow!("state mismatch"));
|
||||
}
|
||||
let token = client
|
||||
.get_access_token(
|
||||
&query.code.context("callback query does not contain code")?,
|
||||
)
|
||||
.await?;
|
||||
let google_user = client.get_google_user(&token.id_token)?;
|
||||
let google_id = google_user.id.to_string();
|
||||
let db_client = db_client().await;
|
||||
let user = db_client
|
||||
.users
|
||||
.find_one(doc! { "config.data.google_id": &google_id }, None)
|
||||
.await
|
||||
.context("failed at find user query from mongo")?;
|
||||
let jwt = match user {
|
||||
Some(user) => jwt_client()
|
||||
.generate(user.id)
|
||||
.context("failed to generate jwt")?,
|
||||
None => {
|
||||
let ts = unix_timestamp_ms() as i64;
|
||||
let no_users_exist =
|
||||
db_client.users.find_one(None, None).await?.is_none();
|
||||
let user = User {
|
||||
id: Default::default(),
|
||||
username: google_user
|
||||
.email
|
||||
.split('@')
|
||||
.collect::<Vec<&str>>()
|
||||
.first()
|
||||
.unwrap()
|
||||
.to_string(),
|
||||
enabled: no_users_exist,
|
||||
admin: no_users_exist,
|
||||
create_server_permissions: no_users_exist,
|
||||
create_build_permissions: no_users_exist,
|
||||
updated_at: ts,
|
||||
last_update_view: 0,
|
||||
recent_servers: Vec::new(),
|
||||
recent_deployments: Vec::new(),
|
||||
recent_builds: Vec::new(),
|
||||
recent_repos: Vec::new(),
|
||||
recent_procedures: Vec::new(),
|
||||
config: UserConfig::Google {
|
||||
google_id,
|
||||
avatar: google_user.picture,
|
||||
},
|
||||
};
|
||||
let user_id = db_client
|
||||
.users
|
||||
.insert_one(user, None)
|
||||
.await
|
||||
.context("failed to create user on mongo")?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("inserted_id is not ObjectId")?
|
||||
.to_string();
|
||||
jwt_client()
|
||||
.generate(user_id)
|
||||
.context("failed to generate jwt")?
|
||||
}
|
||||
};
|
||||
let exchange_token = jwt_client().create_exchange_token(jwt).await;
|
||||
let redirect = &state[STATE_PREFIX_LENGTH..];
|
||||
let redirect_url = if redirect.is_empty() {
|
||||
format!("{}?token={exchange_token}", core_config().host)
|
||||
} else {
|
||||
let splitter = if redirect.contains('?') { '&' } else { '?' };
|
||||
format!("{}{splitter}token={exchange_token}", redirect)
|
||||
};
|
||||
Ok(Redirect::to(&redirect_url))
|
||||
}
|
||||
89
bin/core/src/auth/jwt.rs
Normal file
89
bin/core/src/auth/jwt.rs
Normal file
@@ -0,0 +1,89 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_timing_util::{
|
||||
get_timelength_in_ms, unix_timestamp_ms, Timelength,
|
||||
};
|
||||
use hmac::{Hmac, Mac};
|
||||
use jwt::SignWithKey;
|
||||
use monitor_client::entities::config::core::CoreConfig;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::Sha256;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use super::random_string;
|
||||
|
||||
// Maps exchange token -> (jwt, expiry timestamp in unix ms).
type ExchangeTokenMap = Mutex<HashMap<String, (String, u128)>>;

/// Claims embedded in the jwts issued by `JwtClient`.
#[derive(Serialize, Deserialize)]
pub struct JwtClaims {
  /// User id the token authenticates.
  pub id: String,
  /// Issued-at, unix ms.
  pub iat: u128,
  /// Expiry, unix ms.
  pub exp: u128,
}
|
||||
|
||||
/// Issues and redeems jwts and short-lived exchange tokens.
pub struct JwtClient {
  /// HMAC-SHA256 signing key. Random per process, so all jwts are
  /// invalidated on restart.
  pub key: Hmac<Sha256>,
  // How long issued jwts remain valid, in ms.
  valid_for_ms: u128,
  // Outstanding exchange tokens awaiting redemption.
  exchange_tokens: ExchangeTokenMap,
}
|
||||
|
||||
impl JwtClient {
  /// Build the client with a fresh random signing key and the jwt
  /// validity window from config.
  pub fn new(config: &CoreConfig) -> JwtClient {
    let key = Hmac::new_from_slice(random_string(40).as_bytes())
      .expect("failed at taking HmacSha256 of jwt secret");
    JwtClient {
      key,
      valid_for_ms: get_timelength_in_ms(
        // NOTE(review): round-trips jwt_valid_for through
        // Display/FromStr to get a Timelength — confirm whether a
        // direct conversion exists.
        config.jwt_valid_for.to_string().parse().unwrap(),
      ),
      exchange_tokens: Default::default(),
    }
  }

  /// Sign a jwt for `user_id`, valid for the configured window.
  pub fn generate(&self, user_id: String) -> anyhow::Result<String> {
    let iat = unix_timestamp_ms();
    let exp = iat + self.valid_for_ms;
    let claims = JwtClaims {
      id: user_id,
      iat,
      exp,
    };
    let jwt = claims
      .sign_with_key(&self.key)
      .context("failed at signing claim")?;
    Ok(jwt)
  }

  /// Store `jwt` behind a random token that expires in one minute.
  /// Used so the jwt itself never appears in a redirect url.
  #[instrument(level = "debug", skip_all)]
  pub async fn create_exchange_token(&self, jwt: String) -> String {
    let exchange_token = random_string(40);
    self.exchange_tokens.lock().await.insert(
      exchange_token.clone(),
      (
        jwt,
        unix_timestamp_ms()
          + get_timelength_in_ms(Timelength::OneMinute),
      ),
    );
    exchange_token
  }

  /// Redeem a token from [`Self::create_exchange_token`] for its jwt.
  /// Single-use: the entry is removed even if it turns out expired.
  #[instrument(level = "debug", skip(self))]
  pub async fn redeem_exchange_token(
    &self,
    exchange_token: &str,
  ) -> anyhow::Result<String> {
    let (jwt, valid_until) = self
      .exchange_tokens
      .lock()
      .await
      .remove(exchange_token)
      .context("invalid exchange token: unrecognized")?;
    if unix_timestamp_ms() < valid_until {
      Ok(jwt)
    } else {
      Err(anyhow!("invalid exchange token: expired"))
    }
  }
}
|
||||
134
bin/core/src/auth/local.rs
Normal file
134
bin/core/src/auth/local.rs
Normal file
@@ -0,0 +1,134 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use axum::http::HeaderMap;
|
||||
use monitor_client::{
|
||||
api::auth::{
|
||||
CreateLocalUser, CreateLocalUserResponse, LoginLocalUser,
|
||||
LoginLocalUserResponse,
|
||||
},
|
||||
entities::user::{User, UserConfig},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
state::State,
|
||||
state::{db_client, jwt_client},
|
||||
};
|
||||
|
||||
const BCRYPT_COST: u32 = 10;
|
||||
|
||||
impl Resolve<CreateLocalUser, HeaderMap> for State {
  /// Create a username/password user (only when local auth is
  /// enabled) and return a jwt for the new account.
  #[instrument(name = "CreateLocalUser", skip(self))]
  async fn resolve(
    &self,
    CreateLocalUser { username, password }: CreateLocalUser,
    _: HeaderMap,
  ) -> anyhow::Result<CreateLocalUserResponse> {
    if !core_config().local_auth {
      return Err(anyhow!("local auth is not enabled"));
    }

    if username.is_empty() {
      return Err(anyhow!("username cannot be empty string"));
    }

    // Usernames that parse as ObjectIds would be ambiguous with
    // user-id lookups.
    if ObjectId::from_str(&username).is_ok() {
      return Err(anyhow!("username cannot be valid ObjectId"));
    }

    // Only the bcrypt hash is stored, never the plaintext.
    let password = bcrypt::hash(password, BCRYPT_COST)
      .context("failed to hash password")?;

    // The very first user is auto-enabled with full permissions.
    let no_users_exist = db_client()
      .await
      .users
      .find_one(None, None)
      .await?
      .is_none();

    let ts = unix_timestamp_ms() as i64;

    let user = User {
      id: Default::default(),
      username,
      enabled: no_users_exist,
      admin: no_users_exist,
      create_server_permissions: no_users_exist,
      create_build_permissions: no_users_exist,
      updated_at: ts,
      last_update_view: 0,
      recent_servers: Vec::new(),
      recent_deployments: Vec::new(),
      recent_builds: Vec::new(),
      recent_repos: Vec::new(),
      recent_procedures: Vec::new(),
      config: UserConfig::Local { password },
    };

    let user_id = db_client()
      .await
      .users
      .insert_one(user, None)
      .await
      .context("failed to create user")?
      .inserted_id
      .as_object_id()
      .context("inserted_id is not ObjectId")?
      .to_string();

    let jwt = jwt_client()
      .generate(user_id)
      .context("failed to generate jwt for user")?;

    Ok(CreateLocalUserResponse { jwt })
  }
}
|
||||
|
||||
impl Resolve<LoginLocalUser, HeaderMap> for State {
  /// Verify a username/password pair against the stored bcrypt hash
  /// and return a jwt on success.
  #[instrument(name = "LoginLocalUser", level = "debug", skip(self))]
  async fn resolve(
    &self,
    LoginLocalUser { username, password }: LoginLocalUser,
    _: HeaderMap,
  ) -> anyhow::Result<LoginLocalUserResponse> {
    if !core_config().local_auth {
      return Err(anyhow!("local auth is not enabled"));
    }

    let user = db_client()
      .await
      .users
      .find_one(doc! { "username": &username }, None)
      .await
      .context("failed at db query for users")?
      .with_context(|| {
        format!("did not find user with username {username}")
      })?;

    // Only users created via local auth carry a password hash.
    let UserConfig::Local {
      password: user_pw_hash,
    } = user.config
    else {
      return Err(anyhow!(
        "non-local auth users can not log in with a password"
      ));
    };

    let verified = bcrypt::verify(password, &user_pw_hash)
      .context("failed at verify password")?;

    if !verified {
      return Err(anyhow!("invalid credentials"));
    }

    let jwt = jwt_client()
      .generate(user.id)
      .context("failed at generating jwt for user")?;

    Ok(LoginLocalUserResponse { jwt })
  }
}
|
||||
165
bin/core/src/auth/mod.rs
Normal file
165
bin/core/src/auth/mod.rs
Normal file
@@ -0,0 +1,165 @@
|
||||
use ::jwt::VerifyWithKey;
|
||||
use anyhow::{anyhow, Context};
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use axum::{
|
||||
extract::Request, http::HeaderMap, middleware::Next,
|
||||
response::Response,
|
||||
};
|
||||
use monitor_client::entities::{monitor_timestamp, user::User};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use rand::{distributions::Alphanumeric, thread_rng, Rng};
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
use serror::AddStatusCode;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_user,
|
||||
state::{db_client, jwt_client},
|
||||
};
|
||||
|
||||
use self::jwt::JwtClaims;
|
||||
|
||||
pub mod github;
|
||||
pub mod google;
|
||||
pub mod jwt;
|
||||
|
||||
mod local;
|
||||
|
||||
/// Number of random characters prepended to the oauth `state` value;
/// anything after this prefix is the caller's post-login redirect url.
const STATE_PREFIX_LENGTH: usize = 20;

/// Query accepted by the oauth `/login` routes.
#[derive(Deserialize)]
pub struct RedirectQuery {
  /// Optional url to send the browser to after login completes.
  pub redirect: Option<String>,
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn auth_request(
|
||||
headers: HeaderMap,
|
||||
mut req: Request,
|
||||
next: Next,
|
||||
) -> serror::Result<Response> {
|
||||
let user = authenticate_check_enabled(&headers)
|
||||
.await
|
||||
.status_code(StatusCode::UNAUTHORIZED)?;
|
||||
req.extensions_mut().insert(user);
|
||||
Ok(next.run(req).await)
|
||||
}
|
||||
|
||||
pub fn random_string(length: usize) -> String {
|
||||
thread_rng()
|
||||
.sample_iter(&Alphanumeric)
|
||||
.take(length)
|
||||
.map(char::from)
|
||||
.collect()
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn get_user_id_from_headers(
|
||||
headers: &HeaderMap,
|
||||
) -> anyhow::Result<String> {
|
||||
match (
|
||||
headers.get("authorization"),
|
||||
headers.get("x-api-key"),
|
||||
headers.get("x-api-secret"),
|
||||
) {
|
||||
(Some(jwt), _, _) => {
|
||||
// USE JWT
|
||||
let jwt = jwt.to_str().context("jwt is not str")?;
|
||||
auth_jwt_get_user_id(jwt)
|
||||
.await
|
||||
.context("failed to authenticate jwt")
|
||||
}
|
||||
(None, Some(key), Some(secret)) => {
|
||||
// USE API KEY / SECRET
|
||||
let key = key.to_str().context("key is not str")?;
|
||||
let secret = secret.to_str().context("secret is not str")?;
|
||||
auth_api_key_get_user_id(key, secret)
|
||||
.await
|
||||
.context("failed to authenticate api key")
|
||||
}
|
||||
_ => {
|
||||
// AUTH FAIL
|
||||
Err(anyhow!("must attach either AUTHORIZATION header with jwt OR pass X-API-KEY and X-API-SECRET"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn authenticate_check_enabled(
|
||||
headers: &HeaderMap,
|
||||
) -> anyhow::Result<User> {
|
||||
let user_id = get_user_id_from_headers(headers).await?;
|
||||
let user = get_user(&user_id).await?;
|
||||
if user.enabled {
|
||||
Ok(user)
|
||||
} else {
|
||||
Err(anyhow!("user not enabled"))
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn auth_jwt_get_user_id(
|
||||
jwt: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
let claims: JwtClaims = jwt
|
||||
.verify_with_key(&jwt_client().key)
|
||||
.context("failed to verify claims")?;
|
||||
if claims.exp > unix_timestamp_ms() {
|
||||
Ok(claims.id)
|
||||
} else {
|
||||
Err(anyhow!("token has expired"))
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn auth_jwt_check_enabled(
|
||||
jwt: &str,
|
||||
) -> anyhow::Result<User> {
|
||||
let user_id = auth_jwt_get_user_id(jwt).await?;
|
||||
check_enabled(user_id).await
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn auth_api_key_get_user_id(
|
||||
key: &str,
|
||||
secret: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
let key = db_client()
|
||||
.await
|
||||
.api_keys
|
||||
.find_one(doc! { "key": key }, None)
|
||||
.await
|
||||
.context("failed to query db")?
|
||||
.context("no api key matching key")?;
|
||||
if key.expires != 0 && key.expires < monitor_timestamp() {
|
||||
return Err(anyhow!("api key expired"));
|
||||
}
|
||||
if bcrypt::verify(secret, &key.secret)
|
||||
.context("failed to verify secret hash")?
|
||||
{
|
||||
// secret matches
|
||||
Ok(key.user_id)
|
||||
} else {
|
||||
// secret mismatch
|
||||
Err(anyhow!("invalid api secret"))
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn auth_api_key_check_enabled(
|
||||
key: &str,
|
||||
secret: &str,
|
||||
) -> anyhow::Result<User> {
|
||||
let user_id = auth_api_key_get_user_id(key, secret).await?;
|
||||
check_enabled(user_id).await
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn check_enabled(user_id: String) -> anyhow::Result<User> {
|
||||
let user = get_user(&user_id).await?;
|
||||
if user.enabled {
|
||||
Ok(user)
|
||||
} else {
|
||||
Err(anyhow!("user not enabled"))
|
||||
}
|
||||
}
|
||||
1059
bin/core/src/cloud/aws.rs
Normal file
1059
bin/core/src/cloud/aws.rs
Normal file
File diff suppressed because it is too large
Load Diff
7
bin/core/src/cloud/mod.rs
Normal file
7
bin/core/src/cloud/mod.rs
Normal file
@@ -0,0 +1,7 @@
|
||||
pub mod aws;
|
||||
|
||||
/// Describes what to clean up after a build, depending on where the
/// build ran.
#[derive(Debug)]
pub enum BuildCleanupData {
  /// Build ran on an existing server — identified by the cloned repo.
  Server { repo_name: String },
  /// Build ran on an AWS instance — presumably terminated after the
  /// build; confirm against the aws module.
  Aws { instance_id: String, region: String },
}
|
||||
124
bin/core/src/config.rs
Normal file
124
bin/core/src/config.rs
Normal file
@@ -0,0 +1,124 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::Context;
|
||||
use merge_config_files::parse_config_file;
|
||||
use monitor_client::entities::config::core::{CoreConfig, Env};
|
||||
use serde::Deserialize;
|
||||
|
||||
pub fn frontend_path() -> &'static String {
|
||||
#[derive(Deserialize)]
|
||||
struct FrontendEnv {
|
||||
#[serde(default = "default_frontend_path")]
|
||||
monitor_frontend_path: String,
|
||||
}
|
||||
|
||||
fn default_frontend_path() -> String {
|
||||
"/frontend".to_string()
|
||||
}
|
||||
|
||||
static FRONTEND_PATH: OnceLock<String> = OnceLock::new();
|
||||
FRONTEND_PATH.get_or_init(|| {
|
||||
let FrontendEnv {
|
||||
monitor_frontend_path,
|
||||
} = envy::from_env()
|
||||
.context("failed to parse FrontendEnv")
|
||||
.unwrap();
|
||||
monitor_frontend_path
|
||||
})
|
||||
}
|
||||
|
||||
/// The core configuration, parsed once from the config file and then
/// overridden field-by-field from environment variables.
/// Panics (by design, at startup) if the env or config file cannot
/// be parsed.
pub fn core_config() -> &'static CoreConfig {
  static CORE_CONFIG: OnceLock<CoreConfig> = OnceLock::new();
  CORE_CONFIG.get_or_init(|| {
    let env: Env = envy::from_env()
      .context("failed to parse core Env")
      .unwrap();
    let config_path = &env.monitor_config_path;
    let mut config =
      parse_config_file::<CoreConfig>(config_path.as_str())
        .unwrap_or_else(|e| {
          panic!("failed at parsing config at {config_path} | {e:#}")
        });

    // Overrides
    // Pattern: `unwrap_or` for required fields (env wins when set),
    // `or` for optional fields.
    config.title = env.monitor_title.unwrap_or(config.title);
    config.host = env.monitor_host.unwrap_or(config.host);
    config.port = env.monitor_port.unwrap_or(config.port);
    config.passkey = env.monitor_passkey.unwrap_or(config.passkey);
    config.jwt_valid_for =
      env.monitor_jwt_valid_for.unwrap_or(config.jwt_valid_for);
    config.monitoring_interval = env
      .monitor_monitoring_interval
      .unwrap_or(config.monitoring_interval);
    config.keep_stats_for_days = env
      .monitor_keep_stats_for_days
      .unwrap_or(config.keep_stats_for_days);
    config.keep_alerts_for_days = env
      .monitor_keep_alerts_for_days
      .unwrap_or(config.keep_alerts_for_days);
    config.github_webhook_secret = env
      .monitor_github_webhook_secret
      .unwrap_or(config.github_webhook_secret);
    config.github_webhook_base_url = env
      .monitor_github_webhook_base_url
      .or(config.github_webhook_base_url);
    config.docker_organizations = env
      .monitor_docker_organizations
      .unwrap_or(config.docker_organizations);

    // Logging
    config.logging.level =
      env.monitor_logging_level.unwrap_or(config.logging.level);
    config.logging.stdio =
      env.monitor_logging_stdio.unwrap_or(config.logging.stdio);
    config.logging.otlp_endpoint = env
      .monitor_logging_otlp_endpoint
      .or(config.logging.otlp_endpoint);
    config.logging.opentelemetry_service_name = env
      .monitor_logging_opentelemetry_service_name
      .unwrap_or(config.logging.opentelemetry_service_name);

    // Auth
    config.local_auth =
      env.monitor_local_auth.unwrap_or(config.local_auth);

    config.github_oauth.enabled = env
      .monitor_github_oauth_enabled
      .unwrap_or(config.github_oauth.enabled);
    config.github_oauth.id = env
      .monitor_github_oauth_id
      .unwrap_or(config.github_oauth.id);
    config.github_oauth.secret = env
      .monitor_github_oauth_secret
      .unwrap_or(config.github_oauth.secret);

    config.google_oauth.enabled = env
      .monitor_google_oauth_enabled
      .unwrap_or(config.google_oauth.enabled);
    config.google_oauth.id = env
      .monitor_google_oauth_id
      .unwrap_or(config.google_oauth.id);
    config.google_oauth.secret = env
      .monitor_google_oauth_secret
      .unwrap_or(config.google_oauth.secret);

    // Mongo
    config.mongo.uri = env.monitor_mongo_uri.or(config.mongo.uri);
    config.mongo.address =
      env.monitor_mongo_address.or(config.mongo.address);
    config.mongo.username =
      env.monitor_mongo_username.or(config.mongo.username);
    config.mongo.password =
      env.monitor_mongo_password.or(config.mongo.password);
    config.mongo.app_name =
      env.monitor_mongo_app_name.unwrap_or(config.mongo.app_name);
    config.mongo.db_name =
      env.monitor_mongo_db_name.unwrap_or(config.mongo.db_name);

    // AWS
    config.aws.access_key_id = env
      .monitor_aws_access_key_id
      .unwrap_or(config.aws.access_key_id);
    config.aws.secret_access_key = env
      .monitor_aws_secret_access_key
      .unwrap_or(config.aws.secret_access_key);

    config
  })
}
|
||||
122
bin/core/src/db.rs
Normal file
122
bin/core/src/db.rs
Normal file
@@ -0,0 +1,122 @@
|
||||
use mongo_indexed::{create_index, create_unique_index};
|
||||
use monitor_client::entities::{
|
||||
alert::Alert,
|
||||
alerter::Alerter,
|
||||
api_key::ApiKey,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
config::core::MongoConfig,
|
||||
deployment::Deployment,
|
||||
permission::Permission,
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::{stats::SystemStatsRecord, Server},
|
||||
server_template::ServerTemplate,
|
||||
tag::Tag,
|
||||
update::Update,
|
||||
user::User,
|
||||
user_group::UserGroup,
|
||||
variable::Variable,
|
||||
};
|
||||
use mungos::{
|
||||
init::MongoBuilder,
|
||||
mongodb::{Collection, Database},
|
||||
};
|
||||
|
||||
/// Typed handles to every mongo collection used by core.
pub struct DbClient {
  pub users: Collection<User>,
  pub user_groups: Collection<UserGroup>,
  pub permissions: Collection<Permission>,
  pub api_keys: Collection<ApiKey>,
  pub tags: Collection<Tag>,
  pub variables: Collection<Variable>,
  pub updates: Collection<Update>,
  pub alerts: Collection<Alert>,
  pub stats: Collection<SystemStatsRecord>,
  // RESOURCES
  pub servers: Collection<Server>,
  pub deployments: Collection<Deployment>,
  pub builds: Collection<Build>,
  pub builders: Collection<Builder>,
  pub repos: Collection<Repo>,
  pub procedures: Collection<Procedure>,
  pub alerters: Collection<Alerter>,
  pub server_templates: Collection<ServerTemplate>,
  // Raw database handle, for operations outside the typed collections.
  pub db: Database,
}
|
||||
|
||||
impl DbClient {
  /// Connect to mongo and initialize every collection (creating
  /// indexes as needed).
  ///
  /// Connection precedence: `uri` wins; otherwise `address` with
  /// optional `username` + `password`. Exits the process if neither
  /// is configured.
  pub async fn new(
    MongoConfig {
      uri,
      address,
      username,
      password,
      app_name,
      db_name,
    }: &MongoConfig,
  ) -> anyhow::Result<DbClient> {
    let mut client = MongoBuilder::default().app_name(app_name);

    match (uri, address, username, password) {
      (Some(uri), _, _, _) => {
        client = client.uri(uri);
      }
      (_, Some(address), Some(username), Some(password)) => {
        client = client
          .address(address)
          .username(username)
          .password(password);
      }
      (_, Some(address), _, _) => {
        // Address without credentials: unauthenticated connection.
        client = client.address(address);
      }
      _ => {
        error!("config.mongo not configured correctly. must pass either config.mongo.uri, or config.mongo.address + config.mongo.username? + config.mongo.password?");
        std::process::exit(1)
      }
    }

    let client = client.build().await?;
    let db = client.database(db_name);

    let client = DbClient {
      // `true` asks mongo_indexed to create the derived indexes.
      users: mongo_indexed::collection(&db, true).await?,
      user_groups: mongo_indexed::collection(&db, true).await?,
      permissions: mongo_indexed::collection(&db, true).await?,
      api_keys: mongo_indexed::collection(&db, true).await?,
      tags: mongo_indexed::collection(&db, true).await?,
      variables: mongo_indexed::collection(&db, true).await?,
      updates: mongo_indexed::collection(&db, true).await?,
      alerts: mongo_indexed::collection(&db, true).await?,
      stats: mongo_indexed::collection(&db, true).await?,
      // RESOURCES - all get the standard name/tags indexes.
      servers: resource_collection(&db, "Server").await?,
      deployments: resource_collection(&db, "Deployment").await?,
      builds: resource_collection(&db, "Build").await?,
      builders: resource_collection(&db, "Builder").await?,
      repos: resource_collection(&db, "Repo").await?,
      alerters: resource_collection(&db, "Alerter").await?,
      procedures: resource_collection(&db, "Procedure").await?,
      server_templates: resource_collection(&db, "ServerTemplate")
        .await?,
      //
      db,
    };
    Ok(client)
  }
}
|
||||
|
||||
/// Get a handle to the named resource collection, ensuring its
/// standard indexes exist: a unique index on `name` and a plain
/// index on `tags`.
///
/// `create_unique_index` / `create_index` are helpers imported
/// above this chunk — presumably from `mongo_indexed`; confirm.
async fn resource_collection<T>(
  db: &Database,
  collection_name: &str,
) -> anyhow::Result<Collection<T>> {
  let coll = db.collection::<T>(collection_name);

  // Resource names must be unique within their collection.
  create_unique_index(&coll, "name").await?;

  create_index(&coll, "tags").await?;

  Ok(coll)
}
|
||||
94
bin/core/src/helpers/action_state.rs
Normal file
94
bin/core/src/helpers/action_state.rs
Normal file
@@ -0,0 +1,94 @@
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use anyhow::anyhow;
|
||||
use monitor_client::{
|
||||
busy::Busy,
|
||||
entities::{
|
||||
build::BuildActionState, deployment::DeploymentActionState,
|
||||
procedure::ProcedureActionState, repo::RepoActionState,
|
||||
server::ServerActionState,
|
||||
},
|
||||
};
|
||||
|
||||
use super::cache::Cache;
|
||||
|
||||
/// Per-resource-type caches of in-flight action state, keyed by
/// resource id. Used to reject concurrent conflicting actions on
/// the same resource.
#[derive(Default)]
pub struct ActionStates {
  pub build: Cache<String, Arc<ActionState<BuildActionState>>>,
  pub deployment:
    Cache<String, Arc<ActionState<DeploymentActionState>>>,
  pub server: Cache<String, Arc<ActionState<ServerActionState>>>,
  pub repo: Cache<String, Arc<ActionState<RepoActionState>>>,
  pub procedure:
    Cache<String, Arc<ActionState<ProcedureActionState>>>,
}
|
||||
|
||||
/// Need to be able to check "busy" with write lock acquired.
///
/// Wraps the per-resource action flags in a std (blocking) mutex
/// so that checking busyness and flipping a flag happen as one
/// atomic step. The lock is never held across an await.
#[derive(Default)]
pub struct ActionState<States: Default + Send + 'static>(
  Mutex<States>,
);
|
||||
|
||||
impl<States: Default + Busy + Copy + Send + 'static>
|
||||
ActionState<States>
|
||||
{
|
||||
pub fn get(&self) -> anyhow::Result<States> {
|
||||
Ok(
|
||||
*self
|
||||
.0
|
||||
.lock()
|
||||
.map_err(|e| anyhow!("action state lock poisoned | {e:?}"))?,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn busy(&self) -> anyhow::Result<bool> {
|
||||
Ok(
|
||||
self
|
||||
.0
|
||||
.lock()
|
||||
.map_err(|e| anyhow!("action state lock poisoned | {e:?}"))?
|
||||
.busy(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Will acquire lock, check busy, and if not will
|
||||
/// run the provided update function on the states.
|
||||
/// Returns a guard that returns the states to default (not busy) when dropped.
|
||||
pub fn update(
|
||||
&self,
|
||||
handler: impl Fn(&mut States),
|
||||
) -> anyhow::Result<UpdateGuard<States>> {
|
||||
let mut lock = self
|
||||
.0
|
||||
.lock()
|
||||
.map_err(|e| anyhow!("action state lock poisoned | {e:?}"))?;
|
||||
if lock.busy() {
|
||||
return Err(anyhow!("resource is busy"));
|
||||
}
|
||||
handler(&mut *lock);
|
||||
Ok(UpdateGuard(&self.0))
|
||||
}
|
||||
}
|
||||
|
||||
/// When dropped will return the inner state to default (not busy).
/// The inner mutex guard must already be dropped BEFORE this is dropped,
/// which is guaranteed as the inner guard is dropped by all public methods before
/// user could drop UpdateGuard.
pub struct UpdateGuard<'a, States: Default + Send + 'static>(
  &'a Mutex<States>,
);
|
||||
|
||||
impl<'a, States: Default + Send + 'static> Drop
|
||||
for UpdateGuard<'a, States>
|
||||
{
|
||||
fn drop(&mut self) {
|
||||
let mut lock = match self.0.lock() {
|
||||
Ok(lock) => lock,
|
||||
Err(e) => {
|
||||
error!("CRITICAL: an action state lock is poisoned | {e:?}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
*lock = States::default();
|
||||
}
|
||||
}
|
||||
255
bin/core/src/helpers/alert.rs
Normal file
255
bin/core/src/helpers/alert.rs
Normal file
@@ -0,0 +1,255 @@
|
||||
use anyhow::{anyhow, Context};
|
||||
use futures::future::join_all;
|
||||
use monitor_client::entities::{
|
||||
alert::{Alert, AlertData},
|
||||
alerter::*,
|
||||
deployment::DeploymentState,
|
||||
server::stats::SeverityLevel,
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use reqwest::StatusCode;
|
||||
use slack::types::Block;
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
#[instrument]
|
||||
pub async fn send_alerts(alerts: &[Alert]) {
|
||||
if alerts.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let alerters = find_collect(
|
||||
&db_client().await.alerters,
|
||||
doc! { "config.params.enabled": true },
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
if let Err(e) = alerters {
|
||||
error!(
|
||||
"ERROR sending alerts | failed to get alerters from db | {e:#}"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let alerters = alerters.unwrap();
|
||||
|
||||
let handles =
|
||||
alerts.iter().map(|alert| send_alert(&alerters, alert));
|
||||
|
||||
join_all(handles).await;
|
||||
}
|
||||
|
||||
/// Fan a single alert out to every alerter concurrently, logging
/// (not propagating) each individual delivery failure.
#[instrument(level = "debug")]
async fn send_alert(alerters: &[Alerter], alert: &Alert) {
  if alerters.is_empty() {
    return;
  }

  let handles = alerters.iter().map(|alerter| async {
    match &alerter.config {
      AlerterConfig::Slack(SlackAlerterConfig { url, enabled }) => {
        // Respect the per-alerter enabled flag.
        if !enabled {
          return Ok(());
        }
        send_slack_alert(url, alert)
          .await
          .context("failed to send slack alert")
      }
      AlerterConfig::Custom(CustomAlerterConfig { url, enabled }) => {
        if !enabled {
          return Ok(());
        }
        send_custom_alert(url, alert).await.context(format!(
          "failed to send alert to custom alerter at {url}"
        ))
      }
    }
  });

  // Run all sends concurrently, then log any failures.
  join_all(handles)
    .await
    .into_iter()
    .filter_map(|res| res.err())
    .for_each(|e| error!("{e:#}"));
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_custom_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let res = reqwest::Client::new()
|
||||
.post(url)
|
||||
.json(alert)
|
||||
.send()
|
||||
.await
|
||||
.context("failed at post request to alerter")?;
|
||||
let status = res.status();
|
||||
if status != StatusCode::OK {
|
||||
let text = res
|
||||
.text()
|
||||
.await
|
||||
.context("failed to get response text on alerter response")?;
|
||||
return Err(anyhow!(
|
||||
"post to alerter failed | {status} | {text}"
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_slack_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let level = fmt_level(alert.level);
|
||||
let (text, blocks): (_, Option<_>) = match &alert.data {
|
||||
AlertData::ServerUnreachable { name, region, .. } => {
|
||||
let region = fmt_region(region);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
let text =
|
||||
format!("{level} | *{name}*{region} is now *reachable*");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
"*{name}*{region} is now *reachable*"
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let text =
|
||||
format!("{level} | *{name}*{region} is *unreachable* ❌");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
"*{name}*{region} is *unreachable* ❌"
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
..
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let text = format!("{level} | *{name}*{region} cpu usage at *{percentage:.1}%* 📈 🚨");
|
||||
let blocks = vec![
|
||||
Block::header(format!("{level} 🚨")),
|
||||
Block::section(format!(
|
||||
"*{name}*{region} cpu usage at *{percentage:.1}%* 📈 🚨"
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
..
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
let text =
|
||||
format!("{level} | *{name}*{region} memory usage at *{percentage:.1}%* 💾 🚨");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
"*{name}*{region} memory usage at *{percentage:.1}%* 💾 🚨"
|
||||
)),
|
||||
Block::section(format!(
|
||||
"using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
..
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
let text = format!("{level} | *{name}*{region} disk usage at *{percentage:.1}%* | mount point: *{path:?}* 💿 🚨");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
"*{name}*{region} disk usage at *{percentage:.1}%* 💿 🚨"
|
||||
)),
|
||||
Block::section(format!(
|
||||
"mount point: {path:?} | using *{used_gb:.1} GiB* / *{total_gb:.1} GiB*"
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
name,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
..
|
||||
} => {
|
||||
let to = fmt_docker_container_state(to);
|
||||
let text = format!("📦 container *{name}* is now {to}");
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(format!(
|
||||
"server: {server_name}\nprevious: {from}"
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed { instance_id } => {
|
||||
let text = format!(
|
||||
"{level} | Failed to terminated AWS builder instance"
|
||||
);
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(format!("instance id: {instance_id}")),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if !text.is_empty() {
|
||||
let slack = slack::Client::new(url);
|
||||
slack.send_message(text, blocks).await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Format an optional server region as a parenthesized suffix,
/// e.g. `" (us-east-1)"`; an empty string when absent.
fn fmt_region(region: &Option<String>) -> String {
  region
    .as_ref()
    .map(|region| format!(" ({region})"))
    .unwrap_or_default()
}
|
||||
|
||||
fn fmt_docker_container_state(state: &DeploymentState) -> String {
|
||||
match state {
|
||||
DeploymentState::Running => String::from("Running ▶️"),
|
||||
DeploymentState::Exited => String::from("Exited 🛑"),
|
||||
DeploymentState::Restarting => String::from("Restarting 🔄"),
|
||||
DeploymentState::NotDeployed => String::from("Not Deployed"),
|
||||
_ => state.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Human-readable severity prefix used in alert message text.
fn fmt_level(level: SeverityLevel) -> &'static str {
  match level {
    SeverityLevel::Critical => "CRITICAL 🚨",
    SeverityLevel::Warning => "WARNING 🚨",
    SeverityLevel::Ok => "OK ✅",
  }
}
|
||||
84
bin/core/src/helpers/cache.rs
Normal file
84
bin/core/src/helpers/cache.rs
Normal file
@@ -0,0 +1,84 @@
|
||||
use std::{collections::HashMap, hash::Hash};
|
||||
|
||||
use monitor_client::busy::Busy;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
/// An async (tokio `RwLock`) in-memory key-value cache. Values are
/// cloned out on every read, so `T` is expected to be cheap to
/// clone (e.g. `Arc`-wrapped — see `ActionStates`).
#[derive(Default)]
pub struct Cache<K: PartialEq + Eq + Hash, T: Clone + Default> {
  cache: RwLock<HashMap<K, T>>,
}
|
||||
|
||||
impl<
|
||||
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
|
||||
T: Clone + Default,
|
||||
> Cache<K, T>
|
||||
{
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get(&self, key: &K) -> Option<T> {
|
||||
self.cache.read().await.get(key).cloned()
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_or_insert_default(&self, key: &K) -> T {
|
||||
let mut lock = self.cache.write().await;
|
||||
match lock.get(key).cloned() {
|
||||
Some(item) => item,
|
||||
None => {
|
||||
let item: T = Default::default();
|
||||
lock.insert(key.clone(), item.clone());
|
||||
item
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_list(&self) -> Vec<T> {
|
||||
let cache = self.cache.read().await;
|
||||
cache.iter().map(|(_, e)| e.clone()).collect()
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn insert<Key>(&self, key: Key, val: T)
|
||||
where
|
||||
T: std::fmt::Debug,
|
||||
Key: Into<K> + std::fmt::Debug,
|
||||
{
|
||||
self.cache.write().await.insert(key.into(), val);
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self, handler))]
|
||||
pub async fn update_entry<Key>(
|
||||
&self,
|
||||
key: Key,
|
||||
handler: impl Fn(&mut T),
|
||||
) where
|
||||
Key: Into<K> + std::fmt::Debug,
|
||||
{
|
||||
let mut cache = self.cache.write().await;
|
||||
handler(cache.entry(key.into()).or_default());
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn clear(&self) {
|
||||
self.cache.write().await.clear();
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn remove(&self, key: &K) {
|
||||
self.cache.write().await.remove(key);
|
||||
}
|
||||
}
|
||||
|
||||
impl<
|
||||
K: PartialEq + Eq + Hash + std::fmt::Debug + Clone,
|
||||
T: Clone + Default + Busy,
|
||||
> Cache<K, T>
|
||||
{
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn busy(&self, id: &K) -> bool {
|
||||
match self.get(id).await {
|
||||
Some(state) => state.busy(),
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
34
bin/core/src/helpers/channel.rs
Normal file
34
bin/core/src/helpers/channel.rs
Normal file
@@ -0,0 +1,34 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use monitor_client::entities::update::{Update, UpdateListItem};
|
||||
use tokio::sync::{broadcast, Mutex};
|
||||
|
||||
/// Global broadcast channel sending `(build_id, update)` pairs,
/// used to signal cancellation of an in-progress build.
/// Lazily initialized on first access.
pub fn build_cancel_channel(
) -> &'static BroadcastChannel<(String, Update)> {
  static BUILD_CANCEL_CHANNEL: OnceLock<
    BroadcastChannel<(String, Update)>,
  > = OnceLock::new();
  BUILD_CANCEL_CHANNEL.get_or_init(|| BroadcastChannel::new(100))
}
|
||||
|
||||
/// Global broadcast channel over which `UpdateListItem`s are
/// published. Lazily initialized on first access.
pub fn update_channel() -> &'static BroadcastChannel<UpdateListItem> {
  static UPDATE_CHANNEL: OnceLock<BroadcastChannel<UpdateListItem>> =
    OnceLock::new();
  UPDATE_CHANNEL.get_or_init(|| BroadcastChannel::new(100))
}
|
||||
|
||||
/// A tokio broadcast sender/receiver pair. The sender is
/// mutex-guarded for shared use; the stored receiver is presumably
/// kept so the channel stays open with no other subscribers —
/// confirm against consumers.
pub struct BroadcastChannel<T> {
  pub sender: Mutex<broadcast::Sender<T>>,
  pub receiver: broadcast::Receiver<T>,
}
|
||||
|
||||
impl<T: Clone> BroadcastChannel<T> {
|
||||
pub fn new(capacity: usize) -> BroadcastChannel<T> {
|
||||
let (sender, receiver) = broadcast::channel(capacity);
|
||||
BroadcastChannel {
|
||||
sender: sender.into(),
|
||||
receiver,
|
||||
}
|
||||
}
|
||||
}
|
||||
118
bin/core/src/helpers/mod.rs
Normal file
118
bin/core/src/helpers/mod.rs
Normal file
@@ -0,0 +1,118 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{anyhow, Context};
|
||||
use monitor_client::entities::{
|
||||
permission::{Permission, PermissionLevel, UserTarget},
|
||||
server::Server,
|
||||
update::ResourceTarget,
|
||||
user::User,
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use periphery_client::PeripheryClient;
|
||||
use rand::{thread_rng, Rng};
|
||||
|
||||
use crate::{config::core_config, state::db_client};
|
||||
|
||||
pub mod action_state;
|
||||
pub mod alert;
|
||||
pub mod cache;
|
||||
pub mod channel;
|
||||
pub mod procedure;
|
||||
pub mod prune;
|
||||
pub mod query;
|
||||
pub mod update;
|
||||
|
||||
// pub mod resource;
|
||||
|
||||
/// Returns true when `word` is empty or consists solely of ASCII
/// space characters. Other whitespace (tabs, newlines) does NOT
/// count as a space.
pub fn empty_or_only_spaces(word: &str) -> bool {
  // `all` on an empty iterator is true, which covers the
  // empty-string case of the original manual loop.
  word.chars().all(|c| c == ' ')
}
|
||||
|
||||
pub fn random_duration(min_ms: u64, max_ms: u64) -> Duration {
|
||||
Duration::from_millis(thread_rng().gen_range(min_ms..max_ms))
|
||||
}
|
||||
|
||||
/// Remove the given resource from every user's `recently_viewed`
/// list — presumably called when the resource is deleted; confirm
/// callers. Failure is logged as a warning, not propagated.
#[instrument]
pub async fn remove_from_recently_viewed<T>(resource: T)
where
  T: Into<ResourceTarget> + std::fmt::Debug,
{
  let resource: ResourceTarget = resource.into();
  let (ty, id) = resource.extract_variant_id();
  // `$pull` removes every matching element from the array field,
  // across all user documents (empty filter).
  if let Err(e) = db_client()
    .await
    .users
    .update_many(
      doc! {},
      doc! {
        "$pull": {
          "recently_viewed": {
            "type": ty.to_string(),
            "id": id,
          }
        }
      },
      None,
    )
    .await
    .context("failed to remove resource from users recently viewed")
  {
    warn!("{e:#}");
  }
}
|
||||
|
||||
//
|
||||
|
||||
pub fn periphery_client(
|
||||
server: &Server,
|
||||
) -> anyhow::Result<PeripheryClient> {
|
||||
if !server.config.enabled {
|
||||
return Err(anyhow!("server not enabled"));
|
||||
}
|
||||
|
||||
let client = PeripheryClient::new(
|
||||
&server.config.address,
|
||||
&core_config().passkey,
|
||||
);
|
||||
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
/// Record a permission granting `user` the given `level` on
/// `target`. Failure is logged, not propagated.
#[instrument]
pub async fn create_permission<T>(
  user: &User,
  target: T,
  level: PermissionLevel,
) where
  T: Into<ResourceTarget> + std::fmt::Debug,
{
  // No need to actually create permissions for admins
  if user.admin {
    return;
  }
  let target: ResourceTarget = target.into();
  if let Err(e) = db_client()
    .await
    .permissions
    .insert_one(
      Permission {
        // Default id — presumably assigned on insert; confirm.
        id: Default::default(),
        user_target: UserTarget::User(user.id.clone()),
        resource_target: target.clone(),
        level,
      },
      None,
    )
    .await
  {
    error!("failed to create permission for {target:?} | {e:#}");
  };
}
|
||||
273
bin/core/src/helpers/procedure.rs
Normal file
273
bin/core/src/helpers/procedure.rs
Normal file
@@ -0,0 +1,273 @@
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use anyhow::{anyhow, Context, Ok};
|
||||
use futures::future::join_all;
|
||||
use monitor_client::{
|
||||
api::execute::Execution,
|
||||
entities::{
|
||||
monitor_timestamp,
|
||||
procedure::{EnabledExecution, Procedure, ProcedureType},
|
||||
update::Update,
|
||||
user::procedure_user,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::state::State;
|
||||
|
||||
use super::update::update_update;
|
||||
|
||||
#[instrument]
|
||||
pub async fn execute_procedure(
|
||||
procedure: &Procedure,
|
||||
update: &Mutex<Update>,
|
||||
) -> anyhow::Result<()> {
|
||||
let start_ts = monitor_timestamp();
|
||||
|
||||
use ProcedureType::*;
|
||||
match procedure.config.procedure_type {
|
||||
Sequence => {
|
||||
add_line_to_update(
|
||||
update,
|
||||
&format!(
|
||||
"executing sequence: {} ({})",
|
||||
procedure.name, procedure.id
|
||||
),
|
||||
)
|
||||
.await;
|
||||
execute_sequence(
|
||||
filter_list_by_enabled(&procedure.config.executions),
|
||||
&procedure.id,
|
||||
&procedure.name,
|
||||
update,
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
let time = Duration::from_millis(
|
||||
(monitor_timestamp() - start_ts) as u64,
|
||||
);
|
||||
format!(
|
||||
"failed sequence execution after {time:?}. {} ({})",
|
||||
procedure.name, procedure.id
|
||||
)
|
||||
})?;
|
||||
let time = Duration::from_millis(
|
||||
(monitor_timestamp() - start_ts) as u64,
|
||||
);
|
||||
add_line_to_update(
|
||||
update,
|
||||
&format!(
|
||||
"finished sequence execution in {time:?}: {} ({}) ✅",
|
||||
procedure.name, procedure.id
|
||||
),
|
||||
)
|
||||
.await;
|
||||
Ok(())
|
||||
}
|
||||
Parallel => {
|
||||
add_line_to_update(
|
||||
update,
|
||||
&format!(
|
||||
"executing parallel: {} ({})",
|
||||
procedure.name, procedure.id
|
||||
),
|
||||
)
|
||||
.await;
|
||||
execute_parallel(
|
||||
filter_list_by_enabled(&procedure.config.executions),
|
||||
&procedure.id,
|
||||
&procedure.name,
|
||||
update,
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
let time = Duration::from_millis(
|
||||
(monitor_timestamp() - start_ts) as u64,
|
||||
);
|
||||
format!(
|
||||
"failed parallel execution after {time:?}. {} ({})",
|
||||
procedure.name, procedure.id
|
||||
)
|
||||
})?;
|
||||
let time = Duration::from_millis(
|
||||
(monitor_timestamp() - start_ts) as u64,
|
||||
);
|
||||
add_line_to_update(
|
||||
update,
|
||||
&format!(
|
||||
"finished parallel execution in {time:?}: {} ({}) ✅",
|
||||
procedure.name, procedure.id
|
||||
),
|
||||
)
|
||||
.await;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Run a single `Execution` as the procedure user by resolving the
/// corresponding API request against the global `State`.
///
/// Returns an error when the resulting `Update` reports failure.
#[instrument]
async fn execute_execution(
  execution: Execution,

  // used to prevent recursive procedure
  parent_id: &str,
  parent_name: &str,
) -> anyhow::Result<()> {
  let user = procedure_user().to_owned();
  let update = match execution {
    Execution::None(_) => return Ok(()),
    Execution::RunProcedure(req) => {
      // Guards only direct self-reference (by id or name); deeper
      // cycles are not detected here — confirm handling elsewhere.
      if req.procedure == parent_id || req.procedure == parent_name {
        return Err(anyhow!("Self referential procedure detected"));
      }
      State
        .resolve(req, user)
        .await
        .context("failed at RunProcedure")?
    }
    Execution::RunBuild(req) => State
      .resolve(req, user)
      .await
      .context("failed at RunBuild")?,
    Execution::Deploy(req) => {
      State.resolve(req, user).await.context("failed at Deploy")?
    }
    Execution::StartContainer(req) => State
      .resolve(req, user)
      .await
      .context("failed at StartContainer")?,
    Execution::StopContainer(req) => {
      State
        .resolve(req, user)
        .await
        .context("failed at StopContainer")?
    }
    Execution::StopAllContainers(req) => State
      .resolve(req, user)
      .await
      .context("failed at StopAllContainers")?,
    Execution::RemoveContainer(req) => State
      .resolve(req, user)
      .await
      .context("failed at RemoveContainer")?,
    Execution::CloneRepo(req) => State
      .resolve(req, user)
      .await
      .context("failed at CloneRepo")?,
    Execution::PullRepo(req) => State
      .resolve(req, user)
      .await
      .context("failed at PullRepo")?,
    Execution::PruneDockerNetworks(req) => State
      .resolve(req, user)
      .await
      .context("failed at PruneDockerNetworks")?,
    Execution::PruneDockerImages(req) => State
      .resolve(req, user)
      .await
      .context("failed at PruneDockerImages")?,
    Execution::PruneDockerContainers(req) => State
      .resolve(req, user)
      .await
      .context("failed at PruneDockerContainers")?,
  };
  // Each resolver returns the finished Update; an unsuccessful
  // update is surfaced as an error pointing at its id.
  if update.success {
    Ok(())
  } else {
    Err(anyhow!(
      "execution not successful. see update {}",
      update.id
    ))
  }
}
|
||||
|
||||
/// Run executions one at a time, stopping at the first failure.
/// Progress lines are appended to `update` before and after each
/// stage.
#[instrument]
async fn execute_sequence(
  executions: Vec<Execution>,
  parent_id: &str,
  parent_name: &str,
  update: &Mutex<Update>,
) -> anyhow::Result<()> {
  for execution in executions {
    let now = Instant::now();
    add_line_to_update(
      update,
      &format!("executing stage: {execution:?}"),
    )
    .await;
    let fail_log = format!("failed on {execution:?}");
    execute_execution(execution.clone(), parent_id, parent_name)
      .await
      .context(fail_log)?;
    add_line_to_update(
      update,
      &format!(
        "finished stage in {:?}: {execution:?}",
        now.elapsed()
      ),
    )
    .await;
  }
  Ok(())
}
|
||||
|
||||
/// Run all executions concurrently. Every stage runs to completion
/// before errors are inspected; fails if any stage failed.
#[instrument]
async fn execute_parallel(
  executions: Vec<Execution>,
  parent_id: &str,
  parent_name: &str,
  update: &Mutex<Update>,
) -> anyhow::Result<()> {
  let futures = executions.into_iter().map(|execution| async move {
    let now = Instant::now();
    add_line_to_update(
      update,
      &format!("executing stage: {execution:?}"),
    )
    .await;
    let fail_log = format!("failed on {execution:?}");
    let res =
      execute_execution(execution.clone(), parent_id, parent_name)
        .await
        .context(fail_log);
    add_line_to_update(
      update,
      &format!(
        "finished stage in {:?}: {execution:?}",
        now.elapsed()
      ),
    )
    .await;
    res
  });
  // Collecting into Result surfaces the first Err (all futures
  // have already completed by this point).
  join_all(futures)
    .await
    .into_iter()
    .collect::<anyhow::Result<_>>()?;
  Ok(())
}
|
||||
|
||||
fn filter_list_by_enabled(
|
||||
list: &[EnabledExecution],
|
||||
) -> Vec<Execution> {
|
||||
list
|
||||
.iter()
|
||||
.filter(|item| item.enabled)
|
||||
.map(|item| item.execution.clone())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// ASSUMES FIRST LOG IS ALREADY CREATED
///
/// Append `line` to the first log's stdout, then persist the
/// updated `Update`. A clone is taken so the lock is not held
/// across the db await; persistence failure is logged only.
#[instrument(level = "debug")]
async fn add_line_to_update(update: &Mutex<Update>, line: &str) {
  let mut lock = update.lock().await;
  // Panics if logs is empty — hence the assumption above.
  let log = &mut lock.logs[0];
  log.stdout.push('\n');
  log.stdout.push_str(line);
  let update = lock.clone();
  drop(lock);
  if let Err(e) = update_update(update).await {
    error!("failed to update an update during procedure | {e:#}");
  };
}
|
||||
64
bin/core/src/helpers/prune.rs
Normal file
64
bin/core/src/helpers/prune.rs
Normal file
@@ -0,0 +1,64 @@
|
||||
use async_timing_util::{
|
||||
unix_timestamp_ms, wait_until_timelength, Timelength, ONE_DAY_MS,
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
|
||||
use crate::{config::core_config, state::db_client};
|
||||
|
||||
/// Spawn a background task that prunes old stats and alerts from
/// the db once per day. The `5000` is passed to
/// `wait_until_timelength` — presumably a ms offset past the day
/// boundary; confirm against async_timing_util.
pub fn spawn_prune_loop() {
  tokio::spawn(async move {
    loop {
      wait_until_timelength(Timelength::OneDay, 5000).await;
      // Prune both concurrently; log failures independently.
      let (stats_res, alerts_res) =
        tokio::join!(prune_stats(), prune_alerts());
      if let Err(e) = stats_res {
        error!("error in pruning stats | {e:#}");
      }
      if let Err(e) = alerts_res {
        error!("error in pruning alerts | {e:#}");
      }
    }
  });
}
|
||||
|
||||
/// Delete stats records older than `keep_stats_for_days`.
/// A setting of 0 disables stats pruning entirely.
async fn prune_stats() -> anyhow::Result<()> {
  if core_config().keep_stats_for_days == 0 {
    return Ok(());
  }
  // Cutoff in ms since epoch; records strictly older are removed.
  let delete_before_ts = (unix_timestamp_ms()
    - core_config().keep_stats_for_days as u128 * ONE_DAY_MS)
    as i64;
  let res = db_client()
    .await
    .stats
    .delete_many(
      doc! {
        "ts": { "$lt": delete_before_ts }
      },
      None,
    )
    .await?;
  info!("deleted {} stats from db", res.deleted_count);
  Ok(())
}
|
||||
|
||||
/// Delete alerts older than `keep_alerts_for_days`.
/// A setting of 0 disables alert pruning entirely.
async fn prune_alerts() -> anyhow::Result<()> {
  if core_config().keep_alerts_for_days == 0 {
    return Ok(());
  }
  // Cutoff in ms since epoch; records strictly older are removed.
  let delete_before_ts = (unix_timestamp_ms()
    - core_config().keep_alerts_for_days as u128 * ONE_DAY_MS)
    as i64;
  let res = db_client()
    .await
    .alerts
    .delete_many(
      doc! {
        "ts": { "$lt": delete_before_ts }
      },
      None,
    )
    .await?;
  info!("deleted {} alerts from db", res.deleted_count);
  Ok(())
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user