forked from github-starred/komodo
Compare commits
393 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
a3a01f1625 | ||
|
|
aec8fa2bf1 | ||
|
|
7ff2dba82f | ||
|
|
9c86b2d239 | ||
|
|
b1fec7c663 | ||
|
|
8341c6b802 | ||
|
|
73285c4374 | ||
|
|
32d48cdb02 | ||
|
|
8e081fd09c | ||
|
|
04531f1dea | ||
|
|
80f439d472 | ||
|
|
d5e03d6d16 | ||
|
|
a9f55bb8e6 | ||
|
|
9e765f93f5 | ||
|
|
b3aa8e906f | ||
|
|
03fe442aa0 | ||
|
|
d268009a6a | ||
|
|
f0697e812a | ||
|
|
78766463d6 | ||
|
|
0fa1edba2c | ||
|
|
bbd968cac3 | ||
|
|
5f24fc1be3 | ||
|
|
7ecd2b0b0b | ||
|
|
7bf44d2e04 | ||
|
|
24e0672384 | ||
|
|
04f081631f | ||
|
|
b1af956b63 | ||
|
|
370712b29f | ||
|
|
2b6c552964 | ||
|
|
434a1d8ea9 | ||
|
|
0b7f28360f | ||
|
|
3c8ef0ab29 | ||
|
|
930b2423c3 | ||
|
|
546747b5f2 | ||
|
|
c6df866755 | ||
|
|
ea5e684915 | ||
|
|
64db8933de | ||
|
|
7a5580de57 | ||
|
|
b1656bb174 | ||
|
|
559ce103da | ||
|
|
75e278a57b | ||
|
|
430f3ddc34 | ||
|
|
6c30c202e9 | ||
|
|
c5401de1c5 | ||
|
|
7a3d9e0ef6 | ||
|
|
595e3ece42 | ||
|
|
a3bc895755 | ||
|
|
3e3def03ec | ||
|
|
bc672d9649 | ||
|
|
ea6dee4d51 | ||
|
|
b985f18c74 | ||
|
|
45909b2f04 | ||
|
|
2b5a54ce89 | ||
|
|
a18f33b95e | ||
|
|
f35b00ea95 | ||
|
|
70fab08520 | ||
|
|
0331780a5f | ||
|
|
06cdfd2bbc | ||
|
|
1555202569 | ||
|
|
5139622aad | ||
|
|
61ce2ee3db | ||
|
|
3171c14f2b | ||
|
|
521db748d8 | ||
|
|
35bf224080 | ||
|
|
e0b31cfe51 | ||
|
|
0a890078b0 | ||
|
|
df97ced7a4 | ||
|
|
d4e5e2e6d8 | ||
|
|
19aa60dcb5 | ||
|
|
fc19c53e6f | ||
|
|
4f0af960db | ||
|
|
e2ec5258fb | ||
|
|
49b6545a02 | ||
|
|
0aabaa9e62 | ||
|
|
dc65986eab | ||
|
|
1d8f28437d | ||
|
|
c1502e89c2 | ||
|
|
0bd15fc442 | ||
|
|
5a3621b02e | ||
|
|
38192e2dac | ||
|
|
5d271d5547 | ||
|
|
11fb67a35b | ||
|
|
a80499dcc4 | ||
|
|
8c76b8487f | ||
|
|
2b32d9042a | ||
|
|
dc48f1f2ca | ||
|
|
8e7b7bdcf1 | ||
|
|
f11d64f72e | ||
|
|
2ffae85180 | ||
|
|
bd79d0f1e0 | ||
|
|
e890b1f675 | ||
|
|
3b7de25c30 | ||
|
|
793bb99f31 | ||
|
|
d465c9f273 | ||
|
|
ce641a8974 | ||
|
|
1b89ceb122 | ||
|
|
2dbc011d26 | ||
|
|
246da88ae1 | ||
|
|
a8c16f64b1 | ||
|
|
a5b711a348 | ||
|
|
9666e9ad83 | ||
|
|
7479640c73 | ||
|
|
4823825035 | ||
|
|
23897a7acf | ||
|
|
20d5588b5c | ||
|
|
f7e15ccde5 | ||
|
|
cf7623b1fc | ||
|
|
d3c464c05d | ||
|
|
5c9d416aa4 | ||
|
|
aabcd88312 | ||
|
|
9d2624c6bc | ||
|
|
ee11fb0b6c | ||
|
|
45adfbddd0 | ||
|
|
d26d035dc6 | ||
|
|
e673ba0adf | ||
|
|
f876facfa7 | ||
|
|
3a47d57478 | ||
|
|
a707028277 | ||
|
|
0c6276c677 | ||
|
|
fc9c6706f1 | ||
|
|
7674269ce9 | ||
|
|
3b511c5adc | ||
|
|
87221a10e9 | ||
|
|
450cb6a148 | ||
|
|
f252cefb21 | ||
|
|
7855e9d688 | ||
|
|
feb263c15f | ||
|
|
4f8d1c22cc | ||
|
|
60bd47834e | ||
|
|
4d632a6b61 | ||
|
|
381dd76723 | ||
|
|
077e28a5fe | ||
|
|
6b02aaed7d | ||
|
|
e466944c05 | ||
|
|
8ff94b7465 | ||
|
|
b17df5ed7b | ||
|
|
207dc30206 | ||
|
|
c3eb386bdb | ||
|
|
4279e46892 | ||
|
|
8d3d2fee12 | ||
|
|
1df36c4266 | ||
|
|
36f7ad33c7 | ||
|
|
ec34b2c139 | ||
|
|
d14c28d1f2 | ||
|
|
68f7a0e9ce | ||
|
|
50f0376f0a | ||
|
|
bbd53747ad | ||
|
|
6a2adf1f83 | ||
|
|
128b15b94f | ||
|
|
8d74b377b7 | ||
|
|
d7e972e5c6 | ||
|
|
e5cb4aac5a | ||
|
|
d0f62f8326 | ||
|
|
47c4091a4b | ||
|
|
973480e2b3 | ||
|
|
b9e1cc87d2 | ||
|
|
05d20c8603 | ||
|
|
fe2d68a001 | ||
|
|
26fd5b2a6d | ||
|
|
76457bcb61 | ||
|
|
ebd2c2238d | ||
|
|
b7fc1bef7b | ||
|
|
50b9f2e1bf | ||
|
|
41ce86f6ab | ||
|
|
7a21c01e52 | ||
|
|
e63e282510 | ||
|
|
5456b36c18 | ||
|
|
fcfb58a7e9 | ||
|
|
2203004a74 | ||
|
|
996fb49823 | ||
|
|
35d22c77a2 | ||
|
|
44ab89600f | ||
|
|
0900e48cb8 | ||
|
|
c530a46a27 | ||
|
|
f69c8db3ea | ||
|
|
48f2f651e1 | ||
|
|
bdb5b4185e | ||
|
|
42a7b8c19b | ||
|
|
ded17e4840 | ||
|
|
80fb1e6889 | ||
|
|
1dc861f538 | ||
|
|
3da63395fd | ||
|
|
c40cbc4d77 | ||
|
|
05e352e88c | ||
|
|
5884c09fb8 | ||
|
|
f8add38043 | ||
|
|
501f734e8b | ||
|
|
de62732ac8 | ||
|
|
bfa61058cd | ||
|
|
72ca6d9910 | ||
|
|
4d1ac32ad3 | ||
|
|
927e5959fa | ||
|
|
37ccc6e1ef | ||
|
|
deaa8754f3 | ||
|
|
dd8ac67c72 | ||
|
|
be4457c9cf | ||
|
|
1868421815 | ||
|
|
366f7a12b4 | ||
|
|
75119370df | ||
|
|
9e85b9d4c8 | ||
|
|
8afbbf23dc | ||
|
|
770a1116a1 | ||
|
|
0b4aebbc24 | ||
|
|
f1696e26e4 | ||
|
|
1a7b682301 | ||
|
|
b0110b05aa | ||
|
|
561b490f26 | ||
|
|
cac1f0b42e | ||
|
|
28886fb304 | ||
|
|
fb84d4cf7d | ||
|
|
31e9624556 | ||
|
|
3864bb7115 | ||
|
|
cea8601246 | ||
|
|
a546364bf3 | ||
|
|
c8c62ea562 | ||
|
|
845e8780c7 | ||
|
|
db60347566 | ||
|
|
c3ea0239d6 | ||
|
|
e9d13449bf | ||
|
|
2daa92a639 | ||
|
|
6473080078 | ||
|
|
d3957f65dc | ||
|
|
cb34969f1e | ||
|
|
55a0a8cd05 | ||
|
|
89f08372c6 | ||
|
|
6a3ce2d426 | ||
|
|
4928378d46 | ||
|
|
eea222cfba | ||
|
|
6e9cc2dc77 | ||
|
|
55d45084d0 | ||
|
|
9657a44049 | ||
|
|
51fa9ae3c2 | ||
|
|
5fd256444e | ||
|
|
059716f178 | ||
|
|
0bee1fe2c5 | ||
|
|
1e58c1a958 | ||
|
|
ed1431db0a | ||
|
|
dc769ff159 | ||
|
|
098f23ac4c | ||
|
|
03f577d22f | ||
|
|
95ca217362 | ||
|
|
6d61045764 | ||
|
|
34e075eaf3 | ||
|
|
232dc0bb4e | ||
|
|
0cc0ee2aab | ||
|
|
edebe925ff | ||
|
|
5fd45bbc7b | ||
|
|
0a490dadb2 | ||
|
|
23847c15bc | ||
|
|
0d238aee4f | ||
|
|
98ad6cf5fa | ||
|
|
e35b81630b | ||
|
|
1215852fe4 | ||
|
|
4164b76ff5 | ||
|
|
26a9daffeb | ||
|
|
8bb9f16e9b | ||
|
|
b6eaf76497 | ||
|
|
073893da0e | ||
|
|
e71547f1c2 | ||
|
|
1991627990 | ||
|
|
3434d827a3 | ||
|
|
1ef8b9878a | ||
|
|
07ddaa8377 | ||
|
|
142c08cde4 | ||
|
|
1aa1422faa | ||
|
|
1394e8a6b1 | ||
|
|
420ee10211 | ||
|
|
e918461dc5 | ||
|
|
4dc9ca27be | ||
|
|
f49b186f2f | ||
|
|
6e039b41f1 | ||
|
|
e7cd77b022 | ||
|
|
556cbd04c7 | ||
|
|
4e3d181466 | ||
|
|
5d4326f46f | ||
|
|
4bb486ad0a | ||
|
|
d29c5112d8 | ||
|
|
d41315b8a4 | ||
|
|
847404388c | ||
|
|
eef8ec59b8 | ||
|
|
9eb32f9ff5 | ||
|
|
859bfe67ef | ||
|
|
21ea469cd4 | ||
|
|
7fb902b892 | ||
|
|
c9c4ac47ee | ||
|
|
f228cd31f3 | ||
|
|
4feecb4b97 | ||
|
|
e2680d0942 | ||
|
|
7422c0730d | ||
|
|
37ac0dc7e3 | ||
|
|
dccaca1df4 | ||
|
|
886aea4c36 | ||
|
|
cbca070bae | ||
|
|
b4bdd401f6 | ||
|
|
e546166240 | ||
|
|
21689ce0ad | ||
|
|
941787db64 | ||
|
|
d4b1aacac3 | ||
|
|
30f89461bf | ||
|
|
a42d1397e9 | ||
|
|
b29313c28f | ||
|
|
08a246a90c | ||
|
|
1a08df28d0 | ||
|
|
a226ffc256 | ||
|
|
b385ee5ec3 | ||
|
|
c78c34357d | ||
|
|
4b7c692f00 | ||
|
|
1ac98a096e | ||
|
|
281a2dc1ce | ||
|
|
0fe91378a6 | ||
|
|
11e76d1cf2 | ||
|
|
a3bcd71105 | ||
|
|
3ecc56dd76 | ||
|
|
7239cbb19b | ||
|
|
a0540f7011 | ||
|
|
37aea7605e | ||
|
|
78be913541 | ||
|
|
c34f5ebf49 | ||
|
|
e5822cefb8 | ||
|
|
4baab194cf | ||
|
|
a896583da6 | ||
|
|
7b2674c38b | ||
|
|
d1e32989e3 | ||
|
|
e802bb3882 | ||
|
|
27a38b1bf5 | ||
|
|
2bc8a754be | ||
|
|
7a2a54bec1 | ||
|
|
6a15150d59 | ||
|
|
1b1dca76da | ||
|
|
a032f0f4ff | ||
|
|
2749d49435 | ||
|
|
d88e42ef2d | ||
|
|
a370e7d121 | ||
|
|
d139ad2b3d | ||
|
|
8d2d180398 | ||
|
|
37ca4ca986 | ||
|
|
33e73b8543 | ||
|
|
cf6e36e90c | ||
|
|
9eb8b32f4a | ||
|
|
b400add6f1 | ||
|
|
24adb89d25 | ||
|
|
4674b2badb | ||
|
|
65d1a69cb9 | ||
|
|
0da5718991 | ||
|
|
6b26cd120c | ||
|
|
28e1bb19a4 | ||
|
|
166107ac07 | ||
|
|
d77201880f | ||
|
|
1d7629e9b2 | ||
|
|
198f690ca5 | ||
|
|
531c79a144 | ||
|
|
d685862713 | ||
|
|
af0f245b5b | ||
|
|
cba36861b7 | ||
|
|
2c2c1d47b4 | ||
|
|
3a6b997241 | ||
|
|
7122f79b9d | ||
|
|
9bcee8122b | ||
|
|
a49c98946e | ||
|
|
7d222a7241 | ||
|
|
33501dac3e | ||
|
|
4675dfa736 | ||
|
|
0be51dc784 | ||
|
|
52453d1320 | ||
|
|
25da97ac1a | ||
|
|
02db5a11d3 | ||
|
|
89a5272246 | ||
|
|
ae51ea1ad6 | ||
|
|
3bdb4bea16 | ||
|
|
677bb14b5d | ||
|
|
6700700a80 | ||
|
|
996d4aa129 | ||
|
|
75894a7282 | ||
|
|
2a065edcf1 | ||
|
|
6f3703acfb | ||
|
|
59e989ecdf | ||
|
|
951ff34a9e | ||
|
|
2d83105500 | ||
|
|
3d455f5142 | ||
|
|
01de8c4a9b | ||
|
|
d5de338561 | ||
|
|
58c1afb8ef | ||
|
|
230f357b5a | ||
|
|
991c95fff0 | ||
|
|
f6243fe6b1 | ||
|
|
9feeccba6e | ||
|
|
673c7f3a6b | ||
|
|
39f900d651 | ||
|
|
8a06a0d6ce | ||
|
|
7789ee4f4a | ||
|
|
0472b6a7f7 | ||
|
|
d1d2227d36 | ||
|
|
cea7c5fc5e |
10
.vscode/resolver.code-snippets
vendored
10
.vscode/resolver.code-snippets
vendored
@@ -3,8 +3,8 @@
|
||||
"scope": "rust",
|
||||
"prefix": "resolve",
|
||||
"body": [
|
||||
"impl Resolve<${1}, User> for State {",
|
||||
"\tasync fn resolve(&self, ${1} { ${0} }: ${1}, _: User) -> anyhow::Result<${2}> {",
|
||||
"impl Resolve<${0}> for ${1} {",
|
||||
"\tasync fn resolve(self, _: &${0}) -> Result<Self::Response, Self::Error> {",
|
||||
"\t\ttodo!()",
|
||||
"\t}",
|
||||
"}"
|
||||
@@ -15,9 +15,9 @@
|
||||
"prefix": "static",
|
||||
"body": [
|
||||
"fn ${1}() -> &'static ${2} {",
|
||||
"\tstatic ${3}: OnceLock<${2}> = OnceLock::new();",
|
||||
"\t${3}.get_or_init(|| {",
|
||||
"\t\t${0}",
|
||||
"\tstatic ${0}: OnceLock<${2}> = OnceLock::new();",
|
||||
"\t${0}.get_or_init(|| {",
|
||||
"\t\ttodo!()",
|
||||
"\t})",
|
||||
"}"
|
||||
]
|
||||
|
||||
1765
Cargo.lock
generated
1765
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
90
Cargo.toml
90
Cargo.toml
@@ -8,7 +8,7 @@ members = [
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "1.19.5"
|
||||
version = "2.0.0-dev-91"
|
||||
edition = "2024"
|
||||
authors = ["mbecker20 <becker.maxh@gmail.com>"]
|
||||
license = "GPL-3.0-or-later"
|
||||
@@ -25,115 +25,125 @@ periphery_client = { path = "client/periphery/rs" }
|
||||
environment_file = { path = "lib/environment_file" }
|
||||
environment = { path = "lib/environment" }
|
||||
interpolate = { path = "lib/interpolate" }
|
||||
secret_file = { path = "lib/secret_file" }
|
||||
formatting = { path = "lib/formatting" }
|
||||
transport = { path = "lib/transport" }
|
||||
database = { path = "lib/database" }
|
||||
encoding = { path = "lib/encoding" }
|
||||
response = { path = "lib/response" }
|
||||
command = { path = "lib/command" }
|
||||
config = { path = "lib/config" }
|
||||
logger = { path = "lib/logger" }
|
||||
cache = { path = "lib/cache" }
|
||||
noise = { path = "lib/noise" }
|
||||
git = { path = "lib/git" }
|
||||
|
||||
# MOGH
|
||||
run_command = { version = "0.0.6", features = ["async_tokio"] }
|
||||
serror = { version = "0.5.1", default-features = false }
|
||||
slack = { version = "0.4.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
|
||||
serror = { version = "0.5.3", default-features = false }
|
||||
slack = { version = "2.0.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
|
||||
derive_default_builder = "0.1.8"
|
||||
derive_empty_traits = "0.1.0"
|
||||
async_timing_util = "1.0.0"
|
||||
async_timing_util = "1.1.0"
|
||||
partial_derive2 = "0.4.3"
|
||||
derive_variants = "1.0.0"
|
||||
mongo_indexed = "2.0.2"
|
||||
resolver_api = "3.0.0"
|
||||
toml_pretty = "1.2.0"
|
||||
toml_pretty = "2.0.0"
|
||||
mungos = "3.2.2"
|
||||
svi = "1.2.0"
|
||||
|
||||
# ASYNC
|
||||
reqwest = { version = "0.12.23", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
|
||||
tokio = { version = "1.47.1", features = ["full"] }
|
||||
tokio-util = { version = "0.7.16", features = ["io", "codec"] }
|
||||
reqwest = { version = "0.12.24", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
|
||||
tokio = { version = "1.48.0", features = ["full"] }
|
||||
tokio-util = { version = "0.7.17", features = ["io", "codec"] }
|
||||
tokio-stream = { version = "0.1.17", features = ["sync"] }
|
||||
pin-project-lite = "0.2.16"
|
||||
futures = "0.3.31"
|
||||
futures-util = "0.3.31"
|
||||
arc-swap = "1.7.1"
|
||||
|
||||
# SERVER
|
||||
tokio-tungstenite = { version = "0.27.0", features = ["rustls-tls-native-roots"] }
|
||||
axum-extra = { version = "0.10.1", features = ["typed-header"] }
|
||||
tokio-tungstenite = { version = "0.28.0", features = ["rustls-tls-native-roots"] }
|
||||
axum-extra = { version = "0.12.2", features = ["typed-header"] }
|
||||
tower-http = { version = "0.6.6", features = ["fs", "cors"] }
|
||||
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
|
||||
axum = { version = "0.8.4", features = ["ws", "json", "macros"] }
|
||||
axum-server = { version = "0.7.3", features = ["tls-rustls"] }
|
||||
axum = { version = "0.8.7", features = ["ws", "json", "macros"] }
|
||||
|
||||
# SER/DE
|
||||
ipnetwork = { version = "0.21.1", features = ["serde"] }
|
||||
indexmap = { version = "2.11.1", features = ["serde"] }
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
indexmap = { version = "2.12.0", features = ["serde"] }
|
||||
serde = { version = "1.0.227", features = ["derive"] }
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
bson = { version = "2.15.0" } # must keep in sync with mongodb version
|
||||
serde_yaml_ng = "0.10.0"
|
||||
serde_json = "1.0.145"
|
||||
serde_qs = "0.15.0"
|
||||
toml = "0.9.5"
|
||||
toml = "0.9.8"
|
||||
url = "2.5.7"
|
||||
|
||||
# ERROR
|
||||
anyhow = "1.0.99"
|
||||
thiserror = "2.0.16"
|
||||
anyhow = "1.0.100"
|
||||
thiserror = "2.0.17"
|
||||
|
||||
# LOGGING
|
||||
opentelemetry-otlp = { version = "0.30.0", features = ["tls-roots", "reqwest-rustls"] }
|
||||
opentelemetry_sdk = { version = "0.30.0", features = ["rt-tokio"] }
|
||||
opentelemetry-otlp = { version = "0.31.0", features = ["tls-roots", "reqwest-rustls"] }
|
||||
opentelemetry_sdk = { version = "0.31.0", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.20", features = ["json"] }
|
||||
opentelemetry-semantic-conventions = "0.30.0"
|
||||
tracing-opentelemetry = "0.31.0"
|
||||
opentelemetry = "0.30.0"
|
||||
opentelemetry-semantic-conventions = "0.31.0"
|
||||
tracing-opentelemetry = "0.32.0"
|
||||
opentelemetry = "0.31.0"
|
||||
tracing = "0.1.41"
|
||||
|
||||
# CONFIG
|
||||
clap = { version = "4.5.47", features = ["derive"] }
|
||||
clap = { version = "4.5.52", features = ["derive"] }
|
||||
dotenvy = "0.15.7"
|
||||
envy = "0.4.2"
|
||||
|
||||
# CRYPTO / AUTH
|
||||
openidconnect = { version = "4.0.1", features = ["accept-rfc3339-timestamps"] }
|
||||
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "serde"] }
|
||||
jsonwebtoken = { version = "9.3.1", default-features = false }
|
||||
openidconnect = "4.0.1"
|
||||
jsonwebtoken = { version = "10.2.0", features = ["aws_lc_rs"] } # locked back with octorust
|
||||
rustls = { version = "0.23.35", features = ["aws-lc-rs"] }
|
||||
pem-rfc7468 = { version = "1.0.0", features = ["alloc"] }
|
||||
urlencoding = "2.1.3"
|
||||
nom_pem = "4.0.0"
|
||||
bcrypt = "0.17.1"
|
||||
base64 = "0.22.1"
|
||||
rustls = "0.23.31"
|
||||
pkcs8 = "0.10.2"
|
||||
snow = "0.10.0"
|
||||
hmac = "0.12.1"
|
||||
sha1 = "0.10.6"
|
||||
sha2 = "0.10.9"
|
||||
rand = "0.9.2"
|
||||
hex = "0.4.3"
|
||||
spki = "0.7.3"
|
||||
der = "0.7.10"
|
||||
|
||||
# SYSTEM
|
||||
hickory-resolver = "0.25.2"
|
||||
portable-pty = "0.9.0"
|
||||
bollard = "0.19.2"
|
||||
sysinfo = "0.37.0"
|
||||
shell-escape = "0.1.5"
|
||||
crossterm = "0.29.0"
|
||||
bollard = "0.19.4"
|
||||
sysinfo = "0.37.1"
|
||||
shlex = "1.3.0"
|
||||
|
||||
# CLOUD
|
||||
aws-config = "1.8.6"
|
||||
aws-sdk-ec2 = "1.167.0"
|
||||
aws-credential-types = "1.2.6"
|
||||
aws-config = "1.8.10"
|
||||
aws-sdk-ec2 = "1.188.0"
|
||||
aws-credential-types = "1.2.9"
|
||||
|
||||
## CRON
|
||||
english-to-cron = "0.1.6"
|
||||
chrono-tz = "0.10.4"
|
||||
chrono = "0.4.42"
|
||||
croner = "3.0.0"
|
||||
croner = "3.0.1"
|
||||
|
||||
# MISC
|
||||
async-compression = { version = "0.4.30", features = ["tokio", "gzip"] }
|
||||
async-compression = { version = "0.4.33", features = ["tokio", "gzip"] }
|
||||
derive_builder = "0.20.2"
|
||||
comfy-table = "7.2.1"
|
||||
typeshare = "1.0.4"
|
||||
octorust = "0.10.0"
|
||||
dashmap = "6.1.0"
|
||||
wildcard = "0.3.0"
|
||||
colored = "3.0.0"
|
||||
regex = "1.11.2"
|
||||
bytes = "1.10.1"
|
||||
shell-escape = "0.1.5"
|
||||
bytes = "1.11.0"
|
||||
regex = "1.12.2"
|
||||
|
||||
2
action/build.ts
Normal file
2
action/build.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
import { run } from "./run.ts";
|
||||
await run("build-komodo");
|
||||
5
action/deno.json
Normal file
5
action/deno.json
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"imports": {
|
||||
"@std/toml": "jsr:@std/toml"
|
||||
}
|
||||
}
|
||||
4
action/deploy-fe.ts
Normal file
4
action/deploy-fe.ts
Normal file
@@ -0,0 +1,4 @@
|
||||
const cmd = "km run -y action deploy-komodo-fe-change";
|
||||
new Deno.Command("bash", {
|
||||
args: ["-c", cmd],
|
||||
}).spawn();
|
||||
2
action/deploy.ts
Executable file
2
action/deploy.ts
Executable file
@@ -0,0 +1,2 @@
|
||||
import { run } from "./run.ts";
|
||||
await run("deploy-komodo");
|
||||
52
action/run.ts
Normal file
52
action/run.ts
Normal file
@@ -0,0 +1,52 @@
|
||||
import * as TOML from "@std/toml";
|
||||
|
||||
export const run = async (action: string) => {
|
||||
const branch = await new Deno.Command("bash", {
|
||||
args: ["-c", "git rev-parse --abbrev-ref HEAD"],
|
||||
})
|
||||
.output()
|
||||
.then((r) => new TextDecoder("utf-8").decode(r.stdout).trim());
|
||||
|
||||
const cargo_toml_str = await Deno.readTextFile("Cargo.toml");
|
||||
const prev_version = (
|
||||
TOML.parse(cargo_toml_str) as {
|
||||
workspace: { package: { version: string } };
|
||||
}
|
||||
).workspace.package.version;
|
||||
|
||||
const [version, tag, count] = prev_version.split("-");
|
||||
const next_count = Number(count) + 1;
|
||||
|
||||
const next_version = `${version}-${tag}-${next_count}`;
|
||||
|
||||
await Deno.writeTextFile(
|
||||
"Cargo.toml",
|
||||
cargo_toml_str.replace(
|
||||
`version = "${prev_version}"`,
|
||||
`version = "${next_version}"`
|
||||
)
|
||||
);
|
||||
|
||||
// Cargo check first here to make sure lock file is updated before commit.
|
||||
const cmd = `
|
||||
cargo check
|
||||
echo ""
|
||||
|
||||
git add --all
|
||||
git commit --all --message "deploy ${version}-${tag}-${next_count}"
|
||||
|
||||
echo ""
|
||||
git push
|
||||
echo ""
|
||||
|
||||
km run -y action ${action} "KOMODO_BRANCH=${branch}&KOMODO_VERSION=${version}&KOMODO_TAG=${tag}-${next_count}"
|
||||
`
|
||||
.split("\n")
|
||||
.map((line) => line.trim())
|
||||
.filter((line) => line.length > 0 && !line.startsWith("//"))
|
||||
.join(" && ");
|
||||
|
||||
new Deno.Command("bash", {
|
||||
args: ["-c", cmd],
|
||||
}).spawn();
|
||||
};
|
||||
@@ -1,7 +1,7 @@
|
||||
## Builds the Komodo Core, Periphery, and Util binaries
|
||||
## for a specific architecture.
|
||||
|
||||
FROM rust:1.89.0-bullseye AS builder
|
||||
FROM rust:1.90.0-bullseye AS builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
@@ -27,6 +27,6 @@ COPY --from=builder /builder/target/release/core /core
|
||||
COPY --from=builder /builder/target/release/periphery /periphery
|
||||
COPY --from=builder /builder/target/release/km /km
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Binaries"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
## Uses chef for dependency caching to help speed up back-to-back builds.
|
||||
|
||||
FROM lukemathwalker/cargo-chef:latest-rust-1.89.0-bullseye AS chef
|
||||
FROM lukemathwalker/cargo-chef:latest-rust-1.90.0-bullseye AS chef
|
||||
WORKDIR /builder
|
||||
|
||||
# Plan just the RECIPE to see if things have changed
|
||||
@@ -31,6 +31,6 @@ COPY --from=builder /builder/target/release/core /core
|
||||
COPY --from=builder /builder/target/release/periphery /periphery
|
||||
COPY --from=builder /builder/target/release/km /km
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Binaries"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -19,10 +19,13 @@ komodo_client.workspace = true
|
||||
database.workspace = true
|
||||
config.workspace = true
|
||||
logger.workspace = true
|
||||
noise.workspace = true
|
||||
# external
|
||||
futures-util.workspace = true
|
||||
comfy-table.workspace = true
|
||||
tokio-util.workspace = true
|
||||
serde_json.workspace = true
|
||||
crossterm.workspace = true
|
||||
serde_qs.workspace = true
|
||||
wildcard.workspace = true
|
||||
tracing.workspace = true
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM rust:1.89.0-bullseye AS builder
|
||||
FROM rust:1.90.0-bullseye AS builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
@@ -20,6 +20,6 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -24,6 +24,6 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -13,6 +13,6 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
|
||||
@@ -61,7 +61,8 @@ async fn list_containers(
|
||||
.map(|s| (s.id.clone(), s))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
client.read(ListAllDockerContainers {
|
||||
servers: Default::default()
|
||||
servers: Default::default(),
|
||||
containers: Default::default(),
|
||||
}),
|
||||
)?;
|
||||
|
||||
@@ -145,7 +146,8 @@ pub async fn inspect_container(
|
||||
.map(|s| (s.id.clone(), s))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
client.read(ListAllDockerContainers {
|
||||
servers: Default::default()
|
||||
servers: Default::default(),
|
||||
containers: Default::default()
|
||||
}),
|
||||
)?;
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::path::Path;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use database::mungos::mongodb::bson::{Document, doc};
|
||||
use komodo_client::entities::{
|
||||
config::cli::args::database::DatabaseCommand, optional_string,
|
||||
};
|
||||
@@ -21,6 +22,7 @@ pub async fn handle(command: &DatabaseCommand) -> anyhow::Result<()> {
|
||||
DatabaseCommand::Copy { yes, index, .. } => {
|
||||
copy(*index, *yes).await
|
||||
}
|
||||
DatabaseCommand::V1Downgrade { yes } => v1_downgrade(*yes).await,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -318,3 +320,47 @@ async fn copy(index: bool, yes: bool) -> anyhow::Result<()> {
|
||||
|
||||
database::utils::copy(&source_db, &target_db).await
|
||||
}
|
||||
|
||||
async fn v1_downgrade(yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} 🦎",
|
||||
"Komodo".bold(),
|
||||
"V1 Downgrade".purple().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Downgrade the database to V1 compatible data structures."
|
||||
.dimmed()
|
||||
);
|
||||
if let Some(uri) = optional_string(&config.database.uri) {
|
||||
println!("{}: {}", " - URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) = optional_string(&config.database.address) {
|
||||
println!("{}: {address}", " - Address".dimmed());
|
||||
}
|
||||
if let Some(username) = optional_string(&config.database.username) {
|
||||
println!("{}: {username}", " - Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}\n",
|
||||
" - Db Name".dimmed(),
|
||||
config.database.db_name,
|
||||
);
|
||||
|
||||
crate::command::wait_for_enter("run downgrade", yes)?;
|
||||
|
||||
let db = database::init(&config.database).await?;
|
||||
|
||||
db.collection::<Document>("Server")
|
||||
.update_many(doc! {}, doc! { "$set": { "info": null } })
|
||||
.await
|
||||
.context("Failed to downgrade Server schema")?;
|
||||
|
||||
info!(
|
||||
"V1 Downgrade complete. Ready to downgrade to komodo-core:1 ✅"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -230,6 +230,12 @@ pub async fn handle(
|
||||
Execution::GlobalAutoUpdate(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RotateAllServerKeys(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RotateCoreKeys(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Sleep(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
@@ -494,6 +500,14 @@ pub async fn handle(
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RotateAllServerKeys(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RotateCoreKeys(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::Sleep(request) => {
|
||||
let duration =
|
||||
Duration::from_millis(request.duration_ms as u64);
|
||||
|
||||
@@ -7,7 +7,7 @@ use komodo_client::{
|
||||
api::read::{
|
||||
ListActions, ListAlerters, ListBuilders, ListBuilds,
|
||||
ListDeployments, ListProcedures, ListRepos, ListResourceSyncs,
|
||||
ListSchedules, ListServers, ListStacks, ListTags,
|
||||
ListSchedules, ListServers, ListStacks, ListTags, ListTerminals,
|
||||
},
|
||||
entities::{
|
||||
ResourceTargetVariant,
|
||||
@@ -35,6 +35,7 @@ use komodo_client::{
|
||||
ResourceSyncListItem, ResourceSyncListItemInfo,
|
||||
ResourceSyncState,
|
||||
},
|
||||
terminal::Terminal,
|
||||
},
|
||||
};
|
||||
use serde::Serialize;
|
||||
@@ -74,15 +75,18 @@ pub async fn handle(list: &args::list::List) -> anyhow::Result<()> {
|
||||
Some(ListCommand::Syncs(filters)) => {
|
||||
list_resources::<ResourceSyncListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Terminals(filters)) => {
|
||||
list_terminals(filters).await
|
||||
}
|
||||
Some(ListCommand::Schedules(filters)) => {
|
||||
list_schedules(filters).await
|
||||
}
|
||||
Some(ListCommand::Builders(filters)) => {
|
||||
list_resources::<BuilderListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Alerters(filters)) => {
|
||||
list_resources::<AlerterListItem>(filters, false).await
|
||||
}
|
||||
Some(ListCommand::Schedules(filters)) => {
|
||||
list_schedules(filters).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -189,6 +193,26 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn list_terminals(
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
// let query = ResourceQuery::builder()
|
||||
// .tags(filters.tags.clone())
|
||||
// .templates(TemplatesQueryBehavior::Exclude)
|
||||
// .build();
|
||||
let terminals = client
|
||||
.read(ListTerminals {
|
||||
target: None,
|
||||
use_names: true,
|
||||
})
|
||||
.await?;
|
||||
if !terminals.is_empty() {
|
||||
print_items(terminals, filters.format, filters.links)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn list_schedules(
|
||||
filters: &ResourceFilters,
|
||||
) -> anyhow::Result<()> {
|
||||
@@ -794,7 +818,7 @@ impl PrintTable for ResourceListItem<ServerListItemInfo> {
|
||||
Cell::new(self.info.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.info.address),
|
||||
Cell::new(self.info.address.as_deref().unwrap_or("inbound")),
|
||||
Cell::new(self.tags.join(", ")),
|
||||
];
|
||||
if links {
|
||||
@@ -1134,6 +1158,28 @@ impl PrintTable for ResourceListItem<AlerterListItemInfo> {
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for Terminal {
|
||||
fn header(_links: bool) -> &'static [&'static str] {
|
||||
&["Terminal", "Target", "Command", "Size", "Created"]
|
||||
}
|
||||
fn row(self, _links: bool) -> Vec<comfy_table::Cell> {
|
||||
vec![
|
||||
Cell::new(self.name).add_attribute(Attribute::Bold),
|
||||
Cell::new(format!("{:?}", self.target)),
|
||||
Cell::new(self.command),
|
||||
Cell::new(if self.stored_size_kb < 1.0 {
|
||||
format!("{:.1} KiB", self.stored_size_kb)
|
||||
} else {
|
||||
format!("{:.} KiB", self.stored_size_kb)
|
||||
}),
|
||||
Cell::new(
|
||||
format_timetamp(self.created_at)
|
||||
.unwrap_or_else(|_| String::from("Invalid created at")),
|
||||
),
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
impl PrintTable for Schedule {
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
@@ -1146,7 +1192,7 @@ impl PrintTable for Schedule {
|
||||
let next_run = if let Some(ts) = self.next_scheduled_run {
|
||||
Cell::new(
|
||||
format_timetamp(ts)
|
||||
.unwrap_or(String::from("Invalid next ts")),
|
||||
.unwrap_or_else(|_| String::from("Invalid next ts")),
|
||||
)
|
||||
.add_attribute(Attribute::Bold)
|
||||
} else {
|
||||
|
||||
@@ -18,6 +18,7 @@ pub mod container;
|
||||
pub mod database;
|
||||
pub mod execute;
|
||||
pub mod list;
|
||||
pub mod terminal;
|
||||
pub mod update;
|
||||
|
||||
async fn komodo_client() -> anyhow::Result<&'static KomodoClient> {
|
||||
|
||||
334
bin/cli/src/command/terminal.rs
Normal file
334
bin/cli/src/command/terminal.rs
Normal file
@@ -0,0 +1,334 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use colored::Colorize;
|
||||
use komodo_client::{
|
||||
api::{
|
||||
read::{ListAllDockerContainers, ListServers},
|
||||
terminal::InitTerminal,
|
||||
},
|
||||
entities::{
|
||||
config::cli::args::terminal::{Attach, Connect, Exec},
|
||||
server::ServerQuery,
|
||||
terminal::{
|
||||
ContainerTerminalMode, TerminalRecreateMode,
|
||||
TerminalResizeMessage, TerminalStdinMessage,
|
||||
},
|
||||
},
|
||||
ws::terminal::TerminalWebsocket,
|
||||
};
|
||||
use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
pub async fn handle_connect(
|
||||
Connect {
|
||||
server,
|
||||
name,
|
||||
command,
|
||||
recreate,
|
||||
}: &Connect,
|
||||
) -> anyhow::Result<()> {
|
||||
handle_terminal_forwarding(async {
|
||||
super::komodo_client()
|
||||
.await?
|
||||
.connect_server_terminal(
|
||||
server.to_string(),
|
||||
Some(name.to_string()),
|
||||
Some(InitTerminal {
|
||||
command: command.clone(),
|
||||
recreate: if *recreate {
|
||||
TerminalRecreateMode::Always
|
||||
} else {
|
||||
TerminalRecreateMode::DifferentCommand
|
||||
},
|
||||
mode: None,
|
||||
}),
|
||||
)
|
||||
.await
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn handle_exec(
|
||||
Exec {
|
||||
server,
|
||||
container,
|
||||
shell,
|
||||
recreate,
|
||||
}: &Exec,
|
||||
) -> anyhow::Result<()> {
|
||||
let server = get_server(server.clone(), container).await?;
|
||||
handle_terminal_forwarding(async {
|
||||
super::komodo_client()
|
||||
.await?
|
||||
.connect_container_terminal(
|
||||
server,
|
||||
container.to_string(),
|
||||
None,
|
||||
Some(InitTerminal {
|
||||
command: Some(shell.to_string()),
|
||||
recreate: if *recreate {
|
||||
TerminalRecreateMode::Always
|
||||
} else {
|
||||
TerminalRecreateMode::DifferentCommand
|
||||
},
|
||||
mode: Some(ContainerTerminalMode::Exec),
|
||||
}),
|
||||
)
|
||||
.await
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn handle_attach(
|
||||
Attach {
|
||||
server,
|
||||
container,
|
||||
recreate,
|
||||
}: &Attach,
|
||||
) -> anyhow::Result<()> {
|
||||
let server = get_server(server.clone(), container).await?;
|
||||
handle_terminal_forwarding(async {
|
||||
super::komodo_client()
|
||||
.await?
|
||||
.connect_container_terminal(
|
||||
server,
|
||||
container.to_string(),
|
||||
None,
|
||||
Some(InitTerminal {
|
||||
command: None,
|
||||
recreate: if *recreate {
|
||||
TerminalRecreateMode::Always
|
||||
} else {
|
||||
TerminalRecreateMode::DifferentCommand
|
||||
},
|
||||
mode: Some(ContainerTerminalMode::Attach),
|
||||
}),
|
||||
)
|
||||
.await
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn get_server(
|
||||
server: Option<String>,
|
||||
container: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
if let Some(server) = server {
|
||||
return Ok(server);
|
||||
}
|
||||
|
||||
let client = super::komodo_client().await?;
|
||||
|
||||
let mut containers = client
|
||||
.read(ListAllDockerContainers {
|
||||
servers: Default::default(),
|
||||
containers: vec![container.to_string()],
|
||||
})
|
||||
.await?;
|
||||
|
||||
if containers.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Did not find any container matching {container}"
|
||||
));
|
||||
}
|
||||
|
||||
if containers.len() == 1 {
|
||||
return containers
|
||||
.pop()
|
||||
.context("Shouldn't happen")?
|
||||
.server_id
|
||||
.context("Container doesn't have server_id");
|
||||
}
|
||||
|
||||
let servers = containers
|
||||
.into_iter()
|
||||
.flat_map(|container| container.server_id)
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let servers = client
|
||||
.read(ListServers {
|
||||
query: ServerQuery::builder().names(servers).build(),
|
||||
})
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|server| format!("\t- {}", server.name.bold()))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
|
||||
Err(anyhow!(
|
||||
"Multiple containers matching '{}' on Servers:\n{servers}",
|
||||
container.bold(),
|
||||
))
|
||||
}
|
||||
|
||||
/// Bridges the local terminal to a remote terminal websocket.
///
/// Runs four concurrent loops joined at the end:
///   1. `forward_resize` — SIGWINCH -> resize messages into `write_tx`
///   2. `forward_stdin`  — local stdin bytes -> `write_tx`
///   3. `forward_write`  — drains `write_rx` -> websocket writer
///   4. `forward_read`   — websocket reader -> local stdout
///
/// Shutdown protocol: whichever loop finishes first (EOF, disconnect
/// sequence, channel closed, websocket error) calls `cancel.cancel()`,
/// and every other loop observes that via `future_or_cancel` and
/// exits, so `tokio::join!` completes. The terminal is held in raw
/// mode (via `RawModeGuard`) only for the duration of the join.
///
/// `connect` is awaited lazily, AFTER the resize/stdin loops are set
/// up, so the first resize message is already queued when the
/// websocket opens.
async fn handle_terminal_forwarding<
  C: Future<Output = anyhow::Result<TerminalWebsocket>>,
>(
  connect: C,
) -> anyhow::Result<()> {
  // Need to forward multiple sources into ws write
  // (both stdin bytes and resize events), hence the mpsc fan-in.
  let (write_tx, mut write_rx) =
    tokio::sync::mpsc::channel::<TerminalStdinMessage>(1024);

  // ================
  // SETUP RESIZING
  // ================

  // Subscribe to SIGWINCH for resize messages
  let mut sigwinch = tokio::signal::unix::signal(
    tokio::signal::unix::SignalKind::window_change(),
  )
  .context("failed to register SIGWINCH handler")?;

  // Send first resize messsage, bailing if it fails to get the size.
  write_tx.send(resize_message()?).await?;

  // Shared token: any loop cancelling it unwinds all the others.
  let cancel = CancellationToken::new();

  let forward_resize = async {
    // sigwinch.recv() returns Option; future_or_cancel wraps it in
    // another Option, so `.flatten()` is None on either cancellation
    // or signal-stream termination.
    while future_or_cancel(sigwinch.recv(), &cancel)
      .await
      .flatten()
      .is_some()
    {
      // Best effort: a failed size query is skipped, but a closed
      // channel (receiver gone) ends the loop.
      if let Ok(resize_message) = resize_message()
        && write_tx.send(resize_message).await.is_err()
      {
        break;
      }
    }
    cancel.cancel();
  };

  let forward_stdin = async {
    let mut stdin = tokio::io::stdin();
    let mut buf = [0u8; 8192];
    while let Some(Ok(n)) =
      future_or_cancel(stdin.read(&mut buf), &cancel).await
    {
      // EOF
      if n == 0 {
        break;
      }
      let bytes = &buf[..n];
      // Check for disconnect sequence (alt + q)
      // [197, 147] is the UTF-8 encoding of 'œ' (U+0153), which
      // alt+q produces on common keyboard layouts.
      if bytes == [197, 147] {
        break;
      }
      // Forward bytes
      if write_tx
        .send(TerminalStdinMessage::Forward(bytes.to_vec()))
        .await
        .is_err()
      {
        break;
      };
    }
    cancel.cancel();
  };

  // =====================
  // CONNECT AND FORWARD
  // =====================

  // Connection errors here propagate with `?` BEFORE raw mode is
  // enabled, so a failed connect leaves the terminal untouched.
  let (mut ws_write, mut ws_read) = connect.await?.split();

  let forward_write = async {
    while let Some(message) =
      future_or_cancel(write_rx.recv(), &cancel).await.flatten()
    {
      if let Err(e) = ws_write.send_stdin_message(message).await {
        cancel.cancel();
        // Surfaced after the join as the "forward stdin" error.
        return Some(e);
      };
    }
    cancel.cancel();
    None
  };

  let forward_read = async {
    let mut stdout = tokio::io::stdout();
    while let Some(msg) =
      future_or_cancel(ws_read.receive_stdout(), &cancel).await
    {
      let bytes = match msg {
        Ok(Some(bytes)) => bytes,
        // Ok(None): remote closed the stream cleanly.
        Ok(None) => break,
        Err(e) => {
          cancel.cancel();
          return Some(e.context("Websocket read error"));
        }
      };
      if let Err(e) = stdout
        .write_all(&bytes)
        .await
        .context("Failed to write text to stdout")
      {
        cancel.cancel();
        return Some(e);
      }
      // Flush failures are ignored: output already buffered.
      let _ = stdout.flush().await;
    }
    cancel.cancel();
    None
  };

  // Raw mode is enabled only now, right before forwarding begins,
  // and restored by the guard's Drop below.
  let guard = RawModeGuard::enable_raw_mode()?;

  let (_, _, write_error, read_error) = tokio::join!(
    forward_resize,
    forward_stdin,
    forward_write,
    forward_read
  );

  // Restore the terminal BEFORE printing any diagnostics, so they
  // render normally.
  drop(guard);

  if let Some(e) = write_error {
    eprintln!("\nFailed to forward stdin | {e:#}");
  }

  if let Some(e) = read_error {
    eprintln!("\nFailed to forward stdout | {e:#}");
  }

  println!("\n\n{} {}", "connection".bold(), "closed".red().bold());

  // It doesn't seem to exit by itself after the raw mode stuff.
  std::process::exit(0)
}
|
||||
|
||||
fn resize_message() -> anyhow::Result<TerminalStdinMessage> {
|
||||
let (cols, rows) = crossterm::terminal::size()
|
||||
.context("Failed to get terminal size")?;
|
||||
Ok(TerminalStdinMessage::Resize(TerminalResizeMessage {
|
||||
rows,
|
||||
cols,
|
||||
}))
|
||||
}
|
||||
|
||||
struct RawModeGuard;
|
||||
|
||||
impl RawModeGuard {
|
||||
fn enable_raw_mode() -> anyhow::Result<Self> {
|
||||
crossterm::terminal::enable_raw_mode()
|
||||
.context("Failed to enable terminal raw mode")?;
|
||||
Ok(Self)
|
||||
}
|
||||
}
|
||||
impl Drop for RawModeGuard {
|
||||
fn drop(&mut self) {
|
||||
if let Err(e) = crossterm::terminal::disable_raw_mode() {
|
||||
eprintln!("Failed to disable terminal raw mode | {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn future_or_cancel<T, F: Future<Output = T>>(
|
||||
fut: F,
|
||||
cancel: &CancellationToken,
|
||||
) -> Option<T> {
|
||||
tokio::select! {
|
||||
res = fut => Some(res),
|
||||
_ = cancel.cancelled() => None
|
||||
}
|
||||
}
|
||||
@@ -28,7 +28,7 @@ pub fn cli_env() -> &'static Env {
|
||||
{
|
||||
Ok(env) => env,
|
||||
Err(e) => {
|
||||
panic!("{e:?}");
|
||||
panic!("{e:?}")
|
||||
}
|
||||
}
|
||||
})
|
||||
@@ -261,12 +261,18 @@ pub fn cli_config() -> &'static CliConfig {
|
||||
.komodo_cli_logging_pretty
|
||||
.unwrap_or(config.cli_logging.pretty),
|
||||
location: false,
|
||||
ansi: env
|
||||
.komodo_cli_logging_ansi
|
||||
.unwrap_or(config.cli_logging.ansi),
|
||||
otlp_endpoint: env
|
||||
.komodo_cli_logging_otlp_endpoint
|
||||
.unwrap_or(config.cli_logging.otlp_endpoint),
|
||||
opentelemetry_service_name: env
|
||||
.komodo_cli_logging_opentelemetry_service_name
|
||||
.unwrap_or(config.cli_logging.opentelemetry_service_name),
|
||||
opentelemetry_scope_name: env
|
||||
.komodo_cli_logging_opentelemetry_scope_name
|
||||
.unwrap_or(config.cli_logging.opentelemetry_scope_name),
|
||||
},
|
||||
profile: config.profile,
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
extern crate tracing;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use komodo_client::entities::config::cli::args;
|
||||
|
||||
use crate::config::cli_config;
|
||||
@@ -54,6 +55,18 @@ async fn app() -> anyhow::Result<()> {
|
||||
args::Command::Update { command } => {
|
||||
command::update::handle(command).await
|
||||
}
|
||||
args::Command::Connect(connect) => {
|
||||
command::terminal::handle_connect(connect).await
|
||||
}
|
||||
args::Command::Exec(exec) => {
|
||||
command::terminal::handle_exec(exec).await
|
||||
}
|
||||
args::Command::Attach(attach) => {
|
||||
command::terminal::handle_attach(attach).await
|
||||
}
|
||||
args::Command::Key { command } => {
|
||||
noise::key::command::handle(command).await
|
||||
}
|
||||
args::Command::Database { command } => {
|
||||
command::database::handle(command).await
|
||||
}
|
||||
@@ -66,7 +79,18 @@ async fn main() -> anyhow::Result<()> {
|
||||
tokio::signal::unix::SignalKind::terminate(),
|
||||
)?;
|
||||
tokio::select! {
|
||||
res = tokio::spawn(app()) => res?,
|
||||
_ = term_signal.recv() => Ok(()),
|
||||
res = tokio::spawn(app()) => match res {
|
||||
Ok(Err(e)) => {
|
||||
eprintln!("{}: {e}", "ERROR".red());
|
||||
std::process::exit(1)
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("{}: {e}", "ERROR".red());
|
||||
std::process::exit(1)
|
||||
},
|
||||
Ok(_) => {}
|
||||
},
|
||||
_ = term_signal.recv() => {},
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -19,13 +19,17 @@ komodo_client = { workspace = true, features = ["mongo"] }
|
||||
periphery_client.workspace = true
|
||||
environment_file.workspace = true
|
||||
interpolate.workspace = true
|
||||
secret_file.workspace = true
|
||||
formatting.workspace = true
|
||||
transport.workspace = true
|
||||
database.workspace = true
|
||||
encoding.workspace = true
|
||||
response.workspace = true
|
||||
command.workspace = true
|
||||
config.workspace = true
|
||||
logger.workspace = true
|
||||
cache.workspace = true
|
||||
noise.workspace = true
|
||||
git.workspace = true
|
||||
# mogh
|
||||
serror = { workspace = true, features = ["axum"] }
|
||||
@@ -38,10 +42,10 @@ slack.workspace = true
|
||||
svi.workspace = true
|
||||
# external
|
||||
aws-credential-types.workspace = true
|
||||
tokio-tungstenite.workspace = true
|
||||
english-to-cron.workspace = true
|
||||
openidconnect.workspace = true
|
||||
jsonwebtoken.workspace = true
|
||||
futures-util.workspace = true
|
||||
axum-server.workspace = true
|
||||
urlencoding.workspace = true
|
||||
aws-sdk-ec2.workspace = true
|
||||
@@ -51,18 +55,16 @@ axum-extra.workspace = true
|
||||
tower-http.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_yaml_ng.workspace = true
|
||||
serde_qs.workspace = true
|
||||
typeshare.workspace = true
|
||||
chrono-tz.workspace = true
|
||||
indexmap.workspace = true
|
||||
octorust.workspace = true
|
||||
wildcard.workspace = true
|
||||
arc-swap.workspace = true
|
||||
colored.workspace = true
|
||||
dashmap.workspace = true
|
||||
tracing.workspace = true
|
||||
reqwest.workspace = true
|
||||
futures.workspace = true
|
||||
nom_pem.workspace = true
|
||||
dotenvy.workspace = true
|
||||
anyhow.workspace = true
|
||||
croner.workspace = true
|
||||
@@ -70,14 +72,16 @@ chrono.workspace = true
|
||||
bcrypt.workspace = true
|
||||
base64.workspace = true
|
||||
rustls.workspace = true
|
||||
bytes.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
strum.workspace = true
|
||||
regex.workspace = true
|
||||
axum.workspace = true
|
||||
toml.workspace = true
|
||||
uuid.workspace = true
|
||||
envy.workspace = true
|
||||
rand.workspace = true
|
||||
hmac.workspace = true
|
||||
sha2.workspace = true
|
||||
hex.workspace = true
|
||||
url.workspace = true
|
||||
@@ -1,7 +1,7 @@
|
||||
## All in one, multi stage compile + runtime Docker build for your architecture.
|
||||
|
||||
# Build Core
|
||||
FROM rust:1.89.0-bullseye AS core-builder
|
||||
FROM rust:1.90.0-trixie AS core-builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
@@ -26,7 +26,7 @@ RUN cd client && yarn && yarn build && yarn link
|
||||
RUN cd frontend && yarn link komodo_client && yarn && yarn build
|
||||
|
||||
# Final Image
|
||||
FROM debian:bullseye-slim
|
||||
FROM debian:trixie-slim
|
||||
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
@@ -48,6 +48,9 @@ RUN mkdir /action-cache && \
|
||||
cd /action-cache && \
|
||||
deno install jsr:@std/yaml jsr:@std/toml
|
||||
|
||||
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
@@ -55,9 +58,11 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
CMD [ "core" ]
|
||||
CMD [ "/bin/bash", "-c", "update-ca-certificates && core" ]
|
||||
|
||||
# Label to prevent Komodo from stopping with StopAllContainers
|
||||
LABEL komodo.skip="true"
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
|
||||
@@ -13,7 +13,7 @@ FROM ${AARCH64_BINARIES} AS aarch64
|
||||
FROM ${FRONTEND_IMAGE} AS frontend
|
||||
|
||||
# Final Image
|
||||
FROM debian:bullseye-slim
|
||||
FROM debian:trixie-slim
|
||||
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
@@ -28,7 +28,7 @@ COPY --from=x86_64 /core /app/core/linux/amd64
|
||||
COPY --from=aarch64 /core /app/core/linux/arm64
|
||||
RUN mv /app/core/${TARGETPLATFORM} /usr/local/bin/core && rm -r /app/core
|
||||
|
||||
# Same for util
|
||||
# Same for km
|
||||
COPY --from=x86_64 /km /app/km/linux/amd64
|
||||
COPY --from=aarch64 /km /app/km/linux/arm64
|
||||
RUN mv /app/km/${TARGETPLATFORM} /usr/local/bin/km && rm -r /app/km
|
||||
@@ -44,6 +44,9 @@ RUN mkdir /action-cache && \
|
||||
cd /action-cache && \
|
||||
deno install jsr:@std/yaml jsr:@std/toml
|
||||
|
||||
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
@@ -51,9 +54,12 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
ENTRYPOINT [ "entrypoint.sh" ]
|
||||
CMD [ "core" ]
|
||||
|
||||
# Label to prevent Komodo from stopping with StopAllContainers
|
||||
LABEL komodo.skip="true"
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
|
||||
@@ -14,7 +14,7 @@ COPY ./client/core/ts ./client
|
||||
RUN cd client && yarn && yarn build && yarn link
|
||||
RUN cd frontend && yarn link komodo_client && yarn && yarn build
|
||||
|
||||
FROM debian:bullseye-slim
|
||||
FROM debian:trixie-slim
|
||||
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
@@ -33,6 +33,9 @@ RUN mkdir /action-cache && \
|
||||
cd /action-cache && \
|
||||
deno install jsr:@std/yaml jsr:@std/toml
|
||||
|
||||
COPY ./bin/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
@@ -40,9 +43,12 @@ ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
ENTRYPOINT [ "entrypoint.sh" ]
|
||||
CMD [ "core" ]
|
||||
|
||||
# Label to prevent Komodo from stopping with StopAllContainers
|
||||
LABEL komodo.skip="true"
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
|
||||
@@ -4,7 +4,6 @@ use serde::Serialize;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
@@ -50,7 +49,7 @@ pub async fn send_alert(
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | **{name}**{region} is now **reachable**\n{link}"
|
||||
"{level} | **{name}**{region} is now **connected**\n{link}"
|
||||
)
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
@@ -241,31 +240,33 @@ pub async fn send_alert(
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if !content.is_empty() {
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
send_message(&url_interpolated, &content)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})?;
|
||||
if content.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
Ok(())
|
||||
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
send_message(&url_interpolated, &content)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
async fn send_message(
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
use ::slack::types::Block;
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use derive_variants::ExtractVariant;
|
||||
use futures::future::join_all;
|
||||
use futures_util::future::join_all;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::entities::{
|
||||
ResourceTargetVariant,
|
||||
@@ -12,7 +11,6 @@ use komodo_client::entities::{
|
||||
komodo_timestamp,
|
||||
stack::StackState,
|
||||
};
|
||||
use tracing::Instrument;
|
||||
|
||||
use crate::helpers::query::get_variables_and_secrets;
|
||||
use crate::helpers::{
|
||||
@@ -25,40 +23,32 @@ mod ntfy;
|
||||
mod pushover;
|
||||
mod slack;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alerts(alerts: &[Alert]) {
|
||||
if alerts.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let span =
|
||||
info_span!("send_alerts", alerts = format!("{alerts:?}"));
|
||||
async {
|
||||
let Ok(alerters) = find_collect(
|
||||
&db_client().alerters,
|
||||
doc! { "config.enabled": true },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!(
|
||||
let Ok(alerters) = find_collect(
|
||||
&db_client().alerters,
|
||||
doc! { "config.enabled": true },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!(
|
||||
"ERROR sending alerts | failed to get alerters from db | {e:#}"
|
||||
)
|
||||
}) else {
|
||||
return;
|
||||
};
|
||||
}) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let handles = alerts
|
||||
.iter()
|
||||
.map(|alert| send_alert_to_alerters(&alerters, alert));
|
||||
let handles = alerts
|
||||
.iter()
|
||||
.map(|alert| send_alert_to_alerters(&alerters, alert));
|
||||
|
||||
join_all(handles).await;
|
||||
}
|
||||
.instrument(span)
|
||||
.await
|
||||
join_all(handles).await;
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_alert_to_alerters(alerters: &[Alerter], alert: &Alert) {
|
||||
if alerters.is_empty() {
|
||||
return;
|
||||
@@ -162,7 +152,6 @@ pub async fn send_alert_to_alerter(
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_custom_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
@@ -295,7 +284,7 @@ fn standard_alert_content(alert: &Alert) -> String {
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!("{level} | {name}{region} is now reachable\n{link}")
|
||||
format!("{level} | {name}{region} is now connected\n{link}")
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
|
||||
@@ -2,17 +2,38 @@ use std::sync::OnceLock;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
email: Option<&str>,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let content = standard_alert_content(alert);
|
||||
if !content.is_empty() {
|
||||
send_message(url, email, content).await?;
|
||||
if content.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
Ok(())
|
||||
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
send_message(&url_interpolated, email, content)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
async fn send_message(
|
||||
@@ -22,7 +43,7 @@ async fn send_message(
|
||||
) -> anyhow::Result<()> {
|
||||
let mut request = http_client()
|
||||
.post(url)
|
||||
.header("Title", "ntfy Alert")
|
||||
.header("Title", "Komodo Alert")
|
||||
.body(content);
|
||||
|
||||
if let Some(email) = email {
|
||||
@@ -43,9 +64,7 @@ async fn send_message(
|
||||
)
|
||||
})?;
|
||||
Err(anyhow!(
|
||||
"Failed to send message to ntfy | {} | {}",
|
||||
status,
|
||||
text
|
||||
"Failed to send message to ntfy | {status} | {text}",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,16 +2,35 @@ use std::sync::OnceLock;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let content = standard_alert_content(alert);
|
||||
if !content.is_empty() {
|
||||
send_message(url, content).await?;
|
||||
if content.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
Ok(())
|
||||
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
send_message(&url_interpolated, content).await.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
async fn send_message(
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use ::slack::types::OwnedBlock as Block;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
@@ -62,11 +63,11 @@ pub async fn send_alert(
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
let text =
|
||||
format!("{level} | *{name}*{region} is now *reachable*");
|
||||
format!("{level} | *{name}*{region} is now *connected*");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
"*{name}*{region} is now *reachable*"
|
||||
"*{name}*{region} is now *connnected*"
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
@@ -466,18 +467,23 @@ pub async fn send_alert(
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if !text.is_empty() {
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
if text.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
let slack = ::slack::Client::new(url_interpolated);
|
||||
slack.send_message(text, blocks).await.map_err(|e| {
|
||||
let slack = ::slack::Client::new(url_interpolated);
|
||||
slack
|
||||
.send_owned_message_single(&text, None, blocks.as_deref())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
@@ -488,6 +494,5 @@ pub async fn send_alert(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use std::time::Instant;
|
||||
use std::{sync::OnceLock, time::Instant};
|
||||
|
||||
use axum::{Router, extract::Path, http::HeaderMap, routing::post};
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
@@ -88,7 +88,6 @@ async fn variant_handler(
|
||||
handler(headers, Json(req)).await
|
||||
}
|
||||
|
||||
#[instrument(name = "AuthHandler", level = "debug", skip(headers))]
|
||||
async fn handler(
|
||||
headers: HeaderMap,
|
||||
Json(request): Json<AuthRequest>,
|
||||
@@ -108,29 +107,32 @@ async fn handler(
|
||||
res.map(|res| res.0)
|
||||
}
|
||||
|
||||
fn login_options_response() -> GetLoginOptionsResponse {
|
||||
let config = core_config();
|
||||
GetLoginOptionsResponse {
|
||||
local: config.local_auth,
|
||||
github: github_oauth_client().is_some(),
|
||||
google: google_oauth_client().is_some(),
|
||||
oidc: oidc_client().load().is_some(),
|
||||
registration_disabled: config.disable_user_registration,
|
||||
}
|
||||
fn login_options_reponse() -> &'static GetLoginOptionsResponse {
|
||||
static GET_LOGIN_OPTIONS_RESPONSE: OnceLock<
|
||||
GetLoginOptionsResponse,
|
||||
> = OnceLock::new();
|
||||
GET_LOGIN_OPTIONS_RESPONSE.get_or_init(|| {
|
||||
let config = core_config();
|
||||
GetLoginOptionsResponse {
|
||||
local: config.local_auth,
|
||||
github: github_oauth_client().is_some(),
|
||||
google: google_oauth_client().is_some(),
|
||||
oidc: oidc_client().load().is_some(),
|
||||
registration_disabled: config.disable_user_registration,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
impl Resolve<AuthArgs> for GetLoginOptions {
|
||||
#[instrument(name = "GetLoginOptions", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &AuthArgs,
|
||||
) -> serror::Result<GetLoginOptionsResponse> {
|
||||
Ok(login_options_response())
|
||||
Ok(*login_options_reponse())
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<AuthArgs> for ExchangeForJwt {
|
||||
#[instrument(name = "ExchangeForJwt", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &AuthArgs,
|
||||
@@ -143,7 +145,6 @@ impl Resolve<AuthArgs> for ExchangeForJwt {
|
||||
}
|
||||
|
||||
impl Resolve<AuthArgs> for GetUser {
|
||||
#[instrument(name = "GetUser", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
AuthArgs { headers }: &AuthArgs,
|
||||
|
||||
@@ -1,12 +1,11 @@
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
path::{Path, PathBuf},
|
||||
str::FromStr,
|
||||
sync::OnceLock,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use command::run_komodo_command;
|
||||
use command::run_komodo_standard_command;
|
||||
use config::merge_objects;
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_document,
|
||||
@@ -24,6 +23,7 @@ use komodo_client::{
|
||||
config::core::CoreConfig,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
random_string,
|
||||
update::Update,
|
||||
user::action_user,
|
||||
},
|
||||
@@ -38,7 +38,6 @@ use crate::{
|
||||
config::core_config,
|
||||
helpers::{
|
||||
query::{VariablesAndSecrets, get_variables_and_secrets},
|
||||
random_string,
|
||||
update::update_update,
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
@@ -59,10 +58,18 @@ impl super::BatchExecute for BatchRunAction {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchRunAction {
|
||||
#[instrument(name = "BatchRunAction", skip(self, user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchRunAction",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchRunAction>(&self.pattern, user)
|
||||
@@ -72,10 +79,19 @@ impl Resolve<ExecuteArgs> for BatchRunAction {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunAction {
|
||||
#[instrument(name = "RunAction", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RunAction",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
action = self.action,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut action = get_check_permissions::<Action>(
|
||||
&self.action,
|
||||
@@ -142,15 +158,11 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
let file = format!("{}.ts", random_string(10));
|
||||
let path = core_config().action_directory.join(&file);
|
||||
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent)
|
||||
.await
|
||||
.with_context(|| format!("Failed to initialize Action file parent directory {parent:?}"))?;
|
||||
}
|
||||
|
||||
fs::write(&path, contents).await.with_context(|| {
|
||||
format!("Failed to write action file to {path:?}")
|
||||
})?;
|
||||
secret_file::write_async(&path, contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Failed to write action file to {path:?}")
|
||||
})?;
|
||||
|
||||
let CoreConfig { ssl_enabled, .. } = core_config();
|
||||
|
||||
@@ -166,7 +178,7 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
""
|
||||
};
|
||||
|
||||
let mut res = run_komodo_command(
|
||||
let mut res = run_komodo_standard_command(
|
||||
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
|
||||
"Execute Action",
|
||||
None,
|
||||
@@ -217,7 +229,6 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if !update.success && action.config.failure_alert {
|
||||
warn!("action unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
@@ -240,6 +251,7 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("Interpolate", skip(contents, update, secret))]
|
||||
async fn interpolate(
|
||||
contents: &mut String,
|
||||
update: &mut Update,
|
||||
@@ -325,6 +337,7 @@ main()
|
||||
/// Cleans up file at given path.
|
||||
/// ALSO if $DENO_DIR is set,
|
||||
/// will clean up the generated file matching "file"
|
||||
#[instrument("CleanupRun")]
|
||||
async fn cleanup_run(file: String, path: &Path) {
|
||||
if let Err(e) = fs::remove_file(path).await {
|
||||
warn!(
|
||||
@@ -344,7 +357,7 @@ fn deno_dir() -> Option<&'static Path> {
|
||||
DENO_DIR
|
||||
.get_or_init(|| {
|
||||
let deno_dir = std::env::var("DENO_DIR").ok()?;
|
||||
PathBuf::from_str(&deno_dir).ok()
|
||||
Some(PathBuf::from(&deno_dir))
|
||||
})
|
||||
.as_deref()
|
||||
}
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use formatting::format_serror;
|
||||
use futures::{TryStreamExt, stream::FuturesUnordered};
|
||||
use futures_util::{
|
||||
StreamExt, TryStreamExt, stream::FuturesUnordered,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::execute::{SendAlert, TestAlerter},
|
||||
entities::{
|
||||
@@ -22,10 +24,19 @@ use crate::{
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for TestAlerter {
|
||||
#[instrument(name = "TestAlerter", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"TestAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
alerter = self.alerter,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let alerter = get_check_permissions::<Alerter>(
|
||||
&self.alerter,
|
||||
@@ -79,15 +90,24 @@ impl Resolve<ExecuteArgs> for TestAlerter {
|
||||
//
|
||||
|
||||
impl Resolve<ExecuteArgs> for SendAlert {
|
||||
#[instrument(name = "SendAlert", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"SendAlert",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
request = format!("{self:?}"),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let alerters = list_full_for_user::<Alerter>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await?
|
||||
@@ -102,6 +122,28 @@ impl Resolve<ExecuteArgs> for SendAlert {
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let alerters = if user.admin {
|
||||
alerters
|
||||
} else {
|
||||
// Only keep alerters with execute permissions
|
||||
alerters
|
||||
.into_iter()
|
||||
.map(|alerter| async move {
|
||||
get_check_permissions::<Alerter>(
|
||||
&alerter.id,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.collect::<Vec<_>>()
|
||||
.await
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect()
|
||||
};
|
||||
|
||||
if alerters.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Could not find any valid alerters to send to, this required Execute permissions on the Alerter"
|
||||
|
||||
@@ -14,12 +14,15 @@ use database::mungos::{
|
||||
},
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use futures::future::join_all;
|
||||
use futures_util::future::join_all;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
BatchExecutionResponse, BatchRunBuild, CancelBuild, Deploy,
|
||||
RunBuild,
|
||||
api::{
|
||||
execute::{
|
||||
BatchExecutionResponse, BatchRunBuild, CancelBuild, Deploy,
|
||||
RunBuild,
|
||||
},
|
||||
write::RefreshBuildCache,
|
||||
},
|
||||
entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
@@ -37,12 +40,14 @@ use komodo_client::{
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
alert::send_alerts,
|
||||
api::write::WriteArgs,
|
||||
helpers::{
|
||||
build_git_token,
|
||||
builder::{cleanup_builder_instance, get_builder_periphery},
|
||||
builder::{cleanup_builder_instance, connect_builder_periphery},
|
||||
channel::build_cancel_channel,
|
||||
query::{
|
||||
VariablesAndSecrets, get_deployment_state,
|
||||
@@ -66,10 +71,18 @@ impl super::BatchExecute for BatchRunBuild {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchRunBuild {
|
||||
#[instrument(name = "BatchRunBuild", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchRunBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchRunBuild>(&self.pattern, user)
|
||||
@@ -79,10 +92,19 @@ impl Resolve<ExecuteArgs> for BatchRunBuild {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunBuild {
|
||||
#[instrument(name = "RunBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RunBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
build = self.build,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
@@ -168,7 +190,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
update.finalize();
|
||||
let id = update.id.clone();
|
||||
if let Err(e) = update_update(update).await {
|
||||
warn!("failed to modify Update {id} on db | {e:#}");
|
||||
warn!("Failed to modify Update {id} on db | {e:#}");
|
||||
}
|
||||
if !is_server_builder {
|
||||
cancel_clone.cancel();
|
||||
@@ -186,7 +208,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
});
|
||||
|
||||
// GET BUILDER PERIPHERY
|
||||
let (periphery, cleanup_data) = match get_builder_periphery(
|
||||
let (periphery, cleanup_data) = match connect_builder_periphery(
|
||||
build.name.clone(),
|
||||
Some(build.config.version),
|
||||
builder,
|
||||
@@ -197,12 +219,12 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
Ok(builder) => builder,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"failed to get builder for build {} | {e:#}",
|
||||
"Failed to get Builder for Build {} | {e:#}",
|
||||
build.name
|
||||
);
|
||||
update.logs.push(Log::error(
|
||||
"get builder",
|
||||
format_serror(&e.context("failed to get builder").into()),
|
||||
"Get Builder",
|
||||
format_serror(&e.context("Failed to get Builder").into()),
|
||||
));
|
||||
return handle_early_return(
|
||||
update, build.id, build.name, false,
|
||||
@@ -247,18 +269,18 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
replacers: Default::default(),
|
||||
}) => res,
|
||||
_ = cancel.cancelled() => {
|
||||
debug!("build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(cleanup_data, &mut update)
|
||||
debug!("Build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("Build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
info!("builder cleaned up");
|
||||
info!("Builder cleaned up");
|
||||
return handle_early_return(update, build.id, build.name, true).await
|
||||
},
|
||||
};
|
||||
|
||||
let commit_message = match res {
|
||||
Ok(res) => {
|
||||
debug!("finished repo clone");
|
||||
debug!("Finished repo clone");
|
||||
update.logs.extend(res.res.logs);
|
||||
update.commit_hash =
|
||||
res.res.commit_hash.unwrap_or_default().to_string();
|
||||
@@ -294,11 +316,11 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
commit_hash: optional_string(&update.commit_hash),
|
||||
// Unused for now
|
||||
additional_tags: Default::default(),
|
||||
}) => res.context("failed at call to periphery to build"),
|
||||
}) => res.context("Failed at call to Periphery to build"),
|
||||
_ = cancel.cancelled() => {
|
||||
info!("build cancelled during build, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
|
||||
cleanup_builder_instance(cleanup_data, &mut update)
|
||||
info!("Build cancelled during build, cleaning up builder");
|
||||
update.push_error_log("Build cancelled", String::from("User cancelled build during docker build"));
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
return handle_early_return(update, build.id, build.name, true).await
|
||||
},
|
||||
@@ -310,10 +332,10 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
update.logs.extend(logs);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("error in build | {e:#}");
|
||||
warn!("Error in build | {e:#}");
|
||||
update.push_error_log(
|
||||
"build",
|
||||
format_serror(&e.context("failed to build").into()),
|
||||
"Build Error",
|
||||
format_serror(&e.context("Failed to build").into()),
|
||||
)
|
||||
}
|
||||
};
|
||||
@@ -344,7 +366,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
|
||||
// If building on temporary cloud server (AWS),
|
||||
// this will terminate the server.
|
||||
cleanup_builder_instance(cleanup_data, &mut update).await;
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
@@ -363,13 +386,15 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let Build { id, name, .. } = build;
|
||||
|
||||
if update.success {
|
||||
// don't hold response up for user
|
||||
tokio::spawn(async move {
|
||||
handle_post_build_redeploy(&build.id).await;
|
||||
handle_post_build_redeploy(&id).await;
|
||||
});
|
||||
} else {
|
||||
warn!("build unsuccessful, alerting...");
|
||||
let name = name.clone();
|
||||
let target = update.target.clone();
|
||||
let version = update.version;
|
||||
tokio::spawn(async move {
|
||||
@@ -380,21 +405,27 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
resolved_ts: Some(komodo_timestamp()),
|
||||
resolved: true,
|
||||
level: SeverityLevel::Warning,
|
||||
data: AlertData::BuildFailed {
|
||||
id: build.id,
|
||||
name: build.name,
|
||||
version,
|
||||
},
|
||||
data: AlertData::BuildFailed { id, name, version },
|
||||
};
|
||||
send_alerts(&[alert]).await
|
||||
});
|
||||
}
|
||||
|
||||
if let Err(e) = (RefreshBuildCache { build: name })
|
||||
.resolve(&WriteArgs { user: user.clone() })
|
||||
.await
|
||||
{
|
||||
update.push_error_log(
|
||||
"Refresh build cache",
|
||||
format_serror(&e.error.into()),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(update.clone())
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(update))]
|
||||
#[instrument("HandleEarlyReturn", skip(update))]
|
||||
async fn handle_early_return(
|
||||
mut update: Update,
|
||||
build_id: String,
|
||||
@@ -418,7 +449,6 @@ async fn handle_early_return(
|
||||
}
|
||||
update_update(update.clone()).await?;
|
||||
if !update.success && !is_cancel {
|
||||
warn!("build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
let version = update.version;
|
||||
tokio::spawn(async move {
|
||||
@@ -488,10 +518,19 @@ pub async fn validate_cancel_build(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for CancelBuild {
|
||||
#[instrument(name = "CancelBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"CancelBuild",
|
||||
skip(user, update),
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
build = self.build,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
@@ -539,7 +578,7 @@ impl Resolve<ExecuteArgs> for CancelBuild {
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"failed to set CancelBuild Update status Complete after timeout | {e:#}"
|
||||
"Failed to set CancelBuild Update status Complete after timeout | {e:#}"
|
||||
)
|
||||
}
|
||||
});
|
||||
@@ -548,7 +587,7 @@ impl Resolve<ExecuteArgs> for CancelBuild {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
#[instrument("PostBuildRedeploy")]
|
||||
async fn handle_post_build_redeploy(build_id: &str) {
|
||||
let Ok(redeploy_deployments) = find_collect(
|
||||
&db_client().deployments,
|
||||
@@ -584,7 +623,11 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
stop_signal: None,
|
||||
stop_time: None,
|
||||
}
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.resolve(&ExecuteArgs {
|
||||
user,
|
||||
update,
|
||||
id: Uuid::new_v4(),
|
||||
})
|
||||
.await
|
||||
}
|
||||
.await;
|
||||
@@ -610,6 +653,7 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
/// This will make sure that a build with non-none image registry has an account attached,
|
||||
/// and will check the core config for a token matching requirements.
|
||||
/// Otherwise it is left to periphery.
|
||||
#[instrument("ValidateRegistryTokens")]
|
||||
async fn validate_account_extract_registry_tokens(
|
||||
Build {
|
||||
config: BuildConfig { image_registry, .. },
|
||||
|
||||
@@ -49,10 +49,18 @@ impl super::BatchExecute for BatchDeploy {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchDeploy {
|
||||
#[instrument(name = "BatchDeploy", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchDeploy",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchDeploy>(&self.pattern, user)
|
||||
@@ -61,6 +69,7 @@ impl Resolve<ExecuteArgs> for BatchDeploy {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("SetupDeploy", skip_all)]
|
||||
async fn setup_deployment_execution(
|
||||
deployment: &str,
|
||||
user: &User,
|
||||
@@ -87,10 +96,21 @@ async fn setup_deployment_execution(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for Deploy {
|
||||
#[instrument(name = "Deploy", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"Deploy",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
stop_signal = format!("{:?}", self.stop_signal),
|
||||
stop_time = self.stop_time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (mut deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -203,7 +223,8 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
update.version = version;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
match periphery_client(&server)?
|
||||
match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::Deploy {
|
||||
deployment,
|
||||
stop_signal: self.stop_signal,
|
||||
@@ -242,6 +263,14 @@ fn pull_cache() -> &'static PullCache {
|
||||
PULL_CACHE.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
"PullDeploymentInner",
|
||||
skip_all,
|
||||
fields(
|
||||
deployment = deployment.id,
|
||||
server = server.id
|
||||
)
|
||||
)]
|
||||
pub async fn pull_deployment_inner(
|
||||
deployment: Deployment,
|
||||
server: &Server,
|
||||
@@ -331,8 +360,9 @@ pub async fn pull_deployment_inner(
|
||||
}
|
||||
|
||||
let res = async {
|
||||
let log = match periphery_client(server)?
|
||||
.request(api::image::PullImage {
|
||||
let log = match periphery_client(server)
|
||||
.await?
|
||||
.request(api::docker::PullImage {
|
||||
name: image,
|
||||
account,
|
||||
token,
|
||||
@@ -356,10 +386,19 @@ pub async fn pull_deployment_inner(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PullDeployment {
|
||||
#[instrument(name = "PullDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PullDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -390,10 +429,19 @@ impl Resolve<ExecuteArgs> for PullDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
#[instrument(name = "StartDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StartDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -414,7 +462,8 @@ impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StartContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -437,10 +486,19 @@ impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
#[instrument(name = "RestartDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RestartDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -461,7 +519,8 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RestartContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -486,10 +545,19 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
#[instrument(name = "PauseDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PauseDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -510,7 +578,8 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::PauseContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -533,10 +602,19 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
#[instrument(name = "UnpauseDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"UnpauseDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -557,7 +635,8 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::UnpauseContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -582,10 +661,21 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StopDeployment {
|
||||
#[instrument(name = "StopDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StopDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -606,7 +696,8 @@ impl Resolve<ExecuteArgs> for StopDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StopContainer {
|
||||
name: deployment.name,
|
||||
signal: self
|
||||
@@ -648,10 +739,18 @@ impl super::BatchExecute for BatchDestroyDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchDestroyDeployment {
|
||||
#[instrument(name = "BatchDestroyDeployment", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchDestroyDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchDestroyDeployment>(
|
||||
@@ -664,10 +763,21 @@ impl Resolve<ExecuteArgs> for BatchDestroyDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DestroyDeployment {
|
||||
#[instrument(name = "DestroyDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DestroyDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -688,7 +798,8 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RemoveContainer {
|
||||
name: deployment.name,
|
||||
signal: self
|
||||
|
||||
@@ -1,18 +1,24 @@
|
||||
use std::sync::OnceLock;
|
||||
use std::{fmt::Write as _, sync::OnceLock};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use command::run_komodo_command;
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use command::run_komodo_standard_command;
|
||||
use database::{
|
||||
bson::{Document, doc},
|
||||
mungos::find::find_collect,
|
||||
};
|
||||
use formatting::{bold, format_serror};
|
||||
use futures_util::{StreamExt, stream::FuturesOrdered};
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
BackupCoreDatabase, ClearRepoCache, GlobalAutoUpdate,
|
||||
RotateAllServerKeys, RotateCoreKeys,
|
||||
},
|
||||
entities::{
|
||||
deployment::DeploymentState, server::ServerState,
|
||||
stack::StackState,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
@@ -22,8 +28,9 @@ use crate::{
|
||||
api::execute::{
|
||||
ExecuteArgs, pull_deployment_inner, pull_stack_inner,
|
||||
},
|
||||
config::core_config,
|
||||
helpers::update::update_update,
|
||||
config::{core_config, core_keys},
|
||||
helpers::{periphery_client, update::update_update},
|
||||
resource::rotate_server_keys,
|
||||
state::{
|
||||
db_client, deployment_status_cache, server_status_cache,
|
||||
stack_status_cache,
|
||||
@@ -38,13 +45,17 @@ fn clear_repo_cache_lock() -> &'static Mutex<()> {
|
||||
|
||||
impl Resolve<ExecuteArgs> for ClearRepoCache {
|
||||
#[instrument(
|
||||
name = "ClearRepoCache",
|
||||
skip(user, update),
|
||||
fields(user_id = user.id, update_id = update.id)
|
||||
"ClearRepoCache",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
@@ -113,13 +124,17 @@ fn backup_database_lock() -> &'static Mutex<()> {
|
||||
|
||||
impl Resolve<ExecuteArgs> for BackupCoreDatabase {
|
||||
#[instrument(
|
||||
name = "BackupCoreDatabase",
|
||||
skip(user, update),
|
||||
fields(user_id = user.id, update_id = update.id)
|
||||
"BackupCoreDatabase",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
@@ -136,7 +151,7 @@ impl Resolve<ExecuteArgs> for BackupCoreDatabase {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let res = run_komodo_command(
|
||||
let res = run_komodo_standard_command(
|
||||
"Backup Core Database",
|
||||
None,
|
||||
"km database backup --yes",
|
||||
@@ -162,13 +177,17 @@ fn global_update_lock() -> &'static Mutex<()> {
|
||||
|
||||
impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
|
||||
#[instrument(
|
||||
name = "GlobalAutoUpdate",
|
||||
skip(user, update),
|
||||
fields(user_id = user.id, update_id = update.id)
|
||||
"GlobalAutoUpdate",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
@@ -317,3 +336,253 @@ impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Makes sure the method can only be called once at a time
|
||||
fn global_rotate_lock() -> &'static Mutex<()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RotateAllServerKeys {
|
||||
#[instrument(
|
||||
"RotateAllServerKeys",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = global_rotate_lock()
|
||||
.try_lock()
|
||||
.context("Key rotation already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let mut servers = db_client()
|
||||
.servers
|
||||
.find(Document::new())
|
||||
.await
|
||||
.context("Failed to query servers from database")?;
|
||||
|
||||
let server_status_cache = server_status_cache();
|
||||
|
||||
let mut log = String::new();
|
||||
|
||||
while let Some(server) = servers.next().await {
|
||||
let server = match server {
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
warn!("Failed to parse Server | {e:#}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if !server.config.auto_rotate_keys {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Key Rotation Disabled ⚙️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
let Some(status) = server_status_cache.get(&server.id).await
|
||||
else {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: No Status ⚠️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
};
|
||||
match status.state {
|
||||
ServerState::Disabled => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Disabled ⚙️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
ServerState::NotOk => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Not Ok ⚠️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
match rotate_server_keys(&server).await {
|
||||
Ok(_) => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nRotated keys for {} ✅",
|
||||
bold(&server.name)
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Key Rotation Failure",
|
||||
format_serror(
|
||||
&e.context(format!(
|
||||
"Failed to rotate {} keys",
|
||||
bold(&server.name)
|
||||
))
|
||||
.into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
update.push_simple_log("Rotate Server Keys", log);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RotateCoreKeys {
|
||||
#[instrument(
|
||||
"RotateCoreKeys",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
force = self.force,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = global_rotate_lock()
|
||||
.try_lock()
|
||||
.context("Key rotation already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let core_keys = core_keys();
|
||||
|
||||
if !core_keys.rotatable() {
|
||||
return Err(anyhow!("Core `private_key` must be pointing to file, for example 'file:/config/keys/core.key'").into());
|
||||
};
|
||||
|
||||
let server_status_cache = server_status_cache();
|
||||
let servers =
|
||||
find_collect(&db_client().servers, Document::new(), None)
|
||||
.await
|
||||
.context("Failed to query servers from database")?
|
||||
.into_iter()
|
||||
.map(|server| async move {
|
||||
let state = server_status_cache
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| s.state)
|
||||
.unwrap_or(ServerState::NotOk);
|
||||
(server, state)
|
||||
})
|
||||
.collect::<FuturesOrdered<_>>()
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
if !self.force
|
||||
&& let Some((server, _)) = servers
|
||||
.iter()
|
||||
.find(|(_, state)| matches!(state, ServerState::NotOk))
|
||||
{
|
||||
return Err(
|
||||
anyhow!("Server {} is NotOk, stopping key rotation. Pass `force: true` to continue anyways.", server.name).into(),
|
||||
);
|
||||
}
|
||||
|
||||
let public_key = core_keys.rotate().await?.into_inner();
|
||||
|
||||
info!("New Public Key: {public_key}");
|
||||
|
||||
let mut log = format!("New Public Key: {public_key}\n");
|
||||
|
||||
for (server, state) in servers {
|
||||
match state {
|
||||
ServerState::Disabled => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Disabled ⚙️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
ServerState::NotOk => {
|
||||
// Shouldn't be reached unless 'force: true'
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Not Ok ⚠️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let res = periphery
|
||||
.request(api::keys::RotateCorePublicKey {
|
||||
public_key: public_key.clone(),
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(_) => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nRotated key for {} ✅",
|
||||
bold(&server.name)
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Key Rotation Failure",
|
||||
format_serror(
|
||||
&e.context(format!(
|
||||
"Failed to rotate for {}. The new Core public key will have to be added manually.",
|
||||
bold(&server.name)
|
||||
))
|
||||
.into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
update.push_simple_log("Rotate Core Keys", log);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use std::{pin::Pin, time::Instant};
|
||||
use std::pin::Pin;
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::{
|
||||
@@ -8,7 +8,7 @@ use axum_extra::{TypedHeader, headers::ContentType};
|
||||
use database::mungos::by_id::find_one_by_id;
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use formatting::format_serror;
|
||||
use futures::future::join_all;
|
||||
use futures_util::future::join_all;
|
||||
use komodo_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
@@ -23,6 +23,7 @@ use response::JsonString;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use strum::Display;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -51,6 +52,9 @@ pub use {
|
||||
};
|
||||
|
||||
pub struct ExecuteArgs {
|
||||
/// The execution id.
|
||||
/// Unique for every /execute call.
|
||||
pub id: Uuid,
|
||||
pub user: User,
|
||||
pub update: Update,
|
||||
}
|
||||
@@ -59,7 +63,7 @@ pub struct ExecuteArgs {
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
|
||||
)]
|
||||
#[variant_derive(Debug)]
|
||||
#[variant_derive(Debug, Display)]
|
||||
#[args(ExecuteArgs)]
|
||||
#[response(JsonString)]
|
||||
#[error(serror::Error)]
|
||||
@@ -138,17 +142,19 @@ pub enum ExecuteRequest {
|
||||
RunAction(RunAction),
|
||||
BatchRunAction(BatchRunAction),
|
||||
|
||||
// ==== SYNC ====
|
||||
RunSync(RunSync),
|
||||
|
||||
// ==== ALERTER ====
|
||||
TestAlerter(TestAlerter),
|
||||
SendAlert(SendAlert),
|
||||
|
||||
// ==== SYNC ====
|
||||
RunSync(RunSync),
|
||||
|
||||
// ==== MAINTENANCE ====
|
||||
ClearRepoCache(ClearRepoCache),
|
||||
BackupCoreDatabase(BackupCoreDatabase),
|
||||
GlobalAutoUpdate(GlobalAutoUpdate),
|
||||
RotateAllServerKeys(RotateAllServerKeys),
|
||||
RotateCoreKeys(RotateCoreKeys),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
@@ -201,7 +207,7 @@ pub fn inner_handler(
|
||||
>,
|
||||
> {
|
||||
Box::pin(async move {
|
||||
let req_id = Uuid::new_v4();
|
||||
let task_id = Uuid::new_v4();
|
||||
|
||||
// Need to validate no cancel is active before any update is created.
|
||||
// This ensures no double update created if Cancel is called more than once for the same request.
|
||||
@@ -217,14 +223,14 @@ pub fn inner_handler(
|
||||
// here either.
|
||||
if update.operation == Operation::None {
|
||||
return Ok(ExecutionResult::Batch(
|
||||
task(req_id, request, user, update).await?,
|
||||
task(task_id, request, user, update).await?,
|
||||
));
|
||||
}
|
||||
|
||||
// Spawn a task for the execution which continues
|
||||
// running after this method returns.
|
||||
let handle =
|
||||
tokio::spawn(task(req_id, request, user, update.clone()));
|
||||
tokio::spawn(task(task_id, request, user, update.clone()));
|
||||
|
||||
// Spawns another task to monitor the first for failures,
|
||||
// and add the log to Update about it (which primary task can't do because it errored out)
|
||||
@@ -233,11 +239,11 @@ pub fn inner_handler(
|
||||
async move {
|
||||
let log = match handle.await {
|
||||
Ok(Err(e)) => {
|
||||
warn!("/execute request {req_id} task error: {e:#}",);
|
||||
warn!("/execute request {task_id} task error: {e:#}",);
|
||||
Log::error("Task Error", format_serror(&e.into()))
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("/execute request {req_id} spawn error: {e:?}",);
|
||||
warn!("/execute request {task_id} spawn error: {e:?}",);
|
||||
Log::error("Spawn Error", format!("{e:#?}"))
|
||||
}
|
||||
_ => return,
|
||||
@@ -271,40 +277,33 @@ pub fn inner_handler(
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteRequest",
|
||||
skip(user, update),
|
||||
fields(
|
||||
user_id = user.id,
|
||||
update_id = update.id,
|
||||
request = format!("{:?}", request.extract_variant()))
|
||||
)
|
||||
]
|
||||
async fn task(
|
||||
req_id: Uuid,
|
||||
id: Uuid,
|
||||
request: ExecuteRequest,
|
||||
user: User,
|
||||
update: Update,
|
||||
) -> anyhow::Result<String> {
|
||||
info!("/execute request {req_id} | user: {}", user.username);
|
||||
let timer = Instant::now();
|
||||
let variant = request.extract_variant();
|
||||
|
||||
let res = match request.resolve(&ExecuteArgs { user, update }).await
|
||||
{
|
||||
Err(e) => Err(e.error),
|
||||
Ok(JsonString::Err(e)) => Err(
|
||||
anyhow::Error::from(e).context("failed to serialize response"),
|
||||
),
|
||||
Ok(JsonString::Ok(res)) => Ok(res),
|
||||
};
|
||||
info!(
|
||||
"/execute request {id} | {variant} | user: {}",
|
||||
user.username
|
||||
);
|
||||
|
||||
let res =
|
||||
match request.resolve(&ExecuteArgs { user, update, id }).await {
|
||||
Err(e) => Err(e.error),
|
||||
Ok(JsonString::Err(e)) => Err(
|
||||
anyhow::Error::from(e)
|
||||
.context("failed to serialize response"),
|
||||
),
|
||||
Ok(JsonString::Ok(res)) => Ok(res),
|
||||
};
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!("/execute request {req_id} error: {e:#}");
|
||||
warn!("/execute request {id} error: {e:#}");
|
||||
}
|
||||
|
||||
let elapsed = timer.elapsed();
|
||||
debug!("/execute request {req_id} | resolve time: {elapsed:?}");
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
@@ -313,6 +312,7 @@ trait BatchExecute {
|
||||
fn single_request(name: String) -> ExecuteRequest;
|
||||
}
|
||||
|
||||
#[instrument("BatchExecute", skip(user))]
|
||||
async fn batch_execute<E: BatchExecute>(
|
||||
pattern: &str,
|
||||
user: &User,
|
||||
@@ -325,6 +325,7 @@ async fn batch_execute<E: BatchExecute>(
|
||||
&[],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let futures = resources.into_iter().map(|resource| {
|
||||
let user = user.clone();
|
||||
async move {
|
||||
|
||||
@@ -38,7 +38,11 @@ impl super::BatchExecute for BatchRunProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchRunProcedure {
|
||||
#[instrument(name = "BatchRunProcedure", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchRunProcedure",
|
||||
skip_all,
|
||||
fields(operator = user.id)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
@@ -51,10 +55,19 @@ impl Resolve<ExecuteArgs> for BatchRunProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunProcedure {
|
||||
#[instrument(name = "RunProcedure", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RunProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
procedure = self.procedure,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
Ok(
|
||||
resolve_inner(self.procedure, user.clone(), update.clone())
|
||||
@@ -146,7 +159,6 @@ fn resolve_inner(
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if !update.success && procedure.config.failure_alert {
|
||||
warn!("procedure unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
|
||||
@@ -30,7 +30,7 @@ use crate::{
|
||||
alert::send_alerts,
|
||||
api::write::WriteArgs,
|
||||
helpers::{
|
||||
builder::{cleanup_builder_instance, get_builder_periphery},
|
||||
builder::{cleanup_builder_instance, connect_builder_periphery},
|
||||
channel::repo_cancel_channel,
|
||||
git_token, periphery_client,
|
||||
query::{VariablesAndSecrets, get_variables_and_secrets},
|
||||
@@ -51,10 +51,18 @@ impl super::BatchExecute for BatchCloneRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchCloneRepo {
|
||||
#[instrument(name = "BatchCloneRepo", skip( user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchCloneRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchCloneRepo>(&self.pattern, user)
|
||||
@@ -64,10 +72,19 @@ impl Resolve<ExecuteArgs> for BatchCloneRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for CloneRepo {
|
||||
#[instrument(name = "CloneRepo", skip( user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"CloneRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
@@ -105,7 +122,7 @@ impl Resolve<ExecuteArgs> for CloneRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
@@ -165,10 +182,18 @@ impl super::BatchExecute for BatchPullRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchPullRepo {
|
||||
#[instrument(name = "BatchPullRepo", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchPullRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchPullRepo>(&self.pattern, user)
|
||||
@@ -178,10 +203,19 @@ impl Resolve<ExecuteArgs> for BatchPullRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PullRepo {
|
||||
#[instrument(name = "PullRepo", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PullRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
@@ -220,7 +254,7 @@ impl Resolve<ExecuteArgs> for PullRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
@@ -275,7 +309,11 @@ impl Resolve<ExecuteArgs> for PullRepo {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip_all, fields(update_id = update.id))]
|
||||
#[instrument(
|
||||
"HandleRepoEarlyReturn",
|
||||
skip_all,
|
||||
fields(update_id = update.id)
|
||||
)]
|
||||
async fn handle_repo_update_return(
|
||||
update: Update,
|
||||
) -> serror::Result<Update> {
|
||||
@@ -297,7 +335,7 @@ async fn handle_repo_update_return(
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
#[instrument("UpdateLastPulledTime")]
|
||||
async fn update_last_pulled_time(repo_name: &str) {
|
||||
let res = db_client()
|
||||
.repos
|
||||
@@ -321,10 +359,18 @@ impl super::BatchExecute for BatchBuildRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchBuildRepo {
|
||||
#[instrument(name = "BatchBuildRepo", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchBuildRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchBuildRepo>(&self.pattern, user)
|
||||
@@ -334,10 +380,19 @@ impl Resolve<ExecuteArgs> for BatchBuildRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
#[instrument(name = "BuildRepo", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"BuildRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
@@ -419,7 +474,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
|
||||
// GET BUILDER PERIPHERY
|
||||
|
||||
let (periphery, cleanup_data) = match get_builder_periphery(
|
||||
let (periphery, cleanup_data) = match connect_builder_periphery(
|
||||
repo.name.clone(),
|
||||
None,
|
||||
builder,
|
||||
@@ -463,7 +518,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
_ = cancel.cancelled() => {
|
||||
debug!("build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(cleanup_data, &mut update)
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
info!("builder cleaned up");
|
||||
return handle_builder_early_return(update, repo.id, repo.name, true).await
|
||||
@@ -510,7 +565,8 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
|
||||
// If building on temporary cloud server (AWS),
|
||||
// this will terminate the server.
|
||||
cleanup_builder_instance(cleanup_data, &mut update).await;
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
@@ -530,7 +586,6 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if !update.success {
|
||||
warn!("repo build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
@@ -553,7 +608,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(update))]
|
||||
#[instrument("HandleRepoBuildEarlyReturn", skip(update))]
|
||||
async fn handle_builder_early_return(
|
||||
mut update: Update,
|
||||
repo_id: String,
|
||||
@@ -577,7 +632,6 @@ async fn handle_builder_early_return(
|
||||
}
|
||||
update_update(update.clone()).await?;
|
||||
if !update.success && !is_cancel {
|
||||
warn!("repo build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
@@ -598,7 +652,6 @@ async fn handle_builder_early_return(
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn validate_cancel_repo_build(
|
||||
request: &ExecuteRequest,
|
||||
) -> anyhow::Result<()> {
|
||||
@@ -648,10 +701,19 @@ pub async fn validate_cancel_repo_build(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for CancelRepoBuild {
|
||||
#[instrument(name = "CancelRepoBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"CancelRepoBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
@@ -708,6 +770,13 @@ impl Resolve<ExecuteArgs> for CancelRepoBuild {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
"Interpolate",
|
||||
skip_all,
|
||||
fields(
|
||||
skip_secret_interp = repo.config.skip_secret_interp
|
||||
)
|
||||
)]
|
||||
async fn interpolate(
|
||||
repo: &mut Repo,
|
||||
update: &mut Update,
|
||||
|
||||
@@ -22,10 +22,20 @@ use crate::{
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for StartContainer {
|
||||
#[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StartContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -50,7 +60,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StartContainer {
|
||||
@@ -76,10 +86,20 @@ impl Resolve<ExecuteArgs> for StartContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
#[instrument(name = "RestartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RestartContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -104,7 +124,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RestartContainer {
|
||||
@@ -132,10 +152,20 @@ impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
#[instrument(name = "PauseContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PauseContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -160,7 +190,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::PauseContainer {
|
||||
@@ -186,10 +216,20 @@ impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
#[instrument(name = "UnpauseContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"UnpauseContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -214,7 +254,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::UnpauseContainer {
|
||||
@@ -242,10 +282,22 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StopContainer {
|
||||
#[instrument(name = "StopContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StopContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -270,7 +322,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StopContainer {
|
||||
@@ -298,10 +350,22 @@ impl Resolve<ExecuteArgs> for StopContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
#[instrument(name = "DestroyContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DestroyContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let DestroyContainer {
|
||||
server,
|
||||
@@ -332,7 +396,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RemoveContainer {
|
||||
@@ -360,10 +424,19 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
#[instrument(name = "StartAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StartAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -387,7 +460,8 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StartAllContainers {})
|
||||
.await
|
||||
.context("failed to start all containers on host")?;
|
||||
@@ -410,10 +484,19 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
#[instrument(name = "RestartAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RestartAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -437,7 +520,8 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RestartAllContainers {})
|
||||
.await
|
||||
.context("failed to restart all containers on host")?;
|
||||
@@ -462,10 +546,19 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
#[instrument(name = "PauseAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PauseAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -489,7 +582,8 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::PauseAllContainers {})
|
||||
.await
|
||||
.context("failed to pause all containers on host")?;
|
||||
@@ -512,10 +606,19 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
#[instrument(name = "UnpauseAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"UnpauseAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -539,7 +642,8 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::UnpauseAllContainers {})
|
||||
.await
|
||||
.context("failed to unpause all containers on host")?;
|
||||
@@ -564,10 +668,19 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
#[instrument(name = "StopAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StopAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -591,7 +704,8 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StopAllContainers {})
|
||||
.await
|
||||
.context("failed to stop all containers on host")?;
|
||||
@@ -614,10 +728,19 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
#[instrument(name = "PruneContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -641,7 +764,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::PruneContainers {})
|
||||
@@ -670,10 +793,20 @@ impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
#[instrument(name = "DeleteNetwork", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DeleteNetwork",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
network = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -686,10 +819,10 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::network::DeleteNetwork {
|
||||
.request(api::docker::DeleteNetwork {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -721,10 +854,19 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
#[instrument(name = "PruneNetworks", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneNetworks",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -748,10 +890,10 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::network::PruneNetworks {})
|
||||
.request(api::docker::PruneNetworks {})
|
||||
.await
|
||||
.context(format!(
|
||||
"failed to prune networks on server {}",
|
||||
@@ -775,10 +917,20 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
#[instrument(name = "DeleteImage", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DeleteImage",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
image = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -791,10 +943,10 @@ impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::image::DeleteImage {
|
||||
.request(api::docker::DeleteImage {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -823,10 +975,19 @@ impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneImages {
|
||||
#[instrument(name = "PruneImages", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneImages",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -850,10 +1011,10 @@ impl Resolve<ExecuteArgs> for PruneImages {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::image::PruneImages {}).await {
|
||||
match periphery.request(api::docker::PruneImages {}).await {
|
||||
Ok(log) => log,
|
||||
Err(e) => Log::error(
|
||||
"prune images",
|
||||
@@ -875,10 +1036,20 @@ impl Resolve<ExecuteArgs> for PruneImages {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
#[instrument(name = "DeleteVolume", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DeleteVolume",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
volume = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -891,10 +1062,10 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::volume::DeleteVolume {
|
||||
.request(api::docker::DeleteVolume {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -926,10 +1097,19 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
#[instrument(name = "PruneVolumes", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneVolumes",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -953,10 +1133,10 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::volume::PruneVolumes {}).await {
|
||||
match periphery.request(api::docker::PruneVolumes {}).await {
|
||||
Ok(log) => log,
|
||||
Err(e) => Log::error(
|
||||
"prune volumes",
|
||||
@@ -978,10 +1158,19 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
#[instrument(name = "PruneDockerBuilders", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneDockerBuilders",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -1005,7 +1194,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::build::PruneBuilders {}).await {
|
||||
@@ -1030,10 +1219,19 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
#[instrument(name = "PruneBuildx", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneBuildx",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -1057,7 +1255,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::build::PruneBuildx {}).await {
|
||||
@@ -1082,10 +1280,19 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneSystem {
|
||||
#[instrument(name = "PruneSystem", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneSystem",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
@@ -1109,7 +1316,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery.request(api::PruneSystem {}).await {
|
||||
Ok(log) => log,
|
||||
|
||||
@@ -22,6 +22,7 @@ use komodo_client::{
|
||||
};
|
||||
use periphery_client::api::compose::*;
|
||||
use resolver_api::Resolve;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
api::write::WriteArgs,
|
||||
@@ -54,10 +55,18 @@ impl super::BatchExecute for BatchDeployStack {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchDeployStack {
|
||||
#[instrument(name = "BatchDeployStack", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchDeployStack",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchDeployStack>(&self.pattern, user)
|
||||
@@ -67,10 +76,21 @@ impl Resolve<ExecuteArgs> for BatchDeployStack {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DeployStack {
|
||||
#[instrument(name = "DeployStack", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DeployStack",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
stack = self.stack,
|
||||
services = format!("{:?}", self.services),
|
||||
stop_time = self.stop_time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (mut stack, server) = get_stack_and_server(
|
||||
&self.stack,
|
||||
@@ -155,7 +175,8 @@ impl Resolve<ExecuteArgs> for DeployStack {
|
||||
compose_config,
|
||||
commit_hash,
|
||||
commit_message,
|
||||
} = periphery_client(&server)?
|
||||
} = periphery_client(&server)
|
||||
.await?
|
||||
.request(ComposeUp {
|
||||
stack: stack.clone(),
|
||||
services: self.services,
|
||||
@@ -280,10 +301,18 @@ impl super::BatchExecute for BatchDeployStackIfChanged {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchDeployStackIfChanged {
|
||||
#[instrument(name = "BatchDeployStackIfChanged", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchDeployStackIfChanged",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchDeployStackIfChanged>(
|
||||
@@ -296,10 +325,20 @@ impl Resolve<ExecuteArgs> for BatchDeployStackIfChanged {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DeployStackIfChanged {
|
||||
#[instrument(name = "DeployStackIfChanged", skip(user, update), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"DeployStackIfChanged",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
stack = self.stack,
|
||||
stop_time = self.stop_time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
@@ -357,6 +396,7 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
|
||||
.resolve(&ExecuteArgs {
|
||||
user: user.clone(),
|
||||
update,
|
||||
id: *id,
|
||||
})
|
||||
.await
|
||||
}
|
||||
@@ -466,6 +506,14 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
"DeployStackServices",
|
||||
skip_all,
|
||||
fields(
|
||||
stack = stack,
|
||||
services = format!("{services:?}")
|
||||
)
|
||||
)]
|
||||
async fn deploy_services(
|
||||
stack: String,
|
||||
services: Vec<String>,
|
||||
@@ -487,10 +535,19 @@ async fn deploy_services(
|
||||
.resolve(&ExecuteArgs {
|
||||
user: user.clone(),
|
||||
update,
|
||||
id: Uuid::new_v4(),
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
"RestartStackServices",
|
||||
skip_all,
|
||||
fields(
|
||||
stack = stack,
|
||||
services = format!("{services:?}")
|
||||
)
|
||||
)]
|
||||
async fn restart_services(
|
||||
stack: String,
|
||||
services: Vec<String>,
|
||||
@@ -509,6 +566,7 @@ async fn restart_services(
|
||||
.resolve(&ExecuteArgs {
|
||||
user: user.clone(),
|
||||
update,
|
||||
id: Uuid::new_v4(),
|
||||
})
|
||||
.await
|
||||
}
|
||||
@@ -525,6 +583,11 @@ async fn restart_services(
|
||||
/// Changes to config files after restart is applied should
|
||||
/// be taken as the deployed contents, otherwise next changed check
|
||||
/// will restart service again for no reason.
|
||||
#[instrument(
|
||||
"UpdateStackDeployedContents",
|
||||
skip_all,
|
||||
fields(stack = id)
|
||||
)]
|
||||
async fn update_deployed_contents_with_latest(
|
||||
id: &str,
|
||||
contents: Option<Vec<StackRemoteFileContents>>,
|
||||
@@ -662,10 +725,18 @@ impl super::BatchExecute for BatchPullStack {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchPullStack {
|
||||
#[instrument(name = "BatchPullStack", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchPullStack",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchPullStack>(&self.pattern, user)
|
||||
@@ -699,6 +770,14 @@ async fn maybe_pull_stack(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
"PullStackInner",
|
||||
skip_all,
|
||||
fields(
|
||||
stack = stack.id,
|
||||
services = format!("{services:?}"),
|
||||
)
|
||||
)]
|
||||
pub async fn pull_stack_inner(
|
||||
mut stack: Stack,
|
||||
services: Vec<String>,
|
||||
@@ -749,7 +828,8 @@ pub async fn pull_stack_inner(
|
||||
Default::default()
|
||||
};
|
||||
|
||||
let res = periphery_client(server)?
|
||||
let res = periphery_client(server)
|
||||
.await?
|
||||
.request(ComposePull {
|
||||
stack,
|
||||
services,
|
||||
@@ -767,10 +847,20 @@ pub async fn pull_stack_inner(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PullStack {
|
||||
#[instrument(name = "PullStack", skip(user, update), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"PullStack",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
stack = self.stack,
|
||||
services = format!("{:?}", self.services),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (stack, server) = get_stack_and_server(
|
||||
&self.stack,
|
||||
@@ -820,10 +910,20 @@ impl Resolve<ExecuteArgs> for PullStack {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StartStack {
|
||||
#[instrument(name = "StartStack", skip(user, update), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"StartStack",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
stack = self.stack,
|
||||
services = format!("{:?}", self.services),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
execute_compose::<StartStack>(
|
||||
&self.stack,
|
||||
@@ -839,10 +939,20 @@ impl Resolve<ExecuteArgs> for StartStack {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RestartStack {
|
||||
#[instrument(name = "RestartStack", skip(user, update), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"RestartStack",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
stack = self.stack,
|
||||
services = format!("{:?}", self.services),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
execute_compose::<RestartStack>(
|
||||
&self.stack,
|
||||
@@ -860,10 +970,20 @@ impl Resolve<ExecuteArgs> for RestartStack {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PauseStack {
|
||||
#[instrument(name = "PauseStack", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PauseStack",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
stack = self.stack,
|
||||
services = format!("{:?}", self.services),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
execute_compose::<PauseStack>(
|
||||
&self.stack,
|
||||
@@ -879,10 +999,20 @@ impl Resolve<ExecuteArgs> for PauseStack {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for UnpauseStack {
|
||||
#[instrument(name = "UnpauseStack", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"UnpauseStack",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
stack = self.stack,
|
||||
services = format!("{:?}", self.services),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
execute_compose::<UnpauseStack>(
|
||||
&self.stack,
|
||||
@@ -898,10 +1028,20 @@ impl Resolve<ExecuteArgs> for UnpauseStack {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StopStack {
|
||||
#[instrument(name = "StopStack", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StopStack",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
stack = self.stack,
|
||||
services = format!("{:?}", self.services),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
execute_compose::<StopStack>(
|
||||
&self.stack,
|
||||
@@ -929,10 +1069,18 @@ impl super::BatchExecute for BatchDestroyStack {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchDestroyStack {
|
||||
#[instrument(name = "BatchDestroyStack", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchDestroyStack",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
super::batch_execute::<BatchDestroyStack>(&self.pattern, user)
|
||||
.await
|
||||
@@ -941,10 +1089,22 @@ impl Resolve<ExecuteArgs> for BatchDestroyStack {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DestroyStack {
|
||||
#[instrument(name = "DestroyStack", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DestroyStack",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
stack = self.stack,
|
||||
services = format!("{:?}", self.services),
|
||||
remove_orphans = self.remove_orphans,
|
||||
stop_time = self.stop_time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
execute_compose::<DestroyStack>(
|
||||
&self.stack,
|
||||
@@ -960,10 +1120,21 @@ impl Resolve<ExecuteArgs> for DestroyStack {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunStackService {
|
||||
#[instrument(name = "RunStackService", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RunStackService",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
stack = self.stack,
|
||||
service = self.service,
|
||||
request = format!("{self:?}"),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (mut stack, server) = get_stack_and_server(
|
||||
&self.stack,
|
||||
@@ -1022,7 +1193,8 @@ impl Resolve<ExecuteArgs> for RunStackService {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
let log = periphery_client(&server)?
|
||||
let log = periphery_client(&server)
|
||||
.await?
|
||||
.request(ComposeRun {
|
||||
stack,
|
||||
repo,
|
||||
|
||||
@@ -49,10 +49,21 @@ use crate::{
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunSync {
|
||||
#[instrument(name = "RunSync", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RunSync",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
sync = self.sync,
|
||||
resource_type = format!("{:?}", self.resource_type),
|
||||
resources = format!("{:?}", self.resources),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let RunSync {
|
||||
sync,
|
||||
@@ -125,34 +136,10 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
};
|
||||
match ObjectId::from_str(&name_or_id) {
|
||||
Ok(_) => match resource_type {
|
||||
ResourceTargetVariant::Alerter => all_resources
|
||||
.alerters
|
||||
ResourceTargetVariant::Swarm => all_resources
|
||||
.swarms
|
||||
.get(&name_or_id)
|
||||
.map(|a| a.name.clone()),
|
||||
ResourceTargetVariant::Build => all_resources
|
||||
.builds
|
||||
.get(&name_or_id)
|
||||
.map(|b| b.name.clone()),
|
||||
ResourceTargetVariant::Builder => all_resources
|
||||
.builders
|
||||
.get(&name_or_id)
|
||||
.map(|b| b.name.clone()),
|
||||
ResourceTargetVariant::Deployment => all_resources
|
||||
.deployments
|
||||
.get(&name_or_id)
|
||||
.map(|d| d.name.clone()),
|
||||
ResourceTargetVariant::Procedure => all_resources
|
||||
.procedures
|
||||
.get(&name_or_id)
|
||||
.map(|p| p.name.clone()),
|
||||
ResourceTargetVariant::Action => all_resources
|
||||
.actions
|
||||
.get(&name_or_id)
|
||||
.map(|p| p.name.clone()),
|
||||
ResourceTargetVariant::Repo => all_resources
|
||||
.repos
|
||||
.get(&name_or_id)
|
||||
.map(|r| r.name.clone()),
|
||||
.map(|s| s.name.clone()),
|
||||
ResourceTargetVariant::Server => all_resources
|
||||
.servers
|
||||
.get(&name_or_id)
|
||||
@@ -161,10 +148,38 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
.stacks
|
||||
.get(&name_or_id)
|
||||
.map(|s| s.name.clone()),
|
||||
ResourceTargetVariant::Deployment => all_resources
|
||||
.deployments
|
||||
.get(&name_or_id)
|
||||
.map(|d| d.name.clone()),
|
||||
ResourceTargetVariant::Build => all_resources
|
||||
.builds
|
||||
.get(&name_or_id)
|
||||
.map(|b| b.name.clone()),
|
||||
ResourceTargetVariant::Repo => all_resources
|
||||
.repos
|
||||
.get(&name_or_id)
|
||||
.map(|r| r.name.clone()),
|
||||
ResourceTargetVariant::Procedure => all_resources
|
||||
.procedures
|
||||
.get(&name_or_id)
|
||||
.map(|p| p.name.clone()),
|
||||
ResourceTargetVariant::Action => all_resources
|
||||
.actions
|
||||
.get(&name_or_id)
|
||||
.map(|p| p.name.clone()),
|
||||
ResourceTargetVariant::ResourceSync => all_resources
|
||||
.syncs
|
||||
.get(&name_or_id)
|
||||
.map(|s| s.name.clone()),
|
||||
ResourceTargetVariant::Builder => all_resources
|
||||
.builders
|
||||
.get(&name_or_id)
|
||||
.map(|b| b.name.clone()),
|
||||
ResourceTargetVariant::Alerter => all_resources
|
||||
.alerters
|
||||
.get(&name_or_id)
|
||||
.map(|a| a.name.clone()),
|
||||
ResourceTargetVariant::System => None,
|
||||
},
|
||||
Err(_) => Some(name_or_id),
|
||||
|
||||
@@ -9,14 +9,14 @@ use komodo_client::{
|
||||
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
|
||||
},
|
||||
entities::{
|
||||
deployment::Deployment, server::Server, stack::Stack,
|
||||
sync::ResourceSync,
|
||||
deployment::Deployment, permission::PermissionLevel,
|
||||
server::Server, stack::Stack, sync::ResourceSync,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config, permission::get_resource_ids_for_user,
|
||||
config::core_config, permission::list_resource_ids_for_user,
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
@@ -31,14 +31,29 @@ impl Resolve<ReadArgs> for ListAlerts {
|
||||
) -> serror::Result<ListAlertsResponse> {
|
||||
let mut query = self.query.unwrap_or_default();
|
||||
if !user.admin && !core_config().transparent_mode {
|
||||
let server_ids =
|
||||
get_resource_ids_for_user::<Server>(user).await?;
|
||||
let stack_ids =
|
||||
get_resource_ids_for_user::<Stack>(user).await?;
|
||||
let deployment_ids =
|
||||
get_resource_ids_for_user::<Deployment>(user).await?;
|
||||
let sync_ids =
|
||||
get_resource_ids_for_user::<ResourceSync>(user).await?;
|
||||
let (server_ids, stack_ids, deployment_ids, sync_ids) = tokio::try_join!(
|
||||
list_resource_ids_for_user::<Server>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
),
|
||||
list_resource_ids_for_user::<Stack>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
),
|
||||
list_resource_ids_for_user::<Deployment>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
),
|
||||
list_resource_ids_for_user::<ResourceSync>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
)?;
|
||||
// All of the vecs will be non-none if !admin and !transparent mode.
|
||||
query.extend(doc! {
|
||||
"$or": [
|
||||
{ "target.type": "Server", "target.id": { "$in": &server_ids } },
|
||||
|
||||
@@ -11,8 +11,10 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags, permission::get_check_permissions,
|
||||
resource, state::db_client,
|
||||
helpers::query::get_all_tags,
|
||||
permission::{get_check_permissions, list_resource_ids_for_user},
|
||||
resource,
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -82,9 +84,11 @@ impl Resolve<ReadArgs> for GetAlertersSummary {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetAlertersSummaryResponse> {
|
||||
let query = match resource::get_resource_object_ids_for_user::<
|
||||
Alerter,
|
||||
>(user)
|
||||
let query = match list_resource_ids_for_user::<Alerter>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
|
||||
@@ -6,13 +6,12 @@ use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use futures::TryStreamExt;
|
||||
use futures_util::TryStreamExt;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
Operation,
|
||||
build::{Build, BuildActionState, BuildListItem, BuildState},
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
update::UpdateStatus,
|
||||
},
|
||||
@@ -20,13 +19,10 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_all_tags,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{
|
||||
action_states, build_state_cache, db_client, github_client,
|
||||
},
|
||||
state::{action_states, build_state_cache, db_client},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -306,81 +302,3 @@ impl Resolve<ReadArgs> for ListCommonBuildExtraArgs {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetBuildWebhookEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetBuildWebhookEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: false,
|
||||
enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.git_provider != "github.com"
|
||||
|| build.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: false,
|
||||
enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = build.config.repo.split('/');
|
||||
let owner = split.next().context("Build repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: false,
|
||||
enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Build repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: true,
|
||||
enabled: true,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: true,
|
||||
enabled: false,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,8 +11,10 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags, permission::get_check_permissions,
|
||||
resource, state::db_client,
|
||||
helpers::query::get_all_tags,
|
||||
permission::{get_check_permissions, list_resource_ids_for_user},
|
||||
resource,
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -82,9 +84,11 @@ impl Resolve<ReadArgs> for GetBuildersSummary {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetBuildersSummaryResponse> {
|
||||
let query = match resource::get_resource_object_ids_for_user::<
|
||||
Builder,
|
||||
>(user)
|
||||
let query = match list_resource_ids_for_user::<Builder>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
|
||||
@@ -145,7 +145,8 @@ impl Resolve<ReadArgs> for GetDeploymentLog {
|
||||
return Ok(Log::default());
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::GetContainerLog {
|
||||
name,
|
||||
tail: cmp::min(tail, MAX_LOG_LENGTH),
|
||||
@@ -183,7 +184,8 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
|
||||
return Ok(Log::default());
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::GetContainerLogSearch {
|
||||
name,
|
||||
terms,
|
||||
@@ -234,7 +236,8 @@ impl Resolve<ReadArgs> for InspectDeploymentContainer {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer { name })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -262,7 +265,8 @@ impl Resolve<ReadArgs> for GetDeploymentStats {
|
||||
);
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::GetContainerStats { name })
|
||||
.await
|
||||
.context("failed to get stats from periphery")?;
|
||||
@@ -321,7 +325,9 @@ impl Resolve<ReadArgs> for GetDeploymentsSummary {
|
||||
res.not_deployed += 1;
|
||||
}
|
||||
DeploymentState::Unknown => {
|
||||
res.unknown += 1;
|
||||
if !deployment.template {
|
||||
res.unknown += 1;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
res.unhealthy += 1;
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use std::{collections::HashSet, sync::OnceLock, time::Instant};
|
||||
use std::{collections::HashSet, time::Instant};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::{
|
||||
@@ -27,7 +27,9 @@ use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request, config::core_config, helpers::periphery_client,
|
||||
auth::auth_request,
|
||||
config::{core_config, core_keys},
|
||||
helpers::periphery_client,
|
||||
resource,
|
||||
};
|
||||
|
||||
@@ -39,6 +41,7 @@ mod alerter;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod onboarding_key;
|
||||
mod permission;
|
||||
mod procedure;
|
||||
mod provider;
|
||||
@@ -46,8 +49,10 @@ mod repo;
|
||||
mod schedule;
|
||||
mod server;
|
||||
mod stack;
|
||||
mod swarm;
|
||||
mod sync;
|
||||
mod tag;
|
||||
mod terminal;
|
||||
mod toml;
|
||||
mod update;
|
||||
mod user;
|
||||
@@ -71,73 +76,67 @@ enum ReadRequest {
|
||||
ListGitProvidersFromConfig(ListGitProvidersFromConfig),
|
||||
ListDockerRegistriesFromConfig(ListDockerRegistriesFromConfig),
|
||||
|
||||
// ==== USER ====
|
||||
GetUsername(GetUsername),
|
||||
GetPermission(GetPermission),
|
||||
FindUser(FindUser),
|
||||
ListUsers(ListUsers),
|
||||
ListApiKeys(ListApiKeys),
|
||||
ListApiKeysForServiceUser(ListApiKeysForServiceUser),
|
||||
ListPermissions(ListPermissions),
|
||||
ListUserTargetPermissions(ListUserTargetPermissions),
|
||||
|
||||
// ==== USER GROUP ====
|
||||
GetUserGroup(GetUserGroup),
|
||||
ListUserGroups(ListUserGroups),
|
||||
|
||||
// ==== PROCEDURE ====
|
||||
GetProceduresSummary(GetProceduresSummary),
|
||||
GetProcedure(GetProcedure),
|
||||
GetProcedureActionState(GetProcedureActionState),
|
||||
ListProcedures(ListProcedures),
|
||||
ListFullProcedures(ListFullProcedures),
|
||||
|
||||
// ==== ACTION ====
|
||||
GetActionsSummary(GetActionsSummary),
|
||||
GetAction(GetAction),
|
||||
GetActionActionState(GetActionActionState),
|
||||
ListActions(ListActions),
|
||||
ListFullActions(ListFullActions),
|
||||
|
||||
// ==== SCHEDULE ====
|
||||
ListSchedules(ListSchedules),
|
||||
// ==== SWARM ====
|
||||
GetSwarmsSummary(GetSwarmsSummary),
|
||||
GetSwarm(GetSwarm),
|
||||
GetSwarmActionState(GetSwarmActionState),
|
||||
ListSwarms(ListSwarms),
|
||||
InspectSwarm(InspectSwarm),
|
||||
ListFullSwarms(ListFullSwarms),
|
||||
ListSwarmNodes(ListSwarmNodes),
|
||||
InspectSwarmNode(InspectSwarmNode),
|
||||
ListSwarmConfigs(ListSwarmConfigs),
|
||||
InspectSwarmConfig(InspectSwarmConfig),
|
||||
ListSwarmSecrets(ListSwarmSecrets),
|
||||
InspectSwarmSecret(InspectSwarmSecret),
|
||||
ListSwarmStacks(ListSwarmStacks),
|
||||
InspectSwarmStack(InspectSwarmStack),
|
||||
ListSwarmTasks(ListSwarmTasks),
|
||||
InspectSwarmTask(InspectSwarmTask),
|
||||
ListSwarmServices(ListSwarmServices),
|
||||
InspectSwarmService(InspectSwarmService),
|
||||
GetSwarmServiceLog(GetSwarmServiceLog),
|
||||
SearchSwarmServiceLog(SearchSwarmServiceLog),
|
||||
|
||||
// ==== SERVER ====
|
||||
GetServersSummary(GetServersSummary),
|
||||
GetServer(GetServer),
|
||||
GetServerState(GetServerState),
|
||||
GetPeripheryVersion(GetPeripheryVersion),
|
||||
GetPeripheryInformation(GetPeripheryInformation),
|
||||
GetServerActionState(GetServerActionState),
|
||||
GetHistoricalServerStats(GetHistoricalServerStats),
|
||||
ListServers(ListServers),
|
||||
ListFullServers(ListFullServers),
|
||||
|
||||
// ==== TERMINAL ====
|
||||
ListTerminals(ListTerminals),
|
||||
|
||||
// ==== DOCKER ====
|
||||
GetDockerContainersSummary(GetDockerContainersSummary),
|
||||
ListAllDockerContainers(ListAllDockerContainers),
|
||||
ListDockerContainers(ListDockerContainers),
|
||||
InspectDockerContainer(InspectDockerContainer),
|
||||
GetResourceMatchingContainer(GetResourceMatchingContainer),
|
||||
GetContainerLog(GetContainerLog),
|
||||
SearchContainerLog(SearchContainerLog),
|
||||
ListComposeProjects(ListComposeProjects),
|
||||
ListDockerNetworks(ListDockerNetworks),
|
||||
InspectDockerNetwork(InspectDockerNetwork),
|
||||
ListDockerImages(ListDockerImages),
|
||||
InspectDockerImage(InspectDockerImage),
|
||||
ListDockerImageHistory(ListDockerImageHistory),
|
||||
InspectDockerVolume(InspectDockerVolume),
|
||||
GetDockerContainersSummary(GetDockerContainersSummary),
|
||||
ListAllDockerContainers(ListAllDockerContainers),
|
||||
ListDockerContainers(ListDockerContainers),
|
||||
ListDockerNetworks(ListDockerNetworks),
|
||||
ListDockerImages(ListDockerImages),
|
||||
ListDockerVolumes(ListDockerVolumes),
|
||||
ListComposeProjects(ListComposeProjects),
|
||||
ListTerminals(ListTerminals),
|
||||
InspectDockerVolume(InspectDockerVolume),
|
||||
|
||||
// ==== SERVER STATS ====
|
||||
GetSystemInformation(GetSystemInformation),
|
||||
GetSystemStats(GetSystemStats),
|
||||
GetHistoricalServerStats(GetHistoricalServerStats),
|
||||
ListSystemProcesses(ListSystemProcesses),
|
||||
|
||||
// ==== STACK ====
|
||||
GetStacksSummary(GetStacksSummary),
|
||||
GetStack(GetStack),
|
||||
GetStackActionState(GetStackActionState),
|
||||
GetStackWebhooksEnabled(GetStackWebhooksEnabled),
|
||||
GetStackLog(GetStackLog),
|
||||
SearchStackLog(SearchStackLog),
|
||||
InspectStackContainer(InspectStackContainer),
|
||||
@@ -166,7 +165,6 @@ enum ReadRequest {
|
||||
GetBuildActionState(GetBuildActionState),
|
||||
GetBuildMonthlyStats(GetBuildMonthlyStats),
|
||||
ListBuildVersions(ListBuildVersions),
|
||||
GetBuildWebhookEnabled(GetBuildWebhookEnabled),
|
||||
ListBuilds(ListBuilds),
|
||||
ListFullBuilds(ListFullBuilds),
|
||||
ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
|
||||
@@ -175,15 +173,30 @@ enum ReadRequest {
|
||||
GetReposSummary(GetReposSummary),
|
||||
GetRepo(GetRepo),
|
||||
GetRepoActionState(GetRepoActionState),
|
||||
GetRepoWebhooksEnabled(GetRepoWebhooksEnabled),
|
||||
ListRepos(ListRepos),
|
||||
ListFullRepos(ListFullRepos),
|
||||
|
||||
// ==== PROCEDURE ====
|
||||
GetProceduresSummary(GetProceduresSummary),
|
||||
GetProcedure(GetProcedure),
|
||||
GetProcedureActionState(GetProcedureActionState),
|
||||
ListProcedures(ListProcedures),
|
||||
ListFullProcedures(ListFullProcedures),
|
||||
|
||||
// ==== ACTION ====
|
||||
GetActionsSummary(GetActionsSummary),
|
||||
GetAction(GetAction),
|
||||
GetActionActionState(GetActionActionState),
|
||||
ListActions(ListActions),
|
||||
ListFullActions(ListFullActions),
|
||||
|
||||
// ==== SCHEDULE ====
|
||||
ListSchedules(ListSchedules),
|
||||
|
||||
// ==== SYNC ====
|
||||
GetResourceSyncsSummary(GetResourceSyncsSummary),
|
||||
GetResourceSync(GetResourceSync),
|
||||
GetResourceSyncActionState(GetResourceSyncActionState),
|
||||
GetSyncWebhooksEnabled(GetSyncWebhooksEnabled),
|
||||
ListResourceSyncs(ListResourceSyncs),
|
||||
ListFullResourceSyncs(ListFullResourceSyncs),
|
||||
|
||||
@@ -207,6 +220,20 @@ enum ReadRequest {
|
||||
GetTag(GetTag),
|
||||
ListTags(ListTags),
|
||||
|
||||
// ==== USER ====
|
||||
GetUsername(GetUsername),
|
||||
GetPermission(GetPermission),
|
||||
FindUser(FindUser),
|
||||
ListUsers(ListUsers),
|
||||
ListApiKeys(ListApiKeys),
|
||||
ListApiKeysForServiceUser(ListApiKeysForServiceUser),
|
||||
ListPermissions(ListPermissions),
|
||||
ListUserTargetPermissions(ListUserTargetPermissions),
|
||||
|
||||
// ==== USER GROUP ====
|
||||
GetUserGroup(GetUserGroup),
|
||||
ListUserGroups(ListUserGroups),
|
||||
|
||||
// ==== UPDATE ====
|
||||
GetUpdate(GetUpdate),
|
||||
ListUpdates(ListUpdates),
|
||||
@@ -224,6 +251,9 @@ enum ReadRequest {
|
||||
ListGitProviderAccounts(ListGitProviderAccounts),
|
||||
GetDockerRegistryAccount(GetDockerRegistryAccount),
|
||||
ListDockerRegistryAccounts(ListDockerRegistryAccounts),
|
||||
|
||||
// ==== ONBOARDING KEY ====
|
||||
ListOnboardingKeys(ListOnboardingKeys),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
@@ -245,7 +275,6 @@ async fn variant_handler(
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
#[instrument(name = "ReadHandler", level = "debug", skip(user), fields(user_id = user.id))]
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<ReadRequest>,
|
||||
@@ -273,11 +302,13 @@ impl Resolve<ReadArgs> for GetVersion {
|
||||
}
|
||||
}
|
||||
|
||||
fn core_info() -> &'static GetCoreInfoResponse {
|
||||
static CORE_INFO: OnceLock<GetCoreInfoResponse> = OnceLock::new();
|
||||
CORE_INFO.get_or_init(|| {
|
||||
impl Resolve<ReadArgs> for GetCoreInfo {
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &ReadArgs,
|
||||
) -> serror::Result<GetCoreInfoResponse> {
|
||||
let config = core_config();
|
||||
GetCoreInfoResponse {
|
||||
let info = GetCoreInfoResponse {
|
||||
title: config.title.clone(),
|
||||
monitoring_interval: config.monitoring_interval,
|
||||
webhook_base_url: if config.webhook_base_url.is_empty() {
|
||||
@@ -291,23 +322,10 @@ fn core_info() -> &'static GetCoreInfoResponse {
|
||||
disable_non_admin_create: config.disable_non_admin_create,
|
||||
disable_websocket_reconnect: config.disable_websocket_reconnect,
|
||||
enable_fancy_toml: config.enable_fancy_toml,
|
||||
github_webhook_owners: config
|
||||
.github_webhook_app
|
||||
.installations
|
||||
.iter()
|
||||
.map(|i| i.namespace.to_string())
|
||||
.collect(),
|
||||
timezone: config.timezone.clone(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetCoreInfo {
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &ReadArgs,
|
||||
) -> serror::Result<GetCoreInfoResponse> {
|
||||
Ok(core_info().clone())
|
||||
public_key: core_keys().load().public.to_string(),
|
||||
};
|
||||
Ok(info)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -343,7 +361,8 @@ impl Resolve<ReadArgs> for ListSecrets {
|
||||
};
|
||||
if let Some(id) = server_id {
|
||||
let server = resource::get::<Server>(&id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery_client::api::ListSecrets {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
@@ -515,7 +534,8 @@ async fn merge_git_providers_for_server(
|
||||
server_id: &str,
|
||||
) -> serror::Result<()> {
|
||||
let server = resource::get::<Server>(server_id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery_client::api::ListGitProviders {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
@@ -553,7 +573,8 @@ async fn merge_docker_registries_for_server(
|
||||
server_id: &str,
|
||||
) -> serror::Result<()> {
|
||||
let server = resource::get::<Server>(server_id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery_client::api::ListDockerRegistries {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
|
||||
51
bin/core/src/api/read/onboarding_key.rs
Normal file
51
bin/core/src/api/read/onboarding_key.rs
Normal file
@@ -0,0 +1,51 @@
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::find::find_collect;
|
||||
use komodo_client::api::read::{
|
||||
ListOnboardingKeys, ListOnboardingKeysResponse,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{api::read::ReadArgs, state::db_client};
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<ReadArgs> for ListOnboardingKeys {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user: admin }: &ReadArgs,
|
||||
) -> serror::Result<ListOnboardingKeysResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let mut keys =
|
||||
find_collect(&db_client().onboarding_keys, None, None)
|
||||
.await
|
||||
.context(
|
||||
"Failed to query database for Server onboarding keys",
|
||||
)?;
|
||||
|
||||
// No expiry keys first, followed
|
||||
keys.sort_by(|a, b| {
|
||||
if a.expires == b.expires {
|
||||
Ordering::Equal
|
||||
} else if a.expires == 0 {
|
||||
Ordering::Less
|
||||
} else if b.expires == 0 {
|
||||
Ordering::Greater
|
||||
} else {
|
||||
// Descending
|
||||
b.expires.cmp(&a.expires)
|
||||
}
|
||||
});
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,6 @@ use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
repo::{Repo, RepoActionState, RepoListItem, RepoState},
|
||||
},
|
||||
@@ -10,11 +9,10 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_all_tags,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, github_client, repo_state_cache},
|
||||
state::{action_states, repo_state_cache},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -142,7 +140,11 @@ impl Resolve<ReadArgs> for GetReposSummary {
|
||||
}
|
||||
(RepoState::Ok, _) => res.ok += 1,
|
||||
(RepoState::Failed, _) => res.failed += 1,
|
||||
(RepoState::Unknown, _) => res.unknown += 1,
|
||||
(RepoState::Unknown, _) => {
|
||||
if !repo.template {
|
||||
res.unknown += 1
|
||||
}
|
||||
}
|
||||
// will never come off the cache in the building state, since that comes from action states
|
||||
(RepoState::Cloning, _)
|
||||
| (RepoState::Pulling, _)
|
||||
@@ -155,104 +157,3 @@ impl Resolve<ReadArgs> for GetReposSummary {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetRepoWebhooksEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetRepoWebhooksEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.git_provider != "github.com"
|
||||
|| repo.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = repo.config.repo.split('/');
|
||||
let owner = split.next().context("Repo repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let clone_url =
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id);
|
||||
let pull_url =
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id);
|
||||
let build_url =
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id);
|
||||
|
||||
let mut clone_enabled = false;
|
||||
let mut pull_enabled = false;
|
||||
let mut build_enabled = false;
|
||||
|
||||
for webhook in webhooks {
|
||||
if !webhook.active {
|
||||
continue;
|
||||
}
|
||||
if webhook.config.url == clone_url {
|
||||
clone_enabled = true
|
||||
}
|
||||
if webhook.config.url == pull_url {
|
||||
pull_enabled = true
|
||||
}
|
||||
if webhook.config.url == build_url {
|
||||
build_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: true,
|
||||
clone_enabled,
|
||||
pull_enabled,
|
||||
build_enabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use futures::future::join_all;
|
||||
use futures_util::future::join_all;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
|
||||
@@ -25,11 +25,10 @@ use komodo_client::{
|
||||
network::Network,
|
||||
volume::Volume,
|
||||
},
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::{
|
||||
Server, ServerActionState, ServerListItem, ServerState,
|
||||
TerminalInfo,
|
||||
Server, ServerActionState, ServerListItem, ServerQuery,
|
||||
ServerState,
|
||||
},
|
||||
stack::{Stack, StackServiceNames},
|
||||
stats::{SystemInformation, SystemProcess},
|
||||
@@ -39,19 +38,18 @@ use komodo_client::{
|
||||
use periphery_client::api::{
|
||||
self as periphery,
|
||||
container::InspectContainer,
|
||||
image::{ImageHistory, InspectImage},
|
||||
network::InspectNetwork,
|
||||
volume::InspectVolume,
|
||||
docker::{
|
||||
ImageHistory, InspectImage, InspectNetwork, InspectVolume,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCode;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
periphery_client,
|
||||
query::{get_all_tags, get_system_info},
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
helpers::{periphery_client, query::get_all_tags},
|
||||
permission::{get_check_permissions, list_resources_for_user},
|
||||
resource,
|
||||
stack::compose_container_match_regex,
|
||||
state::{action_states, db_client, server_status_cache},
|
||||
@@ -80,11 +78,8 @@ impl Resolve<ReadArgs> for GetServersSummary {
|
||||
match server.info.state {
|
||||
ServerState::Ok => {
|
||||
// Check for version mismatch
|
||||
let has_version_mismatch = !server.info.version.is_empty()
|
||||
&& server.info.version != "Unknown"
|
||||
&& server.info.version != core_version;
|
||||
|
||||
if has_version_mismatch {
|
||||
if matches!(&server.info.version, Some(version) if version != core_version)
|
||||
{
|
||||
res.warning += 1;
|
||||
} else {
|
||||
res.healthy += 1;
|
||||
@@ -94,7 +89,9 @@ impl Resolve<ReadArgs> for GetServersSummary {
|
||||
res.unhealthy += 1;
|
||||
}
|
||||
ServerState::Disabled => {
|
||||
res.disabled += 1;
|
||||
if !server.template {
|
||||
res.disabled += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -102,26 +99,6 @@ impl Resolve<ReadArgs> for GetServersSummary {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetPeripheryVersion {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetPeripheryVersionResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let version = server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| s.version.clone())
|
||||
.unwrap_or(String::from("unknown"));
|
||||
Ok(GetPeripheryVersionResponse { version })
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetServer {
|
||||
async fn resolve(
|
||||
self,
|
||||
@@ -225,6 +202,29 @@ impl Resolve<ReadArgs> for GetServerActionState {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetPeripheryInformation {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetPeripheryInformationResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.periphery_info
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.context("Server status missing Periphery Info. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetSystemInformation {
|
||||
async fn resolve(
|
||||
self,
|
||||
@@ -235,8 +235,17 @@ impl Resolve<ReadArgs> for GetSystemInformation {
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
get_system_info(&server).await.map_err(Into::into)
|
||||
.await
|
||||
.status_code(StatusCode::BAD_REQUEST)?;
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.system_info
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.context("Server status missing system Info. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -251,15 +260,15 @@ impl Resolve<ReadArgs> for GetSystemStats {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let status =
|
||||
server_status_cache().get(&server.id).await.with_context(
|
||||
|| format!("did not find status for server at {}", server.id),
|
||||
)?;
|
||||
let stats = status
|
||||
.stats
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.context("server stats not available")?;
|
||||
Ok(stats.clone())
|
||||
.cloned()
|
||||
.context("Server status missing system stats. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -289,7 +298,8 @@ impl Resolve<ReadArgs> for ListSystemProcesses {
|
||||
cached.0.clone()
|
||||
}
|
||||
_ => {
|
||||
let stats = periphery_client(&server)?
|
||||
let stats = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery::stats::GetSystemProcesses {})
|
||||
.await?;
|
||||
lock.insert(
|
||||
@@ -373,8 +383,8 @@ impl Resolve<ReadArgs> for ListDockerContainers {
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if let Some(containers) = &cache.containers {
|
||||
Ok(containers.clone())
|
||||
if let Some(docker) = &cache.docker {
|
||||
Ok(docker.containers.clone())
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
@@ -387,18 +397,12 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListAllDockerContainersResponse> {
|
||||
let servers = resource::list_for_user::<Server>(
|
||||
Default::default(),
|
||||
ServerQuery::builder().names(self.servers.clone()).build(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|server| {
|
||||
self.servers.is_empty()
|
||||
|| self.servers.contains(&server.id)
|
||||
|| self.servers.contains(&server.name)
|
||||
});
|
||||
.await?;
|
||||
|
||||
let mut containers = Vec::<ContainerListItem>::new();
|
||||
|
||||
@@ -406,9 +410,18 @@ impl Resolve<ReadArgs> for ListAllDockerContainers {
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if let Some(more_containers) = &cache.containers {
|
||||
containers.extend(more_containers.clone());
|
||||
}
|
||||
let Some(docker) = &cache.docker else {
|
||||
continue;
|
||||
};
|
||||
let more = docker
|
||||
.containers
|
||||
.iter()
|
||||
.filter(|container| {
|
||||
self.containers.is_empty()
|
||||
|| self.containers.contains(&container.name)
|
||||
})
|
||||
.cloned();
|
||||
containers.extend(more);
|
||||
}
|
||||
|
||||
Ok(containers)
|
||||
@@ -436,8 +449,8 @@ impl Resolve<ReadArgs> for GetDockerContainersSummary {
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
|
||||
if let Some(containers) = &cache.containers {
|
||||
for container in containers {
|
||||
if let Some(docker) = &cache.docker {
|
||||
for container in &docker.containers {
|
||||
res.total += 1;
|
||||
match container.state {
|
||||
ContainerStateStatusEnum::Created
|
||||
@@ -478,7 +491,8 @@ impl Resolve<ReadArgs> for InspectDockerContainer {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer {
|
||||
name: self.container,
|
||||
})
|
||||
@@ -506,7 +520,8 @@ impl Resolve<ReadArgs> for GetContainerLog {
|
||||
PermissionLevel::Read.logs(),
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery::container::GetContainerLog {
|
||||
name: container,
|
||||
tail: cmp::min(tail, MAX_LOG_LENGTH),
|
||||
@@ -537,7 +552,8 @@ impl Resolve<ReadArgs> for SearchContainerLog {
|
||||
PermissionLevel::Read.logs(),
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery::container::GetContainerLogSearch {
|
||||
name: container,
|
||||
terms,
|
||||
@@ -572,12 +588,12 @@ impl Resolve<ReadArgs> for GetResourceMatchingContainer {
|
||||
}
|
||||
|
||||
// then check stacks
|
||||
let stacks =
|
||||
resource::list_full_for_user_using_document::<Stack>(
|
||||
doc! { "config.server_id": &server.id },
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
let stacks = list_resources_for_user::<Stack>(
|
||||
doc! { "config.server_id": &server.id },
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// check matching stack
|
||||
for stack in stacks {
|
||||
@@ -626,8 +642,8 @@ impl Resolve<ReadArgs> for ListDockerNetworks {
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if let Some(networks) = &cache.networks {
|
||||
Ok(networks.clone())
|
||||
if let Some(docker) = &cache.docker {
|
||||
Ok(docker.networks.clone())
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
@@ -657,7 +673,8 @@ impl Resolve<ReadArgs> for InspectDockerNetwork {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectNetwork { name: self.network })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -678,8 +695,8 @@ impl Resolve<ReadArgs> for ListDockerImages {
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if let Some(images) = &cache.images {
|
||||
Ok(images.clone())
|
||||
if let Some(docker) = &cache.docker {
|
||||
Ok(docker.images.clone())
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
@@ -706,7 +723,8 @@ impl Resolve<ReadArgs> for InspectDockerImage {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectImage { name: self.image })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -736,7 +754,8 @@ impl Resolve<ReadArgs> for ListDockerImageHistory {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(ImageHistory { name: self.image })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -757,8 +776,8 @@ impl Resolve<ReadArgs> for ListDockerVolumes {
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if let Some(volumes) = &cache.volumes {
|
||||
Ok(volumes.clone())
|
||||
if let Some(docker) = &cache.docker {
|
||||
Ok(docker.volumes.clone())
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
@@ -785,7 +804,8 @@ impl Resolve<ReadArgs> for InspectDockerVolume {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectVolume { name: self.volume })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -806,73 +826,54 @@ impl Resolve<ReadArgs> for ListComposeProjects {
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if let Some(projects) = &cache.projects {
|
||||
Ok(projects.clone())
|
||||
if let Some(docker) = &cache.docker {
|
||||
Ok(docker.projects.clone())
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct TerminalCacheItem {
|
||||
list: Vec<TerminalInfo>,
|
||||
ttl: i64,
|
||||
}
|
||||
// impl Resolve<ReadArgs> for ListAllTerminals {
|
||||
// async fn resolve(
|
||||
// self,
|
||||
// args: &ReadArgs,
|
||||
// ) -> Result<Self::Response, Self::Error> {
|
||||
// // match self.tar
|
||||
// let mut terminals = resource::list_full_for_user::<Server>(
|
||||
// self.query, &args.user, &all_tags,
|
||||
// )
|
||||
// .await?
|
||||
// .into_iter()
|
||||
// .map(|server| async move {
|
||||
// (
|
||||
// list_terminals_inner(&server, self.fresh).await,
|
||||
// (server.id, server.name),
|
||||
// )
|
||||
// })
|
||||
// .collect::<FuturesUnordered<_>>()
|
||||
// .collect::<Vec<_>>()
|
||||
// .await
|
||||
// .into_iter()
|
||||
// .flat_map(|(terminals, server)| {
|
||||
// let terminals = terminals.ok()?;
|
||||
// Some((terminals, server))
|
||||
// })
|
||||
// .flat_map(|(terminals, (server_id, server_name))| {
|
||||
// terminals.into_iter().map(move |info| {
|
||||
// TerminalInfoWithServer::from_terminal_info(
|
||||
// &server_id,
|
||||
// &server_name,
|
||||
// info,
|
||||
// )
|
||||
// })
|
||||
// })
|
||||
// .collect::<Vec<_>>();
|
||||
|
||||
const TERMINAL_CACHE_TIMEOUT: i64 = 30_000;
|
||||
// terminals.sort_by(|a, b| {
|
||||
// a.server_name.cmp(&b.server_name).then(a.name.cmp(&b.name))
|
||||
// });
|
||||
|
||||
#[derive(Default)]
|
||||
struct TerminalCache(
|
||||
std::sync::Mutex<
|
||||
HashMap<String, Arc<tokio::sync::Mutex<TerminalCacheItem>>>,
|
||||
>,
|
||||
);
|
||||
|
||||
impl TerminalCache {
|
||||
fn get_or_insert(
|
||||
&self,
|
||||
server_id: String,
|
||||
) -> Arc<tokio::sync::Mutex<TerminalCacheItem>> {
|
||||
if let Some(cached) =
|
||||
self.0.lock().unwrap().get(&server_id).cloned()
|
||||
{
|
||||
return cached;
|
||||
}
|
||||
let to_cache =
|
||||
Arc::new(tokio::sync::Mutex::new(TerminalCacheItem::default()));
|
||||
self.0.lock().unwrap().insert(server_id, to_cache.clone());
|
||||
to_cache
|
||||
}
|
||||
}
|
||||
|
||||
fn terminals_cache() -> &'static TerminalCache {
|
||||
static TERMINALS: OnceLock<TerminalCache> = OnceLock::new();
|
||||
TERMINALS.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListTerminals {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListTerminalsResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
let cache = terminals_cache().get_or_insert(server.id.clone());
|
||||
let mut cache = cache.lock().await;
|
||||
if self.fresh || komodo_timestamp() > cache.ttl {
|
||||
cache.list = periphery_client(&server)?
|
||||
.request(periphery_client::api::terminal::ListTerminals {})
|
||||
.await
|
||||
.context("Failed to get fresh terminal list")?;
|
||||
cache.ttl = komodo_timestamp() + TERMINAL_CACHE_TIMEOUT;
|
||||
Ok(cache.list.clone())
|
||||
} else {
|
||||
Ok(cache.list.clone())
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ok(terminals)
|
||||
// }
|
||||
// }
|
||||
|
||||
@@ -4,7 +4,6 @@ use anyhow::{Context, anyhow};
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
docker::container::Container,
|
||||
permission::PermissionLevel,
|
||||
server::{Server, ServerState},
|
||||
@@ -18,15 +17,11 @@ use periphery_client::api::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::{periphery_client, query::get_all_tags},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
stack::get_stack_and_server,
|
||||
state::{
|
||||
action_states, github_client, server_status_cache,
|
||||
stack_status_cache,
|
||||
},
|
||||
state::{action_states, server_status_cache, stack_status_cache},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -89,7 +84,8 @@ impl Resolve<ReadArgs> for GetStackLog {
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(GetComposeLog {
|
||||
project: stack.project_name(false),
|
||||
services,
|
||||
@@ -122,7 +118,8 @@ impl Resolve<ReadArgs> for SearchStackLog {
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(GetComposeLogSearch {
|
||||
project: stack.project_name(false),
|
||||
services,
|
||||
@@ -184,7 +181,8 @@ impl Resolve<ReadArgs> for InspectStackContainer {
|
||||
"No service found matching '{service}'. Was the stack last deployed manually?"
|
||||
).into());
|
||||
};
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer { name })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -363,7 +361,11 @@ impl Resolve<ReadArgs> for GetStacksSummary {
|
||||
StackState::Running => res.running += 1,
|
||||
StackState::Stopped | StackState::Paused => res.stopped += 1,
|
||||
StackState::Down => res.down += 1,
|
||||
StackState::Unknown => res.unknown += 1,
|
||||
StackState::Unknown => {
|
||||
if !stack.template {
|
||||
res.unknown += 1
|
||||
}
|
||||
}
|
||||
_ => res.unhealthy += 1,
|
||||
}
|
||||
}
|
||||
@@ -371,91 +373,3 @@ impl Resolve<ReadArgs> for GetStacksSummary {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetStackWebhooksEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetStackWebhooksEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if stack.config.git_provider != "github.com"
|
||||
|| stack.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = stack.config.repo.split('/');
|
||||
let owner = split.next().context("Sync repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let refresh_url =
|
||||
format!("{host}/listener/github/stack/{}/refresh", stack.id);
|
||||
let deploy_url =
|
||||
format!("{host}/listener/github/stack/{}/deploy", stack.id);
|
||||
|
||||
let mut refresh_enabled = false;
|
||||
let mut deploy_enabled = false;
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == refresh_url {
|
||||
refresh_enabled = true
|
||||
}
|
||||
if webhook.active && webhook.config.url == deploy_url {
|
||||
deploy_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: true,
|
||||
refresh_enabled,
|
||||
deploy_enabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
478
bin/core/src/api/read/swarm.rs
Normal file
478
bin/core/src/api/read/swarm.rs
Normal file
@@ -0,0 +1,478 @@
|
||||
use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
swarm::{Swarm, SwarmActionState, SwarmListItem, SwarmState},
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{query::get_all_tags, swarm::swarm_request},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, swarm_status_cache},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
|
||||
impl Resolve<ReadArgs> for GetSwarm {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Swarm> {
|
||||
Ok(
|
||||
get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListSwarms {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Vec<SwarmListItem>> {
|
||||
let all_tags = if self.query.tags.is_empty() {
|
||||
vec![]
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<Swarm>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListFullSwarms {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListFullSwarmsResponse> {
|
||||
let all_tags = if self.query.tags.is_empty() {
|
||||
vec![]
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<Swarm>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetSwarmActionState {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<SwarmActionState> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.swarm
|
||||
.get(&swarm.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.get()?;
|
||||
Ok(action_state)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetSwarmsSummary {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetSwarmsSummaryResponse> {
|
||||
let swarms = resource::list_full_for_user::<Swarm>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
.context("failed to get swarms from db")?;
|
||||
|
||||
let mut res = GetSwarmsSummaryResponse::default();
|
||||
|
||||
let cache = swarm_status_cache();
|
||||
|
||||
for swarm in swarms {
|
||||
res.total += 1;
|
||||
|
||||
match cache
|
||||
.get(&swarm.id)
|
||||
.await
|
||||
.map(|status| status.state)
|
||||
.unwrap_or_default()
|
||||
{
|
||||
SwarmState::Unknown => {
|
||||
res.unknown += 1;
|
||||
}
|
||||
SwarmState::Healthy => {
|
||||
res.healthy += 1;
|
||||
}
|
||||
SwarmState::Unhealthy => {
|
||||
res.unhealthy += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for InspectSwarm {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<InspectSwarmResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache =
|
||||
swarm_status_cache().get_or_insert_default(&swarm.id).await;
|
||||
let inspect = cache
|
||||
.inspect
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.context("SwarmInspectInfo not available")?;
|
||||
Ok(inspect)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListSwarmNodes {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListSwarmNodesResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache =
|
||||
swarm_status_cache().get_or_insert_default(&swarm.id).await;
|
||||
if let Some(lists) = &cache.lists {
|
||||
Ok(lists.nodes.clone())
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for InspectSwarmNode {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<InspectSwarmNodeResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
swarm_request(
|
||||
&swarm.config.server_ids,
|
||||
periphery_client::api::swarm::InspectSwarmNode {
|
||||
node: self.node,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListSwarmServices {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListSwarmServicesResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache =
|
||||
swarm_status_cache().get_or_insert_default(&swarm.id).await;
|
||||
if let Some(lists) = &cache.lists {
|
||||
Ok(lists.services.clone())
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for InspectSwarmService {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<InspectSwarmServiceResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
swarm_request(
|
||||
&swarm.config.server_ids,
|
||||
periphery_client::api::swarm::InspectSwarmService {
|
||||
service: self.service,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetSwarmServiceLog {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetSwarmServiceLogResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.logs(),
|
||||
)
|
||||
.await?;
|
||||
swarm_request(
|
||||
&swarm.config.server_ids,
|
||||
periphery_client::api::swarm::GetSwarmServiceLog {
|
||||
service: self.service,
|
||||
tail: self.tail,
|
||||
timestamps: self.timestamps,
|
||||
no_task_ids: self.no_task_ids,
|
||||
no_resolve: self.no_resolve,
|
||||
details: self.details,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for SearchSwarmServiceLog {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<SearchSwarmServiceLogResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.logs(),
|
||||
)
|
||||
.await?;
|
||||
swarm_request(
|
||||
&swarm.config.server_ids,
|
||||
periphery_client::api::swarm::GetSwarmServiceLogSearch {
|
||||
service: self.service,
|
||||
terms: self.terms,
|
||||
combinator: self.combinator,
|
||||
invert: self.invert,
|
||||
timestamps: self.timestamps,
|
||||
no_task_ids: self.no_task_ids,
|
||||
no_resolve: self.no_resolve,
|
||||
details: self.details,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListSwarmTasks {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListSwarmTasksResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache =
|
||||
swarm_status_cache().get_or_insert_default(&swarm.id).await;
|
||||
if let Some(lists) = &cache.lists {
|
||||
Ok(lists.tasks.clone())
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for InspectSwarmTask {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<InspectSwarmTaskResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
swarm_request(
|
||||
&swarm.config.server_ids,
|
||||
periphery_client::api::swarm::InspectSwarmTask {
|
||||
task: self.task,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListSwarmSecrets {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListSwarmSecretsResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache =
|
||||
swarm_status_cache().get_or_insert_default(&swarm.id).await;
|
||||
if let Some(lists) = &cache.lists {
|
||||
Ok(lists.secrets.clone())
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for InspectSwarmSecret {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<InspectSwarmSecretResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
swarm_request(
|
||||
&swarm.config.server_ids,
|
||||
periphery_client::api::swarm::InspectSwarmSecret {
|
||||
secret: self.secret,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListSwarmConfigs {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListSwarmConfigsResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache =
|
||||
swarm_status_cache().get_or_insert_default(&swarm.id).await;
|
||||
if let Some(lists) = &cache.lists {
|
||||
Ok(lists.configs.clone())
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for InspectSwarmConfig {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<InspectSwarmConfigResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
swarm_request(
|
||||
&swarm.config.server_ids,
|
||||
periphery_client::api::swarm::InspectSwarmConfig {
|
||||
config: self.config,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListSwarmStacks {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListSwarmStacksResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache =
|
||||
swarm_status_cache().get_or_insert_default(&swarm.id).await;
|
||||
if let Some(lists) = &cache.lists {
|
||||
Ok(lists.stacks.clone())
|
||||
} else {
|
||||
Ok(Vec::new())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for InspectSwarmStack {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<InspectSwarmStackResponse> {
|
||||
let swarm = get_check_permissions::<Swarm>(
|
||||
&self.swarm,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
swarm_request(
|
||||
&swarm.config.server_ids,
|
||||
periphery_client::api::swarm::InspectSwarmStack {
|
||||
stack: self.stack,
|
||||
},
|
||||
)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
@@ -2,7 +2,6 @@ use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
sync::{
|
||||
ResourceSync, ResourceSyncActionState, ResourceSyncListItem,
|
||||
@@ -12,11 +11,8 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_all_tags,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, github_client},
|
||||
helpers::query::get_all_tags, permission::get_check_permissions,
|
||||
resource, state::action_states,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -154,91 +150,3 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetSyncWebhooksEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetSyncWebhooksEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
sync_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let sync = get_check_permissions::<ResourceSync>(
|
||||
&self.sync,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if sync.config.git_provider != "github.com"
|
||||
|| sync.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
sync_enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = sync.config.repo.split('/');
|
||||
let owner = split.next().context("Sync repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
sync_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let refresh_url =
|
||||
format!("{host}/listener/github/sync/{}/refresh", sync.id);
|
||||
let sync_url =
|
||||
format!("{host}/listener/github/sync/{}/sync", sync.id);
|
||||
|
||||
let mut refresh_enabled = false;
|
||||
let mut sync_enabled = false;
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == refresh_url {
|
||||
refresh_enabled = true
|
||||
}
|
||||
if webhook.active && webhook.config.url == sync_url {
|
||||
sync_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: true,
|
||||
refresh_enabled,
|
||||
sync_enabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
247
bin/core/src/api/read/terminal.rs
Normal file
247
bin/core/src/api/read/terminal.rs
Normal file
@@ -0,0 +1,247 @@
|
||||
use anyhow::Context as _;
|
||||
use futures_util::{
|
||||
FutureExt, StreamExt as _, stream::FuturesUnordered,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{ListTerminals, ListTerminalsResponse},
|
||||
entities::{
|
||||
deployment::Deployment,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
stack::Stack,
|
||||
terminal::{Terminal, TerminalTarget},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCode;
|
||||
|
||||
use crate::{
|
||||
helpers::periphery_client, permission::get_check_permissions,
|
||||
resource,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<ReadArgs> for ListTerminals {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListTerminalsResponse> {
|
||||
let Some(target) = self.target else {
|
||||
return list_all_terminals_for_user(user, self.use_names).await;
|
||||
};
|
||||
match &target {
|
||||
TerminalTarget::Server { server } => {
|
||||
let server = server
|
||||
.as_ref()
|
||||
.context("Must provide 'target.params.server'")
|
||||
.status_code(StatusCode::BAD_REQUEST)?;
|
||||
let server = get_check_permissions::<Server>(
|
||||
server,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
list_terminals_on_server(&server, Some(target)).await
|
||||
}
|
||||
TerminalTarget::Container { server, .. } => {
|
||||
let server = get_check_permissions::<Server>(
|
||||
server,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
list_terminals_on_server(&server, Some(target)).await
|
||||
}
|
||||
TerminalTarget::Stack { stack, .. } => {
|
||||
let server = get_check_permissions::<Stack>(
|
||||
stack,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?
|
||||
.config
|
||||
.server_id;
|
||||
let server = resource::get::<Server>(&server).await?;
|
||||
list_terminals_on_server(&server, Some(target)).await
|
||||
}
|
||||
TerminalTarget::Deployment { deployment } => {
|
||||
let server = get_check_permissions::<Deployment>(
|
||||
deployment,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?
|
||||
.config
|
||||
.server_id;
|
||||
let server = resource::get::<Server>(&server).await?;
|
||||
list_terminals_on_server(&server, Some(target)).await
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_all_terminals_for_user(
|
||||
user: &User,
|
||||
use_names: bool,
|
||||
) -> serror::Result<Vec<Terminal>> {
|
||||
let (mut servers, stacks, deployments) = tokio::try_join!(
|
||||
resource::list_full_for_user::<Server>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
&[]
|
||||
)
|
||||
.map(|res| res.map(|servers| servers
|
||||
.into_iter()
|
||||
// true denotes user actually has permission on this Server.
|
||||
.map(|server| (server, true))
|
||||
.collect::<Vec<_>>())),
|
||||
resource::list_full_for_user::<Stack>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
&[]
|
||||
),
|
||||
resource::list_full_for_user::<Deployment>(
|
||||
Default::default(),
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
&[]
|
||||
),
|
||||
)?;
|
||||
|
||||
// Ensure any missing servers are present to query
|
||||
for stack in &stacks {
|
||||
if !stack.config.server_id.is_empty()
|
||||
&& !servers
|
||||
.iter()
|
||||
.any(|(server, _)| server.id == stack.config.server_id)
|
||||
{
|
||||
let server =
|
||||
resource::get::<Server>(&stack.config.server_id).await?;
|
||||
servers.push((server, false));
|
||||
}
|
||||
}
|
||||
for deployment in &deployments {
|
||||
if !deployment.config.server_id.is_empty()
|
||||
&& !servers
|
||||
.iter()
|
||||
.any(|(server, _)| server.id == deployment.config.server_id)
|
||||
{
|
||||
let server =
|
||||
resource::get::<Server>(&deployment.config.server_id).await?;
|
||||
servers.push((server, false));
|
||||
}
|
||||
}
|
||||
|
||||
let mut terminals = servers
|
||||
.into_iter()
|
||||
.map(|(server, server_permission)| async move {
|
||||
(
|
||||
list_terminals_on_server(&server, None).await,
|
||||
(server.id, server.name, server_permission),
|
||||
)
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.collect::<Vec<_>>()
|
||||
.await
|
||||
.into_iter()
|
||||
.flat_map(
|
||||
|(terminals, (server_id, server_name, server_permission))| {
|
||||
let terminals = terminals
|
||||
.ok()?
|
||||
.into_iter()
|
||||
.filter_map(|mut terminal| {
|
||||
// Only keep terminals with appropriate perms.
|
||||
match terminal.target.clone() {
|
||||
TerminalTarget::Server { .. } => server_permission
|
||||
.then(|| {
|
||||
terminal.target = TerminalTarget::Server {
|
||||
server: Some(if use_names {
|
||||
server_name.clone()
|
||||
} else {
|
||||
server_id.clone()
|
||||
}),
|
||||
};
|
||||
terminal
|
||||
}),
|
||||
TerminalTarget::Container { container, .. } => {
|
||||
server_permission.then(|| {
|
||||
terminal.target = TerminalTarget::Container {
|
||||
server: if use_names {
|
||||
server_name.clone()
|
||||
} else {
|
||||
server_id.clone()
|
||||
},
|
||||
container,
|
||||
};
|
||||
terminal
|
||||
})
|
||||
}
|
||||
TerminalTarget::Stack { stack, service } => {
|
||||
stacks.iter().find(|s| s.id == stack).map(|s| {
|
||||
terminal.target = TerminalTarget::Stack {
|
||||
stack: if use_names {
|
||||
s.name.clone()
|
||||
} else {
|
||||
s.id.clone()
|
||||
},
|
||||
service,
|
||||
};
|
||||
terminal
|
||||
})
|
||||
}
|
||||
TerminalTarget::Deployment { deployment } => {
|
||||
deployments.iter().find(|d| d.id == deployment).map(
|
||||
|d| {
|
||||
terminal.target = TerminalTarget::Deployment {
|
||||
deployment: if use_names {
|
||||
d.name.clone()
|
||||
} else {
|
||||
d.id.clone()
|
||||
},
|
||||
};
|
||||
terminal
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
Some(terminals)
|
||||
},
|
||||
)
|
||||
.flatten()
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
terminals.sort_by(|a, b| {
|
||||
a.target.cmp(&b.target).then(a.name.cmp(&b.name))
|
||||
});
|
||||
|
||||
Ok(terminals)
|
||||
}
|
||||
|
||||
async fn list_terminals_on_server(
|
||||
server: &Server,
|
||||
target: Option<TerminalTarget>,
|
||||
) -> serror::Result<Vec<Terminal>> {
|
||||
periphery_client(server)
|
||||
.await?
|
||||
.request(periphery_client::api::terminal::ListTerminals {
|
||||
target,
|
||||
})
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to get Terminal list from Server {} ({})",
|
||||
server.name, server.id
|
||||
)
|
||||
})
|
||||
.map_err(Into::into)
|
||||
}
|
||||
@@ -11,7 +11,8 @@ use komodo_client::{
|
||||
builder::Builder, deployment::Deployment,
|
||||
permission::PermissionLevel, procedure::Procedure, repo::Repo,
|
||||
resource::ResourceQuery, server::Server, stack::Stack,
|
||||
sync::ResourceSync, toml::ResourcesToml, user::User,
|
||||
swarm::Swarm, sync::ResourceSync, toml::ResourcesToml,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
@@ -207,42 +208,21 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
let ReadArgs { user } = args;
|
||||
for target in targets {
|
||||
match target {
|
||||
ResourceTarget::Alerter(id) => {
|
||||
let mut alerter = get_check_permissions::<Alerter>(
|
||||
ResourceTarget::Swarm(id) => {
|
||||
let mut swarm = get_check_permissions::<Swarm>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Alerter::replace_ids(&mut alerter);
|
||||
res.alerters.push(convert_resource::<Alerter>(
|
||||
alerter,
|
||||
Swarm::replace_ids(&mut swarm);
|
||||
res.swarms.push(convert_resource::<Swarm>(
|
||||
swarm,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
let mut sync = get_check_permissions::<ResourceSync>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
if sync.config.file_contents.is_empty()
|
||||
&& (sync.config.files_on_host
|
||||
|| !sync.config.repo.is_empty()
|
||||
|| !sync.config.linked_repo.is_empty())
|
||||
{
|
||||
ResourceSync::replace_ids(&mut sync);
|
||||
res.resource_syncs.push(convert_resource::<ResourceSync>(
|
||||
sync,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
let mut server = get_check_permissions::<Server>(
|
||||
&id,
|
||||
@@ -258,31 +238,16 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
let mut builder = get_check_permissions::<Builder>(
|
||||
ResourceTarget::Stack(id) => {
|
||||
let mut stack = get_check_permissions::<Stack>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Builder::replace_ids(&mut builder);
|
||||
res.builders.push(convert_resource::<Builder>(
|
||||
builder,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
let mut build = get_check_permissions::<Build>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Build::replace_ids(&mut build);
|
||||
res.builds.push(convert_resource::<Build>(
|
||||
build,
|
||||
Stack::replace_ids(&mut stack);
|
||||
res.stacks.push(convert_resource::<Stack>(
|
||||
stack,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
@@ -303,6 +268,21 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
let mut build = get_check_permissions::<Build>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Build::replace_ids(&mut build);
|
||||
res.builds.push(convert_resource::<Build>(
|
||||
build,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
&id,
|
||||
@@ -318,21 +298,6 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
let mut stack = get_check_permissions::<Stack>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Stack::replace_ids(&mut stack);
|
||||
res.stacks.push(convert_resource::<Stack>(
|
||||
stack,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
let mut procedure = get_check_permissions::<Procedure>(
|
||||
&id,
|
||||
@@ -363,6 +328,57 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
&id_to_tags,
|
||||
));
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
let mut sync = get_check_permissions::<ResourceSync>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
if sync.config.file_contents.is_empty()
|
||||
&& (sync.config.files_on_host
|
||||
|| !sync.config.repo.is_empty()
|
||||
|| !sync.config.linked_repo.is_empty())
|
||||
{
|
||||
ResourceSync::replace_ids(&mut sync);
|
||||
res.resource_syncs.push(convert_resource::<ResourceSync>(
|
||||
sync,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
let mut builder = get_check_permissions::<Builder>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Builder::replace_ids(&mut builder);
|
||||
res.builders.push(convert_resource::<Builder>(
|
||||
builder,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
let mut alerter = get_check_permissions::<Alerter>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Alerter::replace_ids(&mut alerter);
|
||||
res.alerters.push(convert_resource::<Alerter>(
|
||||
alerter,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
))
|
||||
}
|
||||
ResourceTarget::System(_) => continue,
|
||||
};
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ use komodo_client::{
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
stack::Stack,
|
||||
swarm::Swarm,
|
||||
sync::ResourceSync,
|
||||
update::{Update, UpdateListItem},
|
||||
user::User,
|
||||
@@ -29,7 +30,7 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
permission::{get_check_permissions, get_resource_ids_for_user},
|
||||
permission::{get_check_permissions, list_resource_ids_for_user},
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
@@ -45,99 +46,137 @@ impl Resolve<ReadArgs> for ListUpdates {
|
||||
let query = if user.admin || core_config().transparent_mode {
|
||||
self.query
|
||||
} else {
|
||||
let server_query = get_resource_ids_for_user::<Server>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Server", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Server" });
|
||||
|
||||
let deployment_query =
|
||||
get_resource_ids_for_user::<Deployment>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Deployment", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
|
||||
|
||||
let stack_query = get_resource_ids_for_user::<Stack>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Stack", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Stack" });
|
||||
|
||||
let build_query = get_resource_ids_for_user::<Build>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Build", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Build" });
|
||||
|
||||
let repo_query = get_resource_ids_for_user::<Repo>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Repo", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Repo" });
|
||||
|
||||
let procedure_query =
|
||||
get_resource_ids_for_user::<Procedure>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Procedure", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
|
||||
|
||||
let action_query = get_resource_ids_for_user::<Action>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Action", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Action" });
|
||||
|
||||
let builder_query = get_resource_ids_for_user::<Builder>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Builder", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Builder" });
|
||||
|
||||
let alerter_query = get_resource_ids_for_user::<Alerter>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Alerter", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
|
||||
|
||||
let resource_sync_query = get_resource_ids_for_user::<
|
||||
ResourceSync,
|
||||
>(user)
|
||||
let server_query = list_resource_ids_for_user::<Server>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "ResourceSync", "target.id": { "$in": ids }
|
||||
"target.type": "Server", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
|
||||
.unwrap_or_else(|| doc! { "target.type": "Server" });
|
||||
|
||||
let deployment_query =
|
||||
list_resource_ids_for_user::<Deployment>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Deployment", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
|
||||
|
||||
let stack_query = list_resource_ids_for_user::<Stack>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Stack", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Stack" });
|
||||
|
||||
let build_query = list_resource_ids_for_user::<Build>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Build", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Build" });
|
||||
|
||||
let repo_query = list_resource_ids_for_user::<Repo>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Repo", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Repo" });
|
||||
|
||||
let procedure_query = list_resource_ids_for_user::<Procedure>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Procedure", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
|
||||
|
||||
let action_query = list_resource_ids_for_user::<Action>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Action", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Action" });
|
||||
|
||||
let builder_query = list_resource_ids_for_user::<Builder>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Builder", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Builder" });
|
||||
|
||||
let alerter_query = list_resource_ids_for_user::<Alerter>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Alerter", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
|
||||
|
||||
let resource_sync_query =
|
||||
list_resource_ids_for_user::<ResourceSync>(
|
||||
None,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "ResourceSync", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
|
||||
|
||||
let mut query = self.query.unwrap_or_default();
|
||||
query.extend(doc! {
|
||||
@@ -228,6 +267,14 @@ impl Resolve<ReadArgs> for GetUpdate {
|
||||
anyhow!("user must be admin to view system updates").into(),
|
||||
);
|
||||
}
|
||||
ResourceTarget::Swarm(id) => {
|
||||
get_check_permissions::<Swarm>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
get_check_permissions::<Server>(
|
||||
id,
|
||||
@@ -236,6 +283,14 @@ impl Resolve<ReadArgs> for GetUpdate {
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
get_check_permissions::<Stack>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
get_check_permissions::<Deployment>(
|
||||
id,
|
||||
@@ -260,22 +315,6 @@ impl Resolve<ReadArgs> for GetUpdate {
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
get_check_permissions::<Builder>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
get_check_permissions::<Alerter>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
get_check_permissions::<Procedure>(
|
||||
id,
|
||||
@@ -300,8 +339,16 @@ impl Resolve<ReadArgs> for GetUpdate {
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
get_check_permissions::<Stack>(
|
||||
ResourceTarget::Builder(id) => {
|
||||
get_check_permissions::<Builder>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
get_check_permissions::<Alerter>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
|
||||
@@ -1,27 +1,15 @@
|
||||
use anyhow::Context;
|
||||
use axum::{Extension, Router, middleware, routing::post};
|
||||
use komodo_client::{
|
||||
api::terminal::*,
|
||||
entities::{
|
||||
deployment::Deployment, permission::PermissionLevel,
|
||||
server::Server, stack::Stack, user::User,
|
||||
},
|
||||
};
|
||||
use komodo_client::{api::terminal::*, entities::user::User};
|
||||
use serror::Json;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request, helpers::periphery_client,
|
||||
permission::get_check_permissions, resource::get,
|
||||
state::stack_status_cache,
|
||||
auth::auth_request, helpers::terminal::setup_target_for_user,
|
||||
};
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/execute", post(execute_terminal))
|
||||
.route("/execute/container", post(execute_container_exec))
|
||||
.route("/execute/deployment", post(execute_deployment_exec))
|
||||
.route("/execute/stack", post(execute_stack_exec))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
@@ -29,271 +17,34 @@ pub fn router() -> Router {
|
||||
// ExecuteTerminal
|
||||
// =================
|
||||
|
||||
async fn execute_terminal(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<ExecuteTerminalBody>,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
execute_terminal_inner(Uuid::new_v4(), request, user).await
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteTerminal",
|
||||
skip(user),
|
||||
skip_all,
|
||||
fields(
|
||||
user_id = user.id,
|
||||
operator = user.id,
|
||||
target,
|
||||
terminal,
|
||||
init = format!("{init:?}")
|
||||
)
|
||||
)]
|
||||
async fn execute_terminal_inner(
|
||||
req_id: Uuid,
|
||||
ExecuteTerminalBody {
|
||||
server,
|
||||
async fn execute_terminal(
|
||||
Extension(user): Extension<User>,
|
||||
Json(ExecuteTerminalBody {
|
||||
target,
|
||||
terminal,
|
||||
command,
|
||||
}: ExecuteTerminalBody,
|
||||
user: User,
|
||||
init,
|
||||
}): Json<ExecuteTerminalBody>,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!("/terminal/execute request | user: {}", user.username);
|
||||
|
||||
let res = async {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
let (target, terminal, periphery) =
|
||||
setup_target_for_user(target, terminal, init, &user).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let stream = periphery
|
||||
.execute_terminal(target, terminal, command)
|
||||
.await
|
||||
.context("Failed to execute command on Terminal")?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_terminal(terminal, command)
|
||||
.await
|
||||
.context("Failed to execute command on periphery")?;
|
||||
|
||||
anyhow::Ok(stream)
|
||||
}
|
||||
.await;
|
||||
|
||||
let stream = match res {
|
||||
Ok(stream) => stream,
|
||||
Err(e) => {
|
||||
warn!("/terminal/execute request {req_id} error: {e:#}");
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
|
||||
}
|
||||
|
||||
// ======================
|
||||
// ExecuteContainerExec
|
||||
// ======================
|
||||
|
||||
async fn execute_container_exec(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<ExecuteContainerExecBody>,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
execute_container_exec_inner(Uuid::new_v4(), request, user).await
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteContainerExec",
|
||||
skip(user),
|
||||
fields(
|
||||
user_id = user.id,
|
||||
)
|
||||
)]
|
||||
async fn execute_container_exec_inner(
|
||||
req_id: Uuid,
|
||||
ExecuteContainerExecBody {
|
||||
server,
|
||||
container,
|
||||
shell,
|
||||
command,
|
||||
}: ExecuteContainerExecBody,
|
||||
user: User,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!(
|
||||
"/terminal/execute/container request | user: {}",
|
||||
user.username
|
||||
);
|
||||
|
||||
let res = async {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_container_exec(container, shell, command)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
|
||||
anyhow::Ok(stream)
|
||||
}
|
||||
.await;
|
||||
|
||||
let stream = match res {
|
||||
Ok(stream) => stream,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"/terminal/execute/container request {req_id} error: {e:#}"
|
||||
);
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
|
||||
}
|
||||
|
||||
// =======================
|
||||
// ExecuteDeploymentExec
|
||||
// =======================
|
||||
|
||||
async fn execute_deployment_exec(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<ExecuteDeploymentExecBody>,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
execute_deployment_exec_inner(Uuid::new_v4(), request, user).await
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteDeploymentExec",
|
||||
skip(user),
|
||||
fields(
|
||||
user_id = user.id,
|
||||
)
|
||||
)]
|
||||
async fn execute_deployment_exec_inner(
|
||||
req_id: Uuid,
|
||||
ExecuteDeploymentExecBody {
|
||||
deployment,
|
||||
shell,
|
||||
command,
|
||||
}: ExecuteDeploymentExecBody,
|
||||
user: User,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!(
|
||||
"/terminal/execute/deployment request | user: {}",
|
||||
user.username
|
||||
);
|
||||
|
||||
let res = async {
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let server = get::<Server>(&deployment.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_container_exec(deployment.name, shell, command)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
|
||||
anyhow::Ok(stream)
|
||||
}
|
||||
.await;
|
||||
|
||||
let stream = match res {
|
||||
Ok(stream) => stream,
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"/terminal/execute/deployment request {req_id} error: {e:#}"
|
||||
);
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
|
||||
}
|
||||
|
||||
// ==================
|
||||
// ExecuteStackExec
|
||||
// ==================
|
||||
|
||||
async fn execute_stack_exec(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<ExecuteStackExecBody>,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
execute_stack_exec_inner(Uuid::new_v4(), request, user).await
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteStackExec",
|
||||
skip(user),
|
||||
fields(
|
||||
user_id = user.id,
|
||||
)
|
||||
)]
|
||||
async fn execute_stack_exec_inner(
|
||||
req_id: Uuid,
|
||||
ExecuteStackExecBody {
|
||||
stack,
|
||||
service,
|
||||
shell,
|
||||
command,
|
||||
}: ExecuteStackExecBody,
|
||||
user: User,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!("/terminal/execute/stack request | user: {}", user.username);
|
||||
|
||||
let res = async {
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let server = get::<Server>(&stack.config.server_id).await?;
|
||||
|
||||
let container = stack_status_cache()
|
||||
.get(&stack.id)
|
||||
.await
|
||||
.context("could not get stack status")?
|
||||
.curr
|
||||
.services
|
||||
.iter()
|
||||
.find(|s| s.service == service)
|
||||
.context("could not find service")?
|
||||
.container
|
||||
.as_ref()
|
||||
.context("could not find service container")?
|
||||
.name
|
||||
.clone();
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_container_exec(container, shell, command)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
|
||||
anyhow::Ok(stream)
|
||||
}
|
||||
.await;
|
||||
|
||||
let stream = match res {
|
||||
Ok(stream) => stream,
|
||||
Err(e) => {
|
||||
warn!("/terminal/execute/stack request {req_id} error: {e:#}");
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream.into_line_stream()))
|
||||
Ok(axum::body::Body::from_stream(stream))
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_bson,
|
||||
};
|
||||
use derive_variants::EnumVariants;
|
||||
use komodo_client::entities::random_string;
|
||||
use komodo_client::{
|
||||
api::user::*,
|
||||
entities::{api_key::ApiKey, komodo_timestamp, user::User},
|
||||
@@ -21,9 +22,7 @@ use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request,
|
||||
helpers::{query::get_user, random_string},
|
||||
state::db_client,
|
||||
auth::auth_request, helpers::query::get_user, state::db_client,
|
||||
};
|
||||
|
||||
use super::Variant;
|
||||
@@ -66,7 +65,6 @@ async fn variant_handler(
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
#[instrument(name = "UserHandler", level = "debug", skip(user))]
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<UserRequest>,
|
||||
@@ -89,11 +87,6 @@ async fn handler(
|
||||
const RECENTLY_VIEWED_MAX: usize = 10;
|
||||
|
||||
impl Resolve<UserArgs> for PushRecentlyViewed {
|
||||
#[instrument(
|
||||
name = "PushRecentlyViewed",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
@@ -131,11 +124,6 @@ impl Resolve<UserArgs> for PushRecentlyViewed {
|
||||
}
|
||||
|
||||
impl Resolve<UserArgs> for SetLastSeenUpdate {
|
||||
#[instrument(
|
||||
name = "SetLastSeenUpdate",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
@@ -158,7 +146,11 @@ const SECRET_LENGTH: usize = 40;
|
||||
const BCRYPT_COST: u32 = 10;
|
||||
|
||||
impl Resolve<UserArgs> for CreateApiKey {
|
||||
#[instrument(name = "CreateApiKey", level = "debug", skip(user))]
|
||||
#[instrument(
|
||||
"CreateApiKey",
|
||||
skip_all,
|
||||
fields(operator = user.id)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
@@ -188,7 +180,11 @@ impl Resolve<UserArgs> for CreateApiKey {
|
||||
}
|
||||
|
||||
impl Resolve<UserArgs> for DeleteApiKey {
|
||||
#[instrument(name = "DeleteApiKey", level = "debug", skip(user))]
|
||||
#[instrument(
|
||||
"DeleteApiKey",
|
||||
skip_all,
|
||||
fields(operator = user.id)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
|
||||
@@ -11,17 +11,34 @@ use crate::{permission::get_check_permissions, resource};
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateAction {
|
||||
#[instrument(name = "CreateAction", skip(user))]
|
||||
#[instrument(
|
||||
"CreateAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Action> {
|
||||
resource::create::<Action>(&self.name, self.config, user).await
|
||||
resource::create::<Action>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyAction {
|
||||
#[instrument(name = "CopyAction", skip(user))]
|
||||
#[instrument(
|
||||
"CopyAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.name,
|
||||
copy_action = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -32,12 +49,21 @@ impl Resolve<WriteArgs> for CopyAction {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Action>(&self.name, config.into(), user).await
|
||||
resource::create::<Action>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateAction {
|
||||
#[instrument(name = "UpdateAction", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -47,7 +73,15 @@ impl Resolve<WriteArgs> for UpdateAction {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameAction {
|
||||
#[instrument(name = "RenameAction", skip(user))]
|
||||
#[instrument(
|
||||
"RenameAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -57,8 +91,18 @@ impl Resolve<WriteArgs> for RenameAction {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteAction {
|
||||
#[instrument(name = "DeleteAction", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Action> {
|
||||
Ok(resource::delete::<Action>(&self.id, args).await?)
|
||||
#[instrument(
|
||||
"DeleteAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.id
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Action> {
|
||||
Ok(resource::delete::<Action>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
41
bin/core/src/api/write/alert.rs
Normal file
41
bin/core/src/api/write/alert.rs
Normal file
@@ -0,0 +1,41 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use komodo_client::{api::write::CloseAlert, entities::NoData};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{api::write::WriteArgs, state::db_client};
|
||||
|
||||
impl Resolve<WriteArgs> for CloseAlert {
|
||||
#[instrument(
|
||||
"CloseAlert",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
alert_id = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
db_client()
|
||||
.alerts
|
||||
.update_one(
|
||||
doc! { "_id": ObjectId::from_str(&self.id)? },
|
||||
doc! { "$set": { "resolved": true } },
|
||||
)
|
||||
.await
|
||||
.context("Failed to close Alert on database")?;
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
@@ -11,17 +11,34 @@ use crate::{permission::get_check_permissions, resource};
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateAlerter {
|
||||
#[instrument(name = "CreateAlerter", skip(user))]
|
||||
#[instrument(
|
||||
"CreateAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
resource::create::<Alerter>(&self.name, self.config, user).await
|
||||
resource::create::<Alerter>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyAlerter {
|
||||
#[instrument(name = "CopyAlerter", skip(user))]
|
||||
#[instrument(
|
||||
"CopyAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.name,
|
||||
copy_alerter = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -32,22 +49,38 @@ impl Resolve<WriteArgs> for CopyAlerter {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Alerter>(&self.name, config.into(), user).await
|
||||
resource::create::<Alerter>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteAlerter {
|
||||
#[instrument(name = "DeleteAlerter", skip(args))]
|
||||
#[instrument(
|
||||
"DeleteAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
Ok(resource::delete::<Alerter>(&self.id, args).await?)
|
||||
Ok(resource::delete::<Alerter>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateAlerter {
|
||||
#[instrument(name = "UpdateAlerter", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -60,7 +93,15 @@ impl Resolve<WriteArgs> for UpdateAlerter {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameAlerter {
|
||||
#[instrument(name = "RenameAlerter", skip(user))]
|
||||
#[instrument(
|
||||
"RenameAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
|
||||
@@ -1,61 +1,75 @@
|
||||
use std::{path::PathBuf, str::FromStr, time::Duration};
|
||||
use std::{path::PathBuf, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::mongodb::bson::to_document;
|
||||
use database::{
|
||||
mongo_indexed::doc, mungos::mongodb::bson::oid::ObjectId,
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
FileContents, NoData, Operation, RepoExecutionArgs,
|
||||
all_logs_success,
|
||||
build::{Build, BuildInfo, PartialBuildConfig},
|
||||
build::{Build, BuildInfo},
|
||||
builder::{Builder, BuilderConfig},
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::ServerState,
|
||||
update::Update,
|
||||
},
|
||||
};
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use periphery_client::{
|
||||
PeripheryClient,
|
||||
api::build::{
|
||||
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
|
||||
},
|
||||
use periphery_client::api::build::{
|
||||
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::fs;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
connection::PeripheryConnectionArgs,
|
||||
helpers::{
|
||||
git_token, periphery_client,
|
||||
query::get_server_with_state,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
periphery::PeripheryClient,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{db_client, github_client},
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateBuild {
|
||||
#[instrument(name = "CreateBuild", skip(user))]
|
||||
#[instrument(
|
||||
"CreateBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Build> {
|
||||
resource::create::<Build>(&self.name, self.config, user).await
|
||||
resource::create::<Build>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyBuild {
|
||||
#[instrument(name = "CopyBuild", skip(user))]
|
||||
#[instrument(
|
||||
"CopyBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.name,
|
||||
copy_build = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -68,19 +82,38 @@ impl Resolve<WriteArgs> for CopyBuild {
|
||||
.await?;
|
||||
// reset version to 0.0.0
|
||||
config.version = Default::default();
|
||||
resource::create::<Build>(&self.name, config.into(), user).await
|
||||
resource::create::<Build>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteBuild {
|
||||
#[instrument(name = "DeleteBuild", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Build> {
|
||||
Ok(resource::delete::<Build>(&self.id, args).await?)
|
||||
#[instrument(
|
||||
"DeleteBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Build> {
|
||||
Ok(resource::delete::<Build>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateBuild {
|
||||
#[instrument(name = "UpdateBuild", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -90,7 +123,15 @@ impl Resolve<WriteArgs> for UpdateBuild {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameBuild {
|
||||
#[instrument(name = "RenameBuild", skip(user))]
|
||||
#[instrument(
|
||||
"RenameBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -100,7 +141,14 @@ impl Resolve<WriteArgs> for RenameBuild {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for WriteBuildFileContents {
|
||||
#[instrument(name = "WriteBuildFileContents", skip(args))]
|
||||
#[instrument(
|
||||
"WriteBuildFileContents",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = args.user.id,
|
||||
build = self.build,
|
||||
)
|
||||
)]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
|
||||
let build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
@@ -172,6 +220,7 @@ impl Resolve<WriteArgs> for WriteBuildFileContents {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("WriteDockerfileContentsGit", skip_all)]
|
||||
async fn write_dockerfile_contents_git(
|
||||
req: WriteBuildFileContents,
|
||||
args: &WriteArgs,
|
||||
@@ -269,8 +318,9 @@ async fn write_dockerfile_contents_git(
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
if let Err(e) =
|
||||
fs::write(&full_path, &contents).await.with_context(|| {
|
||||
if let Err(e) = secret_file::write_async(&full_path, &contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Failed to write dockerfile contents to {full_path:?}")
|
||||
})
|
||||
{
|
||||
@@ -317,11 +367,6 @@ async fn write_dockerfile_contents_git(
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RefreshBuildCache {
|
||||
#[instrument(
|
||||
name = "RefreshBuildCache",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -345,23 +390,28 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
|
||||
None
|
||||
};
|
||||
|
||||
let (
|
||||
remote_path,
|
||||
remote_contents,
|
||||
remote_error,
|
||||
latest_hash,
|
||||
latest_message,
|
||||
) = if build.config.files_on_host {
|
||||
let RemoteDockerfileContents {
|
||||
path,
|
||||
contents,
|
||||
error,
|
||||
hash,
|
||||
message,
|
||||
} = if build.config.files_on_host {
|
||||
// =============
|
||||
// FILES ON HOST
|
||||
// =============
|
||||
match get_on_host_dockerfile(&build).await {
|
||||
Ok(FileContents { path, contents }) => {
|
||||
(Some(path), Some(contents), None, None, None)
|
||||
}
|
||||
Err(e) => {
|
||||
(None, None, Some(format_serror(&e.into())), None, None)
|
||||
RemoteDockerfileContents {
|
||||
path: Some(path),
|
||||
contents: Some(contents),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
Err(e) => RemoteDockerfileContents {
|
||||
error: Some(format_serror(&e.into())),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
} else if let Some(repo) = &repo {
|
||||
let Some(res) = get_git_remote(&build, repo.into()).await?
|
||||
@@ -381,7 +431,7 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
|
||||
// =============
|
||||
// UI BASED FILE
|
||||
// =============
|
||||
(None, None, None, None, None)
|
||||
RemoteDockerfileContents::default()
|
||||
};
|
||||
|
||||
let info = BuildInfo {
|
||||
@@ -389,11 +439,11 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
|
||||
built_hash: build.info.built_hash,
|
||||
built_message: build.info.built_message,
|
||||
built_contents: build.info.built_contents,
|
||||
remote_path,
|
||||
remote_contents,
|
||||
remote_error,
|
||||
latest_hash,
|
||||
latest_message,
|
||||
remote_path: path,
|
||||
remote_contents: contents,
|
||||
remote_error: error,
|
||||
latest_hash: hash,
|
||||
latest_message: message,
|
||||
};
|
||||
|
||||
let info = to_document(&info)
|
||||
@@ -428,13 +478,26 @@ async fn get_on_host_periphery(
|
||||
Err(anyhow!("Files on host doesn't work with AWS builder"))
|
||||
}
|
||||
BuilderConfig::Url(config) => {
|
||||
// TODO: Ensure connection is actually established.
|
||||
// Builder id no good because it may be active for multiple connections.
|
||||
let periphery = PeripheryClient::new(
|
||||
config.address,
|
||||
config.passkey,
|
||||
Duration::from_secs(3),
|
||||
);
|
||||
periphery.health_check().await?;
|
||||
Ok(periphery)
|
||||
PeripheryConnectionArgs::from_url_builder(
|
||||
&ObjectId::new().to_hex(),
|
||||
&config,
|
||||
),
|
||||
config.insecure_tls,
|
||||
)
|
||||
.await?;
|
||||
// Poll for connection to be estalished
|
||||
let mut err = None;
|
||||
for _ in 0..10 {
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
match periphery.health_check().await {
|
||||
Ok(_) => return Ok(periphery),
|
||||
Err(e) => err = Some(e),
|
||||
};
|
||||
}
|
||||
Err(err.context("Missing error")?)
|
||||
}
|
||||
BuilderConfig::Server(config) => {
|
||||
if config.server_id.is_empty() {
|
||||
@@ -449,7 +512,7 @@ async fn get_on_host_periphery(
|
||||
"Builder server is disabled or not reachable"
|
||||
));
|
||||
};
|
||||
periphery_client(&server)
|
||||
periphery_client(&server).await
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -472,15 +535,7 @@ async fn get_on_host_dockerfile(
|
||||
async fn get_git_remote(
|
||||
build: &Build,
|
||||
mut clone_args: RepoExecutionArgs,
|
||||
) -> anyhow::Result<
|
||||
Option<(
|
||||
Option<String>,
|
||||
Option<String>,
|
||||
Option<String>,
|
||||
Option<String>,
|
||||
Option<String>,
|
||||
)>,
|
||||
> {
|
||||
) -> anyhow::Result<Option<RemoteDockerfileContents>> {
|
||||
if clone_args.provider.is_empty() {
|
||||
// Nothing to do here
|
||||
return Ok(None);
|
||||
@@ -507,10 +562,19 @@ async fn get_git_remote(
|
||||
access_token,
|
||||
)
|
||||
.await
|
||||
.context("failed to clone build repo")?;
|
||||
.context("Failed to clone Build repo")?;
|
||||
|
||||
let relative_path = PathBuf::from_str(&build.config.build_path)
|
||||
.context("Invalid build path")?
|
||||
// Ensure clone / pull successful,
|
||||
// propogate error log -> 'errored' and return.
|
||||
if let Some(failure) = res.logs.iter().find(|log| !log.success) {
|
||||
return Ok(Some(RemoteDockerfileContents {
|
||||
path: Some(format!("Failed at: {}", failure.stage)),
|
||||
error: Some(failure.combined()),
|
||||
..Default::default()
|
||||
}));
|
||||
}
|
||||
|
||||
let relative_path = PathBuf::from(&build.config.build_path)
|
||||
.join(&build.config.dockerfile_path);
|
||||
|
||||
let full_path = repo_path.join(&relative_path);
|
||||
@@ -521,209 +585,20 @@ async fn get_git_remote(
|
||||
Ok(contents) => (Some(contents), None),
|
||||
Err(e) => (None, Some(format_serror(&e.into()))),
|
||||
};
|
||||
Ok(Some((
|
||||
Some(relative_path.display().to_string()),
|
||||
Ok(Some(RemoteDockerfileContents {
|
||||
path: Some(relative_path.display().to_string()),
|
||||
contents,
|
||||
error,
|
||||
res.commit_hash,
|
||||
res.commit_message,
|
||||
)))
|
||||
hash: res.commit_hash,
|
||||
message: res.commit_message,
|
||||
}))
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateBuildWebhook {
|
||||
#[instrument(name = "CreateBuildWebhook", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<CreateBuildWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let WriteArgs { user } = args;
|
||||
|
||||
let build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = build.config.repo.split('/');
|
||||
let owner = split.next().context("Build repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Build repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if build.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&build.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// Now good to create the webhook
|
||||
let request = ReposCreateWebhookRequest {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
token: Default::default(),
|
||||
}),
|
||||
events: vec![String::from("push")],
|
||||
name: String::from("web"),
|
||||
};
|
||||
github_repos
|
||||
.create_webhook(owner, repo, &request)
|
||||
.await
|
||||
.context("failed to create webhook")?;
|
||||
|
||||
if !build.config.webhook_enabled {
|
||||
UpdateBuild {
|
||||
id: build.id,
|
||||
config: PartialBuildConfig {
|
||||
webhook_enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("failed to update build to enable webhook")?;
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteBuildWebhook {
|
||||
#[instrument(name = "DeleteBuildWebhook", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteBuildWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.git_provider != "github.com" {
|
||||
return Err(
|
||||
anyhow!("Can only manage github.com repo webhooks").into(),
|
||||
);
|
||||
}
|
||||
|
||||
if build.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't delete webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = build.config.repo.split('/');
|
||||
let owner = split.next().context("Build repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Build repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
github_repos
|
||||
.delete_webhook(owner, repo, webhook.id)
|
||||
.await
|
||||
.context("failed to delete webhook")?;
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// No webhook to delete, all good
|
||||
Ok(NoData {})
|
||||
}
|
||||
#[derive(Default)]
|
||||
pub struct RemoteDockerfileContents {
|
||||
pub path: Option<String>,
|
||||
pub contents: Option<String>,
|
||||
pub error: Option<String>,
|
||||
pub hash: Option<String>,
|
||||
pub message: Option<String>,
|
||||
}
|
||||
|
||||
@@ -11,17 +11,34 @@ use crate::{permission::get_check_permissions, resource};
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateBuilder {
|
||||
#[instrument(name = "CreateBuilder", skip(user))]
|
||||
#[instrument(
|
||||
"CreateBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
resource::create::<Builder>(&self.name, self.config, user).await
|
||||
resource::create::<Builder>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyBuilder {
|
||||
#[instrument(name = "CopyBuilder", skip(user))]
|
||||
#[instrument(
|
||||
"CopyBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.name,
|
||||
copy_builder = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -32,22 +49,38 @@ impl Resolve<WriteArgs> for CopyBuilder {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Builder>(&self.name, config.into(), user).await
|
||||
resource::create::<Builder>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteBuilder {
|
||||
#[instrument(name = "DeleteBuilder", skip(args))]
|
||||
#[instrument(
|
||||
"DeleteBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
Ok(resource::delete::<Builder>(&self.id, args).await?)
|
||||
Ok(resource::delete::<Builder>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateBuilder {
|
||||
#[instrument(name = "UpdateBuilder", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -60,7 +93,15 @@ impl Resolve<WriteArgs> for UpdateBuilder {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameBuilder {
|
||||
#[instrument(name = "RenameBuilder", skip(user))]
|
||||
#[instrument(
|
||||
"RenameBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.id,
|
||||
new_name = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
|
||||
@@ -33,18 +33,39 @@ use crate::{
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateDeployment {
|
||||
#[instrument(name = "CreateDeployment", skip(user))]
|
||||
#[instrument(
|
||||
"CreateDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
resource::create::<Deployment>(&self.name, self.config, user)
|
||||
.await
|
||||
resource::create::<Deployment>(
|
||||
&self.name,
|
||||
self.config,
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyDeployment {
|
||||
#[instrument(name = "CopyDeployment", skip(user))]
|
||||
#[instrument(
|
||||
"CopyDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.name,
|
||||
copy_deployment = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -56,13 +77,26 @@ impl Resolve<WriteArgs> for CopyDeployment {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Deployment>(&self.name, config.into(), user)
|
||||
.await
|
||||
resource::create::<Deployment>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
#[instrument(name = "CreateDeploymentFromContainer", skip(user))]
|
||||
#[instrument(
|
||||
"CreateDeploymentFromContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.server,
|
||||
deployment = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -85,7 +119,8 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let container = periphery_client(&server)?
|
||||
let container = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
@@ -149,22 +184,38 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
});
|
||||
}
|
||||
|
||||
resource::create::<Deployment>(&self.name, config, user).await
|
||||
resource::create::<Deployment>(&self.name, config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteDeployment {
|
||||
#[instrument(name = "DeleteDeployment", skip(args))]
|
||||
#[instrument(
|
||||
"DeleteDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.id
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
Ok(resource::delete::<Deployment>(&self.id, args).await?)
|
||||
Ok(resource::delete::<Deployment>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateDeployment {
|
||||
#[instrument(name = "UpdateDeployment", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -177,7 +228,15 @@ impl Resolve<WriteArgs> for UpdateDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameDeployment {
|
||||
#[instrument(name = "RenameDeployment", skip(user))]
|
||||
#[instrument(
|
||||
"RenameDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -231,7 +290,8 @@ impl Resolve<WriteArgs> for RenameDeployment {
|
||||
if container_state != DeploymentState::NotDeployed {
|
||||
let server =
|
||||
resource::get::<Server>(&deployment.config.server_id).await?;
|
||||
let log = periphery_client(&server)?
|
||||
let log = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RenameContainer {
|
||||
curr_name: deployment.name.clone(),
|
||||
new_name: name.clone(),
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::{
|
||||
Extension, Router, extract::Path, middleware, routing::post,
|
||||
@@ -11,6 +9,7 @@ use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use strum::Display;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -19,10 +18,12 @@ use crate::auth::auth_request;
|
||||
use super::Variant;
|
||||
|
||||
mod action;
|
||||
mod alert;
|
||||
mod alerter;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod onboarding_key;
|
||||
mod permissions;
|
||||
mod procedure;
|
||||
mod provider;
|
||||
@@ -31,8 +32,10 @@ mod resource;
|
||||
mod server;
|
||||
mod service_user;
|
||||
mod stack;
|
||||
mod swarm;
|
||||
mod sync;
|
||||
mod tag;
|
||||
mod terminal;
|
||||
mod user;
|
||||
mod user_group;
|
||||
mod variable;
|
||||
@@ -45,12 +48,115 @@ pub struct WriteArgs {
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
|
||||
)]
|
||||
#[variant_derive(Debug)]
|
||||
#[variant_derive(Debug, Display)]
|
||||
#[args(WriteArgs)]
|
||||
#[response(Response)]
|
||||
#[error(serror::Error)]
|
||||
#[serde(tag = "type", content = "params")]
|
||||
pub enum WriteRequest {
|
||||
// ==== RESOURCE ====
|
||||
UpdateResourceMeta(UpdateResourceMeta),
|
||||
|
||||
// ==== SWARM ====
|
||||
CreateSwarm(CreateSwarm),
|
||||
CopySwarm(CopySwarm),
|
||||
DeleteSwarm(DeleteSwarm),
|
||||
UpdateSwarm(UpdateSwarm),
|
||||
RenameSwarm(RenameSwarm),
|
||||
|
||||
// ==== SERVER ====
|
||||
CreateServer(CreateServer),
|
||||
CopyServer(CopyServer),
|
||||
DeleteServer(DeleteServer),
|
||||
UpdateServer(UpdateServer),
|
||||
RenameServer(RenameServer),
|
||||
CreateNetwork(CreateNetwork),
|
||||
UpdateServerPublicKey(UpdateServerPublicKey),
|
||||
RotateServerKeys(RotateServerKeys),
|
||||
|
||||
// ==== TERMINAL ====
|
||||
CreateTerminal(CreateTerminal),
|
||||
DeleteTerminal(DeleteTerminal),
|
||||
DeleteAllTerminals(DeleteAllTerminals),
|
||||
BatchDeleteAllTerminals(BatchDeleteAllTerminals),
|
||||
|
||||
// ==== STACK ====
|
||||
CreateStack(CreateStack),
|
||||
CopyStack(CopyStack),
|
||||
DeleteStack(DeleteStack),
|
||||
UpdateStack(UpdateStack),
|
||||
RenameStack(RenameStack),
|
||||
WriteStackFileContents(WriteStackFileContents),
|
||||
RefreshStackCache(RefreshStackCache),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
CreateDeployment(CreateDeployment),
|
||||
CopyDeployment(CopyDeployment),
|
||||
CreateDeploymentFromContainer(CreateDeploymentFromContainer),
|
||||
DeleteDeployment(DeleteDeployment),
|
||||
UpdateDeployment(UpdateDeployment),
|
||||
RenameDeployment(RenameDeployment),
|
||||
|
||||
// ==== BUILD ====
|
||||
CreateBuild(CreateBuild),
|
||||
CopyBuild(CopyBuild),
|
||||
DeleteBuild(DeleteBuild),
|
||||
UpdateBuild(UpdateBuild),
|
||||
RenameBuild(RenameBuild),
|
||||
WriteBuildFileContents(WriteBuildFileContents),
|
||||
RefreshBuildCache(RefreshBuildCache),
|
||||
|
||||
// ==== REPO ====
|
||||
CreateRepo(CreateRepo),
|
||||
CopyRepo(CopyRepo),
|
||||
DeleteRepo(DeleteRepo),
|
||||
UpdateRepo(UpdateRepo),
|
||||
RenameRepo(RenameRepo),
|
||||
RefreshRepoCache(RefreshRepoCache),
|
||||
|
||||
// ==== PROCEDURE ====
|
||||
CreateProcedure(CreateProcedure),
|
||||
CopyProcedure(CopyProcedure),
|
||||
DeleteProcedure(DeleteProcedure),
|
||||
UpdateProcedure(UpdateProcedure),
|
||||
RenameProcedure(RenameProcedure),
|
||||
|
||||
// ==== ACTION ====
|
||||
CreateAction(CreateAction),
|
||||
CopyAction(CopyAction),
|
||||
DeleteAction(DeleteAction),
|
||||
UpdateAction(UpdateAction),
|
||||
RenameAction(RenameAction),
|
||||
|
||||
// ==== SYNC ====
|
||||
CreateResourceSync(CreateResourceSync),
|
||||
CopyResourceSync(CopyResourceSync),
|
||||
DeleteResourceSync(DeleteResourceSync),
|
||||
UpdateResourceSync(UpdateResourceSync),
|
||||
RenameResourceSync(RenameResourceSync),
|
||||
WriteSyncFileContents(WriteSyncFileContents),
|
||||
CommitSync(CommitSync),
|
||||
RefreshResourceSyncPending(RefreshResourceSyncPending),
|
||||
|
||||
// ==== BUILDER ====
|
||||
CreateBuilder(CreateBuilder),
|
||||
CopyBuilder(CopyBuilder),
|
||||
DeleteBuilder(DeleteBuilder),
|
||||
UpdateBuilder(UpdateBuilder),
|
||||
RenameBuilder(RenameBuilder),
|
||||
|
||||
// ==== ALERTER ====
|
||||
CreateAlerter(CreateAlerter),
|
||||
CopyAlerter(CopyAlerter),
|
||||
DeleteAlerter(DeleteAlerter),
|
||||
UpdateAlerter(UpdateAlerter),
|
||||
RenameAlerter(RenameAlerter),
|
||||
|
||||
// ==== ONBOARDING KEY ====
|
||||
CreateOnboardingKey(CreateOnboardingKey),
|
||||
UpdateOnboardingKey(UpdateOnboardingKey),
|
||||
DeleteOnboardingKey(DeleteOnboardingKey),
|
||||
|
||||
// ==== USER ====
|
||||
CreateLocalUser(CreateLocalUser),
|
||||
UpdateUserUsername(UpdateUserUsername),
|
||||
@@ -78,100 +184,6 @@ pub enum WriteRequest {
|
||||
UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
|
||||
UpdatePermissionOnTarget(UpdatePermissionOnTarget),
|
||||
|
||||
// ==== RESOURCE ====
|
||||
UpdateResourceMeta(UpdateResourceMeta),
|
||||
|
||||
// ==== SERVER ====
|
||||
CreateServer(CreateServer),
|
||||
CopyServer(CopyServer),
|
||||
DeleteServer(DeleteServer),
|
||||
UpdateServer(UpdateServer),
|
||||
RenameServer(RenameServer),
|
||||
CreateNetwork(CreateNetwork),
|
||||
CreateTerminal(CreateTerminal),
|
||||
DeleteTerminal(DeleteTerminal),
|
||||
DeleteAllTerminals(DeleteAllTerminals),
|
||||
|
||||
// ==== STACK ====
|
||||
CreateStack(CreateStack),
|
||||
CopyStack(CopyStack),
|
||||
DeleteStack(DeleteStack),
|
||||
UpdateStack(UpdateStack),
|
||||
RenameStack(RenameStack),
|
||||
WriteStackFileContents(WriteStackFileContents),
|
||||
RefreshStackCache(RefreshStackCache),
|
||||
CreateStackWebhook(CreateStackWebhook),
|
||||
DeleteStackWebhook(DeleteStackWebhook),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
CreateDeployment(CreateDeployment),
|
||||
CopyDeployment(CopyDeployment),
|
||||
CreateDeploymentFromContainer(CreateDeploymentFromContainer),
|
||||
DeleteDeployment(DeleteDeployment),
|
||||
UpdateDeployment(UpdateDeployment),
|
||||
RenameDeployment(RenameDeployment),
|
||||
|
||||
// ==== BUILD ====
|
||||
CreateBuild(CreateBuild),
|
||||
CopyBuild(CopyBuild),
|
||||
DeleteBuild(DeleteBuild),
|
||||
UpdateBuild(UpdateBuild),
|
||||
RenameBuild(RenameBuild),
|
||||
WriteBuildFileContents(WriteBuildFileContents),
|
||||
RefreshBuildCache(RefreshBuildCache),
|
||||
CreateBuildWebhook(CreateBuildWebhook),
|
||||
DeleteBuildWebhook(DeleteBuildWebhook),
|
||||
|
||||
// ==== BUILDER ====
|
||||
CreateBuilder(CreateBuilder),
|
||||
CopyBuilder(CopyBuilder),
|
||||
DeleteBuilder(DeleteBuilder),
|
||||
UpdateBuilder(UpdateBuilder),
|
||||
RenameBuilder(RenameBuilder),
|
||||
|
||||
// ==== REPO ====
|
||||
CreateRepo(CreateRepo),
|
||||
CopyRepo(CopyRepo),
|
||||
DeleteRepo(DeleteRepo),
|
||||
UpdateRepo(UpdateRepo),
|
||||
RenameRepo(RenameRepo),
|
||||
RefreshRepoCache(RefreshRepoCache),
|
||||
CreateRepoWebhook(CreateRepoWebhook),
|
||||
DeleteRepoWebhook(DeleteRepoWebhook),
|
||||
|
||||
// ==== ALERTER ====
|
||||
CreateAlerter(CreateAlerter),
|
||||
CopyAlerter(CopyAlerter),
|
||||
DeleteAlerter(DeleteAlerter),
|
||||
UpdateAlerter(UpdateAlerter),
|
||||
RenameAlerter(RenameAlerter),
|
||||
|
||||
// ==== PROCEDURE ====
|
||||
CreateProcedure(CreateProcedure),
|
||||
CopyProcedure(CopyProcedure),
|
||||
DeleteProcedure(DeleteProcedure),
|
||||
UpdateProcedure(UpdateProcedure),
|
||||
RenameProcedure(RenameProcedure),
|
||||
|
||||
// ==== ACTION ====
|
||||
CreateAction(CreateAction),
|
||||
CopyAction(CopyAction),
|
||||
DeleteAction(DeleteAction),
|
||||
UpdateAction(UpdateAction),
|
||||
RenameAction(RenameAction),
|
||||
|
||||
// ==== SYNC ====
|
||||
CreateResourceSync(CreateResourceSync),
|
||||
CopyResourceSync(CopyResourceSync),
|
||||
DeleteResourceSync(DeleteResourceSync),
|
||||
UpdateResourceSync(UpdateResourceSync),
|
||||
RenameResourceSync(RenameResourceSync),
|
||||
WriteSyncFileContents(WriteSyncFileContents),
|
||||
CommitSync(CommitSync),
|
||||
RefreshResourceSyncPending(RefreshResourceSyncPending),
|
||||
CreateSyncWebhook(CreateSyncWebhook),
|
||||
DeleteSyncWebhook(DeleteSyncWebhook),
|
||||
|
||||
// ==== TAG ====
|
||||
CreateTag(CreateTag),
|
||||
DeleteTag(DeleteTag),
|
||||
@@ -185,13 +197,16 @@ pub enum WriteRequest {
|
||||
UpdateVariableIsSecret(UpdateVariableIsSecret),
|
||||
DeleteVariable(DeleteVariable),
|
||||
|
||||
// ==== PROVIDERS ====
|
||||
// ==== PROVIDER ====
|
||||
CreateGitProviderAccount(CreateGitProviderAccount),
|
||||
UpdateGitProviderAccount(UpdateGitProviderAccount),
|
||||
DeleteGitProviderAccount(DeleteGitProviderAccount),
|
||||
CreateDockerRegistryAccount(CreateDockerRegistryAccount),
|
||||
UpdateDockerRegistryAccount(UpdateDockerRegistryAccount),
|
||||
DeleteDockerRegistryAccount(DeleteDockerRegistryAccount),
|
||||
|
||||
// ==== ALERT ====
|
||||
CloseAlert(CloseAlert),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
@@ -226,31 +241,22 @@ async fn handler(
|
||||
res?
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
name = "WriteRequest",
|
||||
skip(user, request),
|
||||
fields(
|
||||
user_id = user.id,
|
||||
request = format!("{:?}", request.extract_variant())
|
||||
)
|
||||
)]
|
||||
async fn task(
|
||||
req_id: Uuid,
|
||||
request: WriteRequest,
|
||||
user: User,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
info!("/write request | user: {}", user.username);
|
||||
|
||||
let timer = Instant::now();
|
||||
let variant = request.extract_variant();
|
||||
info!("/write request | {variant} | user: {}", user.username);
|
||||
|
||||
let res = request.resolve(&WriteArgs { user }).await;
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!("/write request {req_id} error: {:#}", e.error);
|
||||
warn!(
|
||||
"/write request {req_id} | {variant} | error: {:#}",
|
||||
e.error
|
||||
);
|
||||
}
|
||||
|
||||
let elapsed = timer.elapsed();
|
||||
debug!("/write request {req_id} | resolve time: {elapsed:?}");
|
||||
|
||||
res.map(|res| res.0)
|
||||
}
|
||||
|
||||
200
bin/core/src/api/write/onboarding_key.rs
Normal file
200
bin/core/src/api/write/onboarding_key.rs
Normal file
@@ -0,0 +1,200 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::{Document, doc};
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
CreateOnboardingKey, CreateOnboardingKeyResponse,
|
||||
DeleteOnboardingKey, DeleteOnboardingKeyResponse,
|
||||
UpdateOnboardingKey, UpdateOnboardingKeyResponse,
|
||||
},
|
||||
entities::{
|
||||
komodo_timestamp, onboarding_key::OnboardingKey, random_string,
|
||||
},
|
||||
};
|
||||
use noise::key::EncodedKeyPair;
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::{AddStatusCode, AddStatusCodeError};
|
||||
|
||||
use crate::{api::write::WriteArgs, state::db_client};
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for CreateOnboardingKey {
|
||||
#[instrument(
|
||||
"CreateOnboardingKey",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
name = self.name,
|
||||
expires = self.expires,
|
||||
tags = format!("{:?}", self.tags),
|
||||
copy_server = self.copy_server,
|
||||
create_builder = self.create_builder,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<CreateOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let private_key = if let Some(private_key) = self.private_key {
|
||||
private_key
|
||||
} else {
|
||||
format!("O-{}", random_string(30))
|
||||
};
|
||||
let public_key = EncodedKeyPair::from_private_key(&private_key)?
|
||||
.public
|
||||
.into_inner();
|
||||
let onboarding_key = OnboardingKey {
|
||||
public_key,
|
||||
name: self.name,
|
||||
enabled: true,
|
||||
onboarded: Default::default(),
|
||||
created_at: komodo_timestamp(),
|
||||
expires: self.expires,
|
||||
tags: self.tags,
|
||||
copy_server: self.copy_server,
|
||||
create_builder: self.create_builder,
|
||||
};
|
||||
let db = db_client();
|
||||
// Create the key
|
||||
db.onboarding_keys
|
||||
.insert_one(&onboarding_key)
|
||||
.await
|
||||
.context(
|
||||
"Failed to create Server onboarding key on database",
|
||||
)?;
|
||||
let created = db
|
||||
.onboarding_keys
|
||||
.find_one(doc! { "public_key": &onboarding_key.public_key })
|
||||
.await
|
||||
.context("Failed to query database for Server onboarding keys")?
|
||||
.context(
|
||||
"No Server onboarding key found on database after create",
|
||||
)?;
|
||||
Ok(CreateOnboardingKeyResponse {
|
||||
private_key,
|
||||
created,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateOnboardingKey {
|
||||
#[instrument(
|
||||
"UpdateOnboardingKey",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
public_key = self.public_key,
|
||||
update = format!("{:?}", self),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UpdateOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let query = doc! { "public_key": &self.public_key };
|
||||
|
||||
// No changes
|
||||
if self.is_none() {
|
||||
return db_client()
|
||||
.onboarding_keys
|
||||
.find_one(query)
|
||||
.await
|
||||
.context("Failed to query database for onboarding key")?
|
||||
.context("No matching onboarding key found")
|
||||
.status_code(StatusCode::NOT_FOUND);
|
||||
}
|
||||
|
||||
let mut update = Document::new();
|
||||
|
||||
if let Some(enabled) = self.enabled {
|
||||
update.insert("enabled", enabled);
|
||||
}
|
||||
|
||||
if let Some(name) = self.name {
|
||||
update.insert("name", name);
|
||||
}
|
||||
|
||||
if let Some(expires) = self.expires {
|
||||
update.insert("expires", expires);
|
||||
}
|
||||
|
||||
if let Some(tags) = self.tags {
|
||||
update.insert("tags", tags);
|
||||
}
|
||||
|
||||
if let Some(copy_server) = self.copy_server {
|
||||
update.insert("copy_server", copy_server);
|
||||
}
|
||||
|
||||
if let Some(create_builder) = self.create_builder {
|
||||
update.insert("create_builder", create_builder);
|
||||
}
|
||||
|
||||
db_client()
|
||||
.onboarding_keys
|
||||
.update_one(query.clone(), doc! { "$set": update })
|
||||
.await
|
||||
.context("Failed to update onboarding key on database")?;
|
||||
|
||||
db_client()
|
||||
.onboarding_keys
|
||||
.find_one(query)
|
||||
.await
|
||||
.context("Failed to query database for onboarding key")?
|
||||
.context("No matching onboarding key found")
|
||||
.status_code(StatusCode::NOT_FOUND)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteOnboardingKey {
|
||||
#[instrument(
|
||||
"DeleteOnboardingKey",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
public_key = self.public_key,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<DeleteOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let db = db_client();
|
||||
let query = doc! { "public_key": &self.public_key };
|
||||
let creation_key = db
|
||||
.onboarding_keys
|
||||
.find_one(query.clone())
|
||||
.await
|
||||
.context("Failed to query database for Server onboarding keys")?
|
||||
.context("Server onboarding key matching provided public key not found")
|
||||
.status_code(StatusCode::NOT_FOUND)?;
|
||||
db.onboarding_keys.delete_one(query).await.context(
|
||||
"Failed to delete Server onboarding key from database",
|
||||
)?;
|
||||
Ok(creation_key)
|
||||
}
|
||||
}
|
||||
@@ -8,6 +8,7 @@ use database::mungos::{
|
||||
options::UpdateOptions,
|
||||
},
|
||||
};
|
||||
use derive_variants::ExtractVariant as _;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
@@ -22,7 +23,15 @@ use crate::{helpers::query::get_user, state::db_client};
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateUserAdmin {
|
||||
#[instrument(name = "UpdateUserAdmin", skip(super_admin))]
|
||||
#[instrument(
|
||||
"UpdateUserAdmin",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = super_admin.id,
|
||||
target_user = self.user_id,
|
||||
admin = self.admin,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: super_admin }: &WriteArgs,
|
||||
@@ -60,7 +69,17 @@ impl Resolve<WriteArgs> for UpdateUserAdmin {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateUserBasePermissions {
|
||||
#[instrument(name = "UpdateUserBasePermissions", skip(admin))]
|
||||
#[instrument(
|
||||
"UpdateUserBasePermissions",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
target_user = self.user_id,
|
||||
enabled = self.enabled,
|
||||
create_servers = self.create_servers,
|
||||
create_builds = self.create_builds,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
@@ -117,7 +136,16 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
#[instrument(name = "UpdatePermissionOnResourceType", skip(admin))]
|
||||
#[instrument(
|
||||
"UpdatePermissionOnResourceType",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
user_target = format!("{:?}", self.user_target),
|
||||
resource_type = self.resource_type.to_string(),
|
||||
permission = format!("{:?}", self.permission),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
@@ -185,7 +213,17 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
|
||||
#[instrument(name = "UpdatePermissionOnTarget", skip(admin))]
|
||||
#[instrument(
|
||||
"UpdatePermissionOnTarget",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
user_target = format!("{:?}", self.user_target),
|
||||
resource_type = self.resource_target.extract_variant().to_string(),
|
||||
resource_id = self.resource_target.extract_variant_id().1,
|
||||
permission = format!("{:?}", self.permission),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
@@ -269,8 +307,8 @@ async fn extract_user_target_with_validation(
|
||||
.users
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for users")?
|
||||
.context("no matching user found")?
|
||||
.context("Failed to query db for users")?
|
||||
.context("No matching user found")?
|
||||
.id;
|
||||
Ok((UserTargetVariant::User, id))
|
||||
}
|
||||
@@ -283,8 +321,8 @@ async fn extract_user_target_with_validation(
|
||||
.user_groups
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for user_groups")?
|
||||
.context("no matching user_group found")?
|
||||
.context("Failed to query db for user_groups")?
|
||||
.context("No matching user_group found")?
|
||||
.id;
|
||||
Ok((UserTargetVariant::UserGroup, id))
|
||||
}
|
||||
@@ -300,47 +338,19 @@ async fn extract_resource_target_with_validation(
|
||||
let res = resource_target.extract_variant_id();
|
||||
Ok((res.0, res.1.clone()))
|
||||
}
|
||||
ResourceTarget::Build(ident) => {
|
||||
ResourceTarget::Swarm(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.builds
|
||||
.swarms
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for builds")?
|
||||
.context("no matching build found")?
|
||||
.context("Failed to query db for swarms")?
|
||||
.context("No matching server found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Build, id))
|
||||
}
|
||||
ResourceTarget::Builder(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.builders
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for builders")?
|
||||
.context("no matching builder found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Builder, id))
|
||||
}
|
||||
ResourceTarget::Deployment(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.deployments
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for deployments")?
|
||||
.context("no matching deployment found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Deployment, id))
|
||||
Ok((ResourceTargetVariant::Server, id))
|
||||
}
|
||||
ResourceTarget::Server(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
@@ -351,11 +361,53 @@ async fn extract_resource_target_with_validation(
|
||||
.servers
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for servers")?
|
||||
.context("no matching server found")?
|
||||
.context("Failed to query db for servers")?
|
||||
.context("No matching server found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Server, id))
|
||||
}
|
||||
ResourceTarget::Stack(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.stacks
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("Failed to query db for stacks")?
|
||||
.context("No matching stack found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Stack, id))
|
||||
}
|
||||
ResourceTarget::Deployment(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.deployments
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("Failed to query db for deployments")?
|
||||
.context("No matching deployment found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Deployment, id))
|
||||
}
|
||||
ResourceTarget::Build(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.builds
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("Failed to query db for builds")?
|
||||
.context("No matching build found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Build, id))
|
||||
}
|
||||
ResourceTarget::Repo(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
@@ -365,25 +417,11 @@ async fn extract_resource_target_with_validation(
|
||||
.repos
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for repos")?
|
||||
.context("no matching repo found")?
|
||||
.context("Failed to query db for repos")?
|
||||
.context("No matching repo found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Repo, id))
|
||||
}
|
||||
ResourceTarget::Alerter(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.alerters
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for alerters")?
|
||||
.context("no matching alerter found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Alerter, id))
|
||||
}
|
||||
ResourceTarget::Procedure(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
@@ -393,8 +431,8 @@ async fn extract_resource_target_with_validation(
|
||||
.procedures
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for procedures")?
|
||||
.context("no matching procedure found")?
|
||||
.context("Failed to query db for procedures")?
|
||||
.context("No matching procedure found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Procedure, id))
|
||||
}
|
||||
@@ -407,8 +445,8 @@ async fn extract_resource_target_with_validation(
|
||||
.actions
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for actions")?
|
||||
.context("no matching action found")?
|
||||
.context("Failed to query db for actions")?
|
||||
.context("No matching action found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Action, id))
|
||||
}
|
||||
@@ -421,24 +459,38 @@ async fn extract_resource_target_with_validation(
|
||||
.resource_syncs
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for resource syncs")?
|
||||
.context("no matching resource sync found")?
|
||||
.context("Failed to query db for resource syncs")?
|
||||
.context("No matching resource sync found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::ResourceSync, id))
|
||||
}
|
||||
ResourceTarget::Stack(ident) => {
|
||||
ResourceTarget::Builder(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.stacks
|
||||
.builders
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for stacks")?
|
||||
.context("no matching stack found")?
|
||||
.context("Failed to query db for builders")?
|
||||
.context("No matching builder found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Stack, id))
|
||||
Ok((ResourceTargetVariant::Builder, id))
|
||||
}
|
||||
ResourceTarget::Alerter(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.alerters
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("Failed to query db for alerters")?
|
||||
.context("No matching alerter found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Alerter, id))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,17 +11,34 @@ use crate::{permission::get_check_permissions, resource};
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateProcedure {
|
||||
#[instrument(name = "CreateProcedure", skip(user))]
|
||||
#[instrument(
|
||||
"CreateProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CreateProcedureResponse> {
|
||||
resource::create::<Procedure>(&self.name, self.config, user).await
|
||||
resource::create::<Procedure>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyProcedure {
|
||||
#[instrument(name = "CopyProcedure", skip(user))]
|
||||
#[instrument(
|
||||
"CopyProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.name,
|
||||
copy_procedure = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -33,13 +50,26 @@ impl Resolve<WriteArgs> for CopyProcedure {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Procedure>(&self.name, config.into(), user)
|
||||
.await
|
||||
resource::create::<Procedure>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateProcedure {
|
||||
#[instrument(name = "UpdateProcedure", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -52,7 +82,15 @@ impl Resolve<WriteArgs> for UpdateProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameProcedure {
|
||||
#[instrument(name = "RenameProcedure", skip(user))]
|
||||
#[instrument(
|
||||
"RenameProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -65,11 +103,18 @@ impl Resolve<WriteArgs> for RenameProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteProcedure {
|
||||
#[instrument(name = "DeleteProcedure", skip(args))]
|
||||
#[instrument(
|
||||
"DeleteProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.id
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteProcedureResponse> {
|
||||
Ok(resource::delete::<Procedure>(&self.id, args).await?)
|
||||
Ok(resource::delete::<Procedure>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,7 +10,9 @@ use komodo_client::{
|
||||
provider::{DockerRegistryAccount, GitProviderAccount},
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
helpers::update::{add_update, make_update},
|
||||
@@ -20,25 +22,41 @@ use crate::{
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateGitProviderAccount {
|
||||
#[instrument(
|
||||
"CreateGitProviderAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
https = self.account.https.unwrap_or(true),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CreateGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("only admins can create git provider accounts")
|
||||
.into(),
|
||||
anyhow!("Only admins can create git provider accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let mut account: GitProviderAccount = self.account.into();
|
||||
|
||||
if account.domain.is_empty() {
|
||||
return Err(anyhow!("domain cannot be empty string.").into());
|
||||
return Err(
|
||||
anyhow!("Domain cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
if account.username.is_empty() {
|
||||
return Err(anyhow!("username cannot be empty string.").into());
|
||||
return Err(
|
||||
anyhow!("Username cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
@@ -51,14 +69,14 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
|
||||
.git_accounts
|
||||
.insert_one(&account)
|
||||
.await
|
||||
.context("failed to create git provider account on db")?
|
||||
.context("Failed to create git provider account on db")?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("inserted id is not ObjectId")?
|
||||
.context("Inserted id is not ObjectId")?
|
||||
.to_string();
|
||||
|
||||
update.push_simple_log(
|
||||
"create git provider account",
|
||||
"Create git provider account",
|
||||
format!(
|
||||
"Created git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -70,7 +88,7 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for create git provider account | {e:#}")
|
||||
error!("Failed to add update for create git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -79,14 +97,25 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
#[instrument(
|
||||
"UpdateGitProviderAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
https = self.account.https.unwrap_or(true),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
mut self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("only admins can update git provider accounts")
|
||||
.into(),
|
||||
anyhow!("Only admins can update git provider accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -94,8 +123,8 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
&& domain.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!("cannot update git provider with empty domain")
|
||||
.into(),
|
||||
anyhow!("Cannot update git provider with empty domain")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -103,8 +132,8 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
&& username.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!("cannot update git provider with empty username")
|
||||
.into(),
|
||||
anyhow!("Cannot update git provider with empty username")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -118,7 +147,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
);
|
||||
|
||||
let account = to_document(&self.account).context(
|
||||
"failed to serialize partial git provider account to bson",
|
||||
"Failed to serialize partial git provider account to bson",
|
||||
)?;
|
||||
let db = db_client();
|
||||
update_one_by_id(
|
||||
@@ -128,17 +157,17 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to update git provider account on db")?;
|
||||
.context("Failed to update git provider account on db")?;
|
||||
|
||||
let Some(account) = find_one_by_id(&db.git_accounts, &self.id)
|
||||
.await
|
||||
.context("failed to query db for git accounts")?
|
||||
.context("Failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
return Err(anyhow!("No account found with given id").into());
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"update git provider account",
|
||||
"Update git provider account",
|
||||
format!(
|
||||
"Updated git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -150,7 +179,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for update git provider account | {e:#}")
|
||||
error!("Failed to add update for update git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -159,14 +188,22 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteGitProviderAccount {
|
||||
#[instrument(
|
||||
"DeleteGitProviderAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("only admins can delete git provider accounts")
|
||||
.into(),
|
||||
anyhow!("Only admins can delete git provider accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -179,16 +216,19 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
|
||||
let db = db_client();
|
||||
let Some(account) = find_one_by_id(&db.git_accounts, &self.id)
|
||||
.await
|
||||
.context("failed to query db for git accounts")?
|
||||
.context("Failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
return Err(
|
||||
anyhow!("No account found with given id")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
};
|
||||
delete_one_by_id(&db.git_accounts, &self.id, None)
|
||||
.await
|
||||
.context("failed to delete git account on db")?;
|
||||
|
||||
update.push_simple_log(
|
||||
"delete git provider account",
|
||||
"Delete git provider account",
|
||||
format!(
|
||||
"Deleted git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -200,7 +240,7 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for delete git provider account | {e:#}")
|
||||
error!("Failed to add update for delete git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -209,6 +249,15 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
#[instrument(
|
||||
"CreateDockerRegistryAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -216,20 +265,26 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"only admins can create docker registry account accounts"
|
||||
"Only admins can create docker registry account accounts"
|
||||
)
|
||||
.into(),
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let mut account: DockerRegistryAccount = self.account.into();
|
||||
|
||||
if account.domain.is_empty() {
|
||||
return Err(anyhow!("domain cannot be empty string.").into());
|
||||
return Err(
|
||||
anyhow!("Domain cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
if account.username.is_empty() {
|
||||
return Err(anyhow!("username cannot be empty string.").into());
|
||||
return Err(
|
||||
anyhow!("Username cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
@@ -243,15 +298,15 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
.insert_one(&account)
|
||||
.await
|
||||
.context(
|
||||
"failed to create docker registry account account on db",
|
||||
"Failed to create docker registry account account on db",
|
||||
)?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("inserted id is not ObjectId")?
|
||||
.context("Inserted id is not ObjectId")?
|
||||
.to_string();
|
||||
|
||||
update.push_simple_log(
|
||||
"create docker registry account",
|
||||
"Create docker registry account",
|
||||
format!(
|
||||
"Created docker registry account account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -263,7 +318,7 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for create docker registry account | {e:#}")
|
||||
error!("Failed to add update for create docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -272,14 +327,24 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
#[instrument(
|
||||
"UpdateDockerRegistryAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
mut self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateDockerRegistryAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("only admins can update docker registry accounts")
|
||||
.into(),
|
||||
anyhow!("Only admins can update docker registry accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -288,9 +353,9 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
{
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update docker registry account with empty domain"
|
||||
"Cannot update docker registry account with empty domain"
|
||||
)
|
||||
.into(),
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -299,9 +364,9 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
{
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update docker registry account with empty username"
|
||||
"Cannot update docker registry account with empty username"
|
||||
)
|
||||
.into(),
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -314,7 +379,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
);
|
||||
|
||||
let account = to_document(&self.account).context(
|
||||
"failed to serialize partial docker registry account account to bson",
|
||||
"Failed to serialize partial docker registry account account to bson",
|
||||
)?;
|
||||
|
||||
let db = db_client();
|
||||
@@ -326,19 +391,19 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
)
|
||||
.await
|
||||
.context(
|
||||
"failed to update docker registry account account on db",
|
||||
"Failed to update docker registry account account on db",
|
||||
)?;
|
||||
|
||||
let Some(account) =
|
||||
find_one_by_id(&db.registry_accounts, &self.id)
|
||||
.await
|
||||
.context("failed to query db for registry accounts")?
|
||||
.context("Failed to query db for registry accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
return Err(anyhow!("No account found with given id").into());
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"update docker registry account",
|
||||
"Update docker registry account",
|
||||
format!(
|
||||
"Updated docker registry account account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -350,7 +415,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for update docker registry account | {e:#}")
|
||||
error!("Failed to add update for update docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -359,14 +424,22 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
|
||||
#[instrument(
|
||||
"DeleteDockerRegistryAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteDockerRegistryAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("only admins can delete docker registry accounts")
|
||||
.into(),
|
||||
anyhow!("Only admins can delete docker registry accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -380,16 +453,19 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
|
||||
let Some(account) =
|
||||
find_one_by_id(&db.registry_accounts, &self.id)
|
||||
.await
|
||||
.context("failed to query db for git accounts")?
|
||||
.context("Failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
return Err(
|
||||
anyhow!("No account found with given id")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
};
|
||||
delete_one_by_id(&db.registry_accounts, &self.id, None)
|
||||
.await
|
||||
.context("failed to delete registry account on db")?;
|
||||
.context("Failed to delete registry account on db")?;
|
||||
|
||||
update.push_simple_log(
|
||||
"delete registry account",
|
||||
"Delete registry account",
|
||||
format!(
|
||||
"Deleted registry account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -401,7 +477,7 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for delete docker registry account | {e:#}")
|
||||
error!("Failed to add update for delete docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_document,
|
||||
@@ -7,19 +7,14 @@ use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
NoData, Operation, RepoExecutionArgs,
|
||||
config::core::CoreConfig,
|
||||
komodo_timestamp,
|
||||
NoData, Operation, RepoExecutionArgs, komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
repo::{PartialRepoConfig, Repo, RepoInfo},
|
||||
repo::{Repo, RepoInfo},
|
||||
server::Server,
|
||||
to_path_compatible_name,
|
||||
update::{Log, Update},
|
||||
},
|
||||
};
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
@@ -31,23 +26,40 @@ use crate::{
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, db_client, github_client},
|
||||
state::{action_states, db_client},
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateRepo {
|
||||
#[instrument(name = "CreateRepo", skip(user))]
|
||||
#[instrument(
|
||||
"CreateRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
resource::create::<Repo>(&self.name, self.config, user).await
|
||||
resource::create::<Repo>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyRepo {
|
||||
#[instrument(name = "CopyRepo", skip(user))]
|
||||
#[instrument(
|
||||
"CopyRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.name,
|
||||
copy_repo = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -58,19 +70,38 @@ impl Resolve<WriteArgs> for CopyRepo {
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Repo>(&self.name, config.into(), user).await
|
||||
resource::create::<Repo>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteRepo {
|
||||
#[instrument(name = "DeleteRepo", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Repo> {
|
||||
Ok(resource::delete::<Repo>(&self.id, args).await?)
|
||||
#[instrument(
|
||||
"DeleteRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
Ok(resource::delete::<Repo>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateRepo {
|
||||
#[instrument(name = "UpdateRepo", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -80,7 +111,15 @@ impl Resolve<WriteArgs> for UpdateRepo {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameRepo {
|
||||
#[instrument(name = "RenameRepo", skip(user))]
|
||||
#[instrument(
|
||||
"RenameRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.id,
|
||||
new_name = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -127,7 +166,8 @@ impl Resolve<WriteArgs> for RenameRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::git::RenameRepo {
|
||||
curr_name: to_path_compatible_name(&repo.name),
|
||||
new_name: name.clone(),
|
||||
@@ -156,11 +196,6 @@ impl Resolve<WriteArgs> for RenameRepo {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
#[instrument(
|
||||
name = "RefreshRepoCache",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -232,220 +267,3 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateRepoWebhook {
|
||||
#[instrument(name = "CreateRepoWebhook", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<CreateRepoWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
&args.user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = repo.config.repo.split('/');
|
||||
let owner = split.next().context("Repo repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if repo.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&repo.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
RepoWebhookAction::Clone => {
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Pull => {
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Build => {
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// Now good to create the webhook
|
||||
let request = ReposCreateWebhookRequest {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
token: Default::default(),
|
||||
}),
|
||||
events: vec![String::from("push")],
|
||||
name: String::from("web"),
|
||||
};
|
||||
github_repos
|
||||
.create_webhook(owner, repo_name, &request)
|
||||
.await
|
||||
.context("failed to create webhook")?;
|
||||
|
||||
if !repo.config.webhook_enabled {
|
||||
UpdateRepo {
|
||||
id: repo.id,
|
||||
config: PartialRepoConfig {
|
||||
webhook_enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("failed to update repo to enable webhook")?;
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteRepoWebhook {
|
||||
#[instrument(name = "DeleteRepoWebhook", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteRepoWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.git_provider != "github.com" {
|
||||
return Err(
|
||||
anyhow!("Can only manage github.com repo webhooks").into(),
|
||||
);
|
||||
}
|
||||
|
||||
if repo.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = repo.config.repo.split('/');
|
||||
let owner = split.next().context("Repo repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
RepoWebhookAction::Clone => {
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Pull => {
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Build => {
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
github_repos
|
||||
.delete_webhook(owner, repo_name, webhook.id)
|
||||
.await
|
||||
.context("failed to delete webhook")?;
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// No webhook to delete, all good
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,20 +1,35 @@
|
||||
use anyhow::anyhow;
|
||||
use derive_variants::ExtractVariant as _;
|
||||
use komodo_client::{
|
||||
api::write::{UpdateResourceMeta, UpdateResourceMetaResponse},
|
||||
entities::{
|
||||
ResourceTarget, action::Action, alerter::Alerter, build::Build,
|
||||
builder::Builder, deployment::Deployment, procedure::Procedure,
|
||||
repo::Repo, server::Server, stack::Stack, sync::ResourceSync,
|
||||
repo::Repo, server::Server, stack::Stack, swarm::Swarm,
|
||||
sync::ResourceSync,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::resource::{self, ResourceMetaUpdate};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateResourceMeta {
|
||||
#[instrument(name = "UpdateResourceMeta", skip(args))]
|
||||
#[instrument(
|
||||
"UpdateResourceMeta",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = args.user.id,
|
||||
resource_type = self.target.extract_variant().to_string(),
|
||||
resource_id = self.target.extract_variant_id().1,
|
||||
description = self.description,
|
||||
template = self.template,
|
||||
tags = format!("{:?}", self.tags),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
@@ -28,12 +43,18 @@ impl Resolve<WriteArgs> for UpdateResourceMeta {
|
||||
ResourceTarget::System(_) => {
|
||||
return Err(
|
||||
anyhow!("cannot update meta of System resource target")
|
||||
.into(),
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
ResourceTarget::Swarm(id) => {
|
||||
resource::update_meta::<Swarm>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
resource::update_meta::<Server>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
resource::update_meta::<Stack>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
resource::update_meta::<Deployment>(&id, meta, args).await?;
|
||||
}
|
||||
@@ -43,12 +64,6 @@ impl Resolve<WriteArgs> for UpdateResourceMeta {
|
||||
ResourceTarget::Repo(id) => {
|
||||
resource::update_meta::<Repo>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
resource::update_meta::<Builder>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
resource::update_meta::<Alerter>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
resource::update_meta::<Procedure>(&id, meta, args).await?;
|
||||
}
|
||||
@@ -59,8 +74,11 @@ impl Resolve<WriteArgs> for UpdateResourceMeta {
|
||||
resource::update_meta::<ResourceSync>(&id, meta, args)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
resource::update_meta::<Stack>(&id, meta, args).await?;
|
||||
ResourceTarget::Builder(id) => {
|
||||
resource::update_meta::<Builder>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
resource::update_meta::<Alerter>(&id, meta, args).await?;
|
||||
}
|
||||
}
|
||||
Ok(UpdateResourceMetaResponse {})
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
use anyhow::Context;
|
||||
use formatting::format_serror;
|
||||
use formatting::{bold, format_serror};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
NoData, Operation,
|
||||
Operation,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
server::{Server, ServerInfo},
|
||||
to_docker_compatible_name,
|
||||
update::{Update, UpdateStatus},
|
||||
},
|
||||
@@ -19,23 +19,48 @@ use crate::{
|
||||
update::{add_update, make_update, update_update},
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
resource::{self, update_server_public_key},
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateServer {
|
||||
#[instrument(name = "CreateServer", skip(user))]
|
||||
#[instrument(
|
||||
"CreateServer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Server> {
|
||||
resource::create::<Server>(&self.name, self.config, user).await
|
||||
resource::create::<Server>(
|
||||
&self.name,
|
||||
self.config,
|
||||
self.public_key.map(|public_key| ServerInfo {
|
||||
public_key,
|
||||
..Default::default()
|
||||
}),
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyServer {
|
||||
#[instrument(name = "CopyServer", skip(user))]
|
||||
#[instrument(
|
||||
"CopyServer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.name,
|
||||
copy_server = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -47,19 +72,46 @@ impl Resolve<WriteArgs> for CopyServer {
|
||||
)
|
||||
.await?;
|
||||
|
||||
resource::create::<Server>(&self.name, config.into(), user).await
|
||||
resource::create::<Server>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
self.public_key.map(|public_key| ServerInfo {
|
||||
public_key,
|
||||
..Default::default()
|
||||
}),
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteServer {
|
||||
#[instrument(name = "DeleteServer", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Server> {
|
||||
Ok(resource::delete::<Server>(&self.id, args).await?)
|
||||
#[instrument(
|
||||
"DeleteServer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Server> {
|
||||
Ok(resource::delete::<Server>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateServer {
|
||||
#[instrument(name = "UpdateServer", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateServer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -69,7 +121,15 @@ impl Resolve<WriteArgs> for UpdateServer {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameServer {
|
||||
#[instrument(name = "RenameServer", skip(user))]
|
||||
#[instrument(
|
||||
"RenameServer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -79,7 +139,15 @@ impl Resolve<WriteArgs> for RenameServer {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateNetwork {
|
||||
#[instrument(name = "CreateNetwork", skip(user))]
|
||||
#[instrument(
|
||||
"CreateNetwork",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.server,
|
||||
network = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -91,7 +159,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::CreateNetwork, user);
|
||||
@@ -99,7 +167,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
match periphery
|
||||
.request(api::network::CreateNetwork {
|
||||
.request(api::docker::CreateNetwork {
|
||||
name: to_docker_compatible_name(&self.name),
|
||||
driver: None,
|
||||
})
|
||||
@@ -108,7 +176,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
|
||||
Ok(log) => update.logs.push(log),
|
||||
Err(e) => update.push_error_log(
|
||||
"create network",
|
||||
format_serror(&e.context("failed to create network").into()),
|
||||
format_serror(&e.context("Failed to create network").into()),
|
||||
),
|
||||
};
|
||||
|
||||
@@ -119,80 +187,80 @@ impl Resolve<WriteArgs> for CreateNetwork {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateTerminal {
|
||||
#[instrument(name = "CreateTerminal", skip(user))]
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateServerPublicKey {
|
||||
#[instrument(
|
||||
"UpdateServerPublicKey",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = args.user.id,
|
||||
server = self.server,
|
||||
public_key = self.public_key,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
args: &WriteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Write.terminal(),
|
||||
&args.user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
update_server_public_key(&server.id, &self.public_key).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::CreateTerminal {
|
||||
name: self.name,
|
||||
command: self.command,
|
||||
recreate: self.recreate,
|
||||
})
|
||||
.await
|
||||
.context("Failed to create terminal on periphery")?;
|
||||
let mut update =
|
||||
make_update(&server, Operation::UpdateServerKey, &args.user);
|
||||
|
||||
Ok(NoData {})
|
||||
update.push_simple_log(
|
||||
"Update Server Public Key",
|
||||
format!("Public key updated to {}", bold(&self.public_key)),
|
||||
);
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteTerminal {
|
||||
#[instrument(name = "DeleteTerminal", skip(user))]
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for RotateServerKeys {
|
||||
#[instrument(
|
||||
"RotateServerKeys",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = args.user.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
args: &WriteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Write.terminal(),
|
||||
&args.user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::DeleteTerminal {
|
||||
terminal: self.terminal,
|
||||
})
|
||||
let public_key = periphery
|
||||
.request(api::keys::RotatePrivateKey {})
|
||||
.await
|
||||
.context("Failed to delete terminal on periphery")?;
|
||||
.context("Failed to rotate Periphery private key")?
|
||||
.public_key;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteAllTerminals {
|
||||
#[instrument(name = "DeleteAllTerminals", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Write.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::DeleteAllTerminals {})
|
||||
.await
|
||||
.context("Failed to delete all terminals on periphery")?;
|
||||
|
||||
Ok(NoData {})
|
||||
UpdateServerPublicKey {
|
||||
server: server.id,
|
||||
public_key,
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,7 +19,15 @@ use crate::{api::user::UserArgs, state::db_client};
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateServiceUser {
|
||||
#[instrument(name = "CreateServiceUser", skip(user))]
|
||||
#[instrument(
|
||||
"CreateServiceUser",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
username = self.username,
|
||||
description = self.description,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -63,7 +71,15 @@ impl Resolve<WriteArgs> for CreateServiceUser {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateServiceUserDescription {
|
||||
#[instrument(name = "UpdateServiceUserDescription", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateServiceUserDescription",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
username = self.username,
|
||||
description = self.description,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -99,7 +115,16 @@ impl Resolve<WriteArgs> for UpdateServiceUserDescription {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateApiKeyForServiceUser {
|
||||
#[instrument(name = "CreateApiKeyForServiceUser", skip(user))]
|
||||
#[instrument(
|
||||
"CreateApiKeyForServiceUser",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
service_user = self.user_id,
|
||||
name = self.name,
|
||||
expires = self.expires,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -125,7 +150,14 @@ impl Resolve<WriteArgs> for CreateApiKeyForServiceUser {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteApiKeyForServiceUser {
|
||||
#[instrument(name = "DeleteApiKeyForServiceUser", skip(user))]
|
||||
#[instrument(
|
||||
"DeleteApiKeyForServiceUser",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
key = self.key,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
|
||||
@@ -8,18 +8,14 @@ use komodo_client::{
|
||||
entities::{
|
||||
FileContents, NoData, Operation, RepoExecutionArgs,
|
||||
all_logs_success,
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::ServerState,
|
||||
stack::{PartialStackConfig, Stack, StackInfo},
|
||||
stack::{Stack, StackInfo},
|
||||
update::Update,
|
||||
user::stack_user,
|
||||
},
|
||||
};
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use periphery_client::api::compose::{
|
||||
GetComposeContentsOnHost, GetComposeContentsOnHostResponse,
|
||||
WriteComposeContentsToHost,
|
||||
@@ -40,23 +36,40 @@ use crate::{
|
||||
remote::{RemoteComposeContents, get_repo_compose_contents},
|
||||
services::extract_services_into_res,
|
||||
},
|
||||
state::{db_client, github_client},
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateStack {
|
||||
#[instrument(name = "CreateStack", skip(user))]
|
||||
#[instrument(
|
||||
"CreateStack",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Stack> {
|
||||
resource::create::<Stack>(&self.name, self.config, user).await
|
||||
resource::create::<Stack>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyStack {
|
||||
#[instrument(name = "CopyStack", skip(user))]
|
||||
#[instrument(
|
||||
"CopyStack",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.name,
|
||||
copy_stack = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -68,19 +81,38 @@ impl Resolve<WriteArgs> for CopyStack {
|
||||
)
|
||||
.await?;
|
||||
|
||||
resource::create::<Stack>(&self.name, config.into(), user).await
|
||||
resource::create::<Stack>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteStack {
|
||||
#[instrument(name = "DeleteStack", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Stack> {
|
||||
Ok(resource::delete::<Stack>(&self.id, args).await?)
|
||||
#[instrument(
|
||||
"DeleteStack",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Stack> {
|
||||
Ok(resource::delete::<Stack>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateStack {
|
||||
#[instrument(name = "UpdateStack", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateStack",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -90,7 +122,15 @@ impl Resolve<WriteArgs> for UpdateStack {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameStack {
|
||||
#[instrument(name = "RenameStack", skip(user))]
|
||||
#[instrument(
|
||||
"RenameStack",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.id,
|
||||
new_name = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -100,7 +140,15 @@ impl Resolve<WriteArgs> for RenameStack {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for WriteStackFileContents {
|
||||
#[instrument(name = "WriteStackFileContents", skip(user))]
|
||||
#[instrument(
|
||||
"WriteStackFileContents",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.stack,
|
||||
path = self.file_path,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -149,6 +197,7 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("WriteStackFileContentsOnHost", skip_all)]
|
||||
async fn write_stack_file_contents_on_host(
|
||||
stack: Stack,
|
||||
file_path: String,
|
||||
@@ -170,7 +219,8 @@ async fn write_stack_file_contents_on_host(
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
match periphery_client(&server)?
|
||||
match periphery_client(&server)
|
||||
.await?
|
||||
.request(WriteComposeContentsToHost {
|
||||
name: stack.name,
|
||||
run_directory: stack.config.run_directory,
|
||||
@@ -220,6 +270,7 @@ async fn write_stack_file_contents_on_host(
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
#[instrument("WriteStackFileContentsGit", skip_all)]
|
||||
async fn write_stack_file_contents_git(
|
||||
mut stack: Stack,
|
||||
file_path: &str,
|
||||
@@ -361,11 +412,6 @@ async fn write_stack_file_contents_git(
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
#[instrument(
|
||||
name = "RefreshStackCache",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -424,7 +470,8 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
(vec![], None, None, None, None)
|
||||
} else if let Some(server) = server {
|
||||
let GetComposeContentsOnHostResponse { contents, errors } =
|
||||
match periphery_client(&server)?
|
||||
match periphery_client(&server)
|
||||
.await?
|
||||
.request(GetComposeContentsOnHost {
|
||||
file_paths: stack.all_file_dependencies(),
|
||||
name: stack.name.clone(),
|
||||
@@ -563,216 +610,3 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateStackWebhook {
|
||||
#[instrument(name = "CreateStackWebhook", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<CreateStackWebhookResponse> {
|
||||
let WriteArgs { user } = args;
|
||||
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if stack.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = stack.config.repo.split('/');
|
||||
let owner = split.next().context("Stack repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Stack repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if stack.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&stack.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
StackWebhookAction::Refresh => {
|
||||
format!("{host}/listener/github/stack/{}/refresh", stack.id)
|
||||
}
|
||||
StackWebhookAction::Deploy => {
|
||||
format!("{host}/listener/github/stack/{}/deploy", stack.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// Now good to create the webhook
|
||||
let request = ReposCreateWebhookRequest {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
token: Default::default(),
|
||||
}),
|
||||
events: vec![String::from("push")],
|
||||
name: String::from("web"),
|
||||
};
|
||||
github_repos
|
||||
.create_webhook(owner, repo, &request)
|
||||
.await
|
||||
.context("failed to create webhook")?;
|
||||
|
||||
if !stack.config.webhook_enabled {
|
||||
UpdateStack {
|
||||
id: stack.id,
|
||||
config: PartialStackConfig {
|
||||
webhook_enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("failed to update stack to enable webhook")?;
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteStackWebhook {
|
||||
#[instrument(name = "DeleteStackWebhook", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteStackWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if stack.config.git_provider != "github.com" {
|
||||
return Err(
|
||||
anyhow!("Can only manage github.com repo webhooks").into(),
|
||||
);
|
||||
}
|
||||
|
||||
if stack.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = stack.config.repo.split('/');
|
||||
let owner = split.next().context("Stack repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Sync repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
StackWebhookAction::Refresh => {
|
||||
format!("{host}/listener/github/stack/{}/refresh", stack.id)
|
||||
}
|
||||
StackWebhookAction::Deploy => {
|
||||
format!("{host}/listener/github/stack/{}/deploy", stack.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
github_repos
|
||||
.delete_webhook(owner, repo, webhook.id)
|
||||
.await
|
||||
.context("failed to delete webhook")?;
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// No webhook to delete, all good
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
108
bin/core/src/api/write/swarm.rs
Normal file
108
bin/core/src/api/write/swarm.rs
Normal file
@@ -0,0 +1,108 @@
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
permission::PermissionLevel, swarm::Swarm, update::Update,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{permission::get_check_permissions, resource};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateSwarm {
|
||||
#[instrument(
|
||||
"CreateSwarm",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
swarm = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Swarm> {
|
||||
resource::create::<Swarm>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopySwarm {
|
||||
#[instrument(
|
||||
"CopySwarm",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
swarm = self.name,
|
||||
copy_swarm = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Swarm> {
|
||||
let Swarm { config, .. } = get_check_permissions::<Swarm>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Swarm>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteSwarm {
|
||||
#[instrument(
|
||||
"DeleteSwarm",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
swarm = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Swarm> {
|
||||
Ok(resource::delete::<Swarm>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateSwarm {
|
||||
#[instrument(
|
||||
"UpdateSwarm",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
swarm = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Swarm> {
|
||||
Ok(resource::update::<Swarm>(&self.id, self.config, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameSwarm {
|
||||
#[instrument(
|
||||
"RenameSwarm",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
swarm = self.id,
|
||||
new_name = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
Ok(resource::rename::<Swarm>(&self.id, &self.name, user).await?)
|
||||
}
|
||||
}
|
||||
@@ -12,14 +12,13 @@ use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::{read::ExportAllResourcesToToml, write::*},
|
||||
entities::{
|
||||
self, NoData, Operation, RepoExecutionArgs, ResourceTarget,
|
||||
self, Operation, RepoExecutionArgs, ResourceTarget,
|
||||
action::Action,
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
alerter::Alerter,
|
||||
all_logs_success,
|
||||
build::Build,
|
||||
builder::Builder,
|
||||
config::core::CoreConfig,
|
||||
deployment::Deployment,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
@@ -27,19 +26,14 @@ use komodo_client::{
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
stack::Stack,
|
||||
sync::{
|
||||
PartialResourceSyncConfig, ResourceSync, ResourceSyncInfo,
|
||||
SyncDeployUpdate,
|
||||
},
|
||||
sync::{ResourceSync, ResourceSyncInfo, SyncDeployUpdate},
|
||||
to_path_compatible_name,
|
||||
update::{Log, Update},
|
||||
user::sync_user,
|
||||
},
|
||||
};
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use tracing::Instrument;
|
||||
|
||||
use crate::{
|
||||
alert::send_alerts,
|
||||
@@ -53,7 +47,7 @@ use crate::{
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{db_client, github_client},
|
||||
state::db_client,
|
||||
sync::{
|
||||
deploy::SyncDeployParams, remote::RemoteResources,
|
||||
view::push_updates_for_view,
|
||||
@@ -63,18 +57,39 @@ use crate::{
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateResourceSync {
|
||||
#[instrument(name = "CreateResourceSync", skip(user))]
|
||||
#[instrument(
|
||||
"CreateResourceSync",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
sync = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<ResourceSync> {
|
||||
resource::create::<ResourceSync>(&self.name, self.config, user)
|
||||
.await
|
||||
resource::create::<ResourceSync>(
|
||||
&self.name,
|
||||
self.config,
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyResourceSync {
|
||||
#[instrument(name = "CopyResourceSync", skip(user))]
|
||||
#[instrument(
|
||||
"CopyResourceSync",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
sync = self.name,
|
||||
copy_sync = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -86,23 +101,43 @@ impl Resolve<WriteArgs> for CopyResourceSync {
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<ResourceSync>(&self.name, config.into(), user)
|
||||
.await
|
||||
resource::create::<ResourceSync>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteResourceSync {
|
||||
#[instrument(name = "DeleteResourceSync", skip(args))]
|
||||
#[instrument(
|
||||
"DeleteResourceSync",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
sync = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<ResourceSync> {
|
||||
Ok(resource::delete::<ResourceSync>(&self.id, args).await?)
|
||||
Ok(resource::delete::<ResourceSync>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateResourceSync {
|
||||
#[instrument(name = "UpdateResourceSync", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateResourceSync",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
sync = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -115,7 +150,15 @@ impl Resolve<WriteArgs> for UpdateResourceSync {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameResourceSync {
|
||||
#[instrument(name = "RenameResourceSync", skip(user))]
|
||||
#[instrument(
|
||||
"RenameResourceSync",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
sync = self.id,
|
||||
new_name = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -128,7 +171,16 @@ impl Resolve<WriteArgs> for RenameResourceSync {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for WriteSyncFileContents {
|
||||
#[instrument(name = "WriteSyncFileContents", skip(args))]
|
||||
#[instrument(
|
||||
"WriteSyncFileContents",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = args.user.id,
|
||||
sync = self.sync,
|
||||
resource_path = self.resource_path,
|
||||
file_path = self.file_path,
|
||||
)
|
||||
)]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
|
||||
let sync = get_check_permissions::<ResourceSync>(
|
||||
&self.sync,
|
||||
@@ -173,6 +225,7 @@ impl Resolve<WriteArgs> for WriteSyncFileContents {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("WriteSyncFileContentsOnHost", skip_all)]
|
||||
async fn write_sync_file_contents_on_host(
|
||||
req: WriteSyncFileContents,
|
||||
args: &WriteArgs,
|
||||
@@ -196,15 +249,7 @@ async fn write_sync_file_contents_on_host(
|
||||
.context("Invalid resource path")?;
|
||||
let full_path = root.join(&resource_path).join(&file_path);
|
||||
|
||||
if let Some(parent) = full_path.parent() {
|
||||
tokio::fs::create_dir_all(parent).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to initialize resource file parent directory {parent:?}"
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
if let Err(e) = tokio::fs::write(&full_path, &contents)
|
||||
if let Err(e) = secret_file::write_async(&full_path, &contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
@@ -243,6 +288,7 @@ async fn write_sync_file_contents_on_host(
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
#[instrument("WriteSyncFileContentsGit", skip_all)]
|
||||
async fn write_sync_file_contents_git(
|
||||
req: WriteSyncFileContents,
|
||||
args: &WriteArgs,
|
||||
@@ -394,7 +440,14 @@ async fn write_sync_file_contents_git(
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CommitSync {
|
||||
#[instrument(name = "CommitSync", skip(args))]
|
||||
#[instrument(
|
||||
"CommitSync",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = args.user.id,
|
||||
sync = self.sync,
|
||||
)
|
||||
)]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
|
||||
let WriteArgs { user } = args;
|
||||
|
||||
@@ -481,12 +534,9 @@ impl Resolve<WriteArgs> for CommitSync {
|
||||
.sync_directory
|
||||
.join(to_path_compatible_name(&sync.name))
|
||||
.join(&resource_path);
|
||||
if let Some(parent) = file_path.parent() {
|
||||
tokio::fs::create_dir_all(parent)
|
||||
.await
|
||||
.with_context(|| format!("Failed to initialize resource file parent directory {parent:?}"))?;
|
||||
};
|
||||
if let Err(e) = tokio::fs::write(&file_path, &res.toml)
|
||||
let span = info_span!("CommitSyncOnHost");
|
||||
if let Err(e) = secret_file::write_async(&file_path, &res.toml)
|
||||
.instrument(span)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Failed to write resource file to {file_path:?}",)
|
||||
@@ -579,6 +629,7 @@ impl Resolve<WriteArgs> for CommitSync {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("CommitSyncGit", skip_all)]
|
||||
async fn commit_git_sync(
|
||||
mut args: RepoExecutionArgs,
|
||||
resource_path: &Path,
|
||||
@@ -623,11 +674,6 @@ async fn commit_git_sync(
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RefreshResourceSyncPending {
|
||||
#[instrument(
|
||||
name = "RefreshResourceSyncPending",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -972,215 +1018,3 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
|
||||
Ok(crate::resource::get::<ResourceSync>(&sync.id).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateSyncWebhook {
|
||||
#[instrument(name = "CreateSyncWebhook", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<CreateSyncWebhookResponse> {
|
||||
let WriteArgs { user } = args;
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let sync = get_check_permissions::<ResourceSync>(
|
||||
&self.sync,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if sync.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = sync.config.repo.split('/');
|
||||
let owner = split.next().context("Sync repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if sync.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&sync.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
SyncWebhookAction::Refresh => {
|
||||
format!("{host}/listener/github/sync/{}/refresh", sync.id)
|
||||
}
|
||||
SyncWebhookAction::Sync => {
|
||||
format!("{host}/listener/github/sync/{}/sync", sync.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// Now good to create the webhook
|
||||
let request = ReposCreateWebhookRequest {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
token: Default::default(),
|
||||
}),
|
||||
events: vec![String::from("push")],
|
||||
name: String::from("web"),
|
||||
};
|
||||
github_repos
|
||||
.create_webhook(owner, repo, &request)
|
||||
.await
|
||||
.context("failed to create webhook")?;
|
||||
|
||||
if !sync.config.webhook_enabled {
|
||||
UpdateResourceSync {
|
||||
id: sync.id,
|
||||
config: PartialResourceSyncConfig {
|
||||
webhook_enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("failed to update sync to enable webhook")?;
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteSyncWebhook {
|
||||
#[instrument(name = "DeleteSyncWebhook", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteSyncWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let sync = get_check_permissions::<ResourceSync>(
|
||||
&self.sync,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if sync.config.git_provider != "github.com" {
|
||||
return Err(
|
||||
anyhow!("Can only manage github.com repo webhooks").into(),
|
||||
);
|
||||
}
|
||||
|
||||
if sync.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = sync.config.repo.split('/');
|
||||
let owner = split.next().context("Sync repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Sync repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
SyncWebhookAction::Refresh => {
|
||||
format!("{host}/listener/github/sync/{}/refresh", sync.id)
|
||||
}
|
||||
SyncWebhookAction::Sync => {
|
||||
format!("{host}/listener/github/sync/{}/sync", sync.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
github_repos
|
||||
.delete_webhook(owner, repo, webhook.id)
|
||||
.await
|
||||
.context("failed to delete webhook")?;
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// No webhook to delete, all good
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,7 +27,15 @@ use crate::{
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateTag {
|
||||
#[instrument(name = "CreateTag", skip(user))]
|
||||
#[instrument(
|
||||
"CreateTag",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
tag = self.name,
|
||||
color = format!("{:?}", self.color),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -68,7 +76,15 @@ impl Resolve<WriteArgs> for CreateTag {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameTag {
|
||||
#[instrument(name = "RenameTag", skip(user))]
|
||||
#[instrument(
|
||||
"RenameTag",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
tag = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -93,7 +109,15 @@ impl Resolve<WriteArgs> for RenameTag {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateTagColor {
|
||||
#[instrument(name = "UpdateTagColor", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateTagColor",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
tag = self.tag,
|
||||
color = format!("{:?}", self.color),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -114,7 +138,14 @@ impl Resolve<WriteArgs> for UpdateTagColor {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteTag {
|
||||
#[instrument(name = "DeleteTag", skip(user))]
|
||||
#[instrument(
|
||||
"DeleteTag",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
tag_id = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
|
||||
309
bin/core/src/api/write/terminal.rs
Normal file
309
bin/core/src/api/write/terminal.rs
Normal file
@@ -0,0 +1,309 @@
|
||||
use anyhow::Context as _;
|
||||
use futures_util::{StreamExt as _, stream::FuturesUnordered};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
NoData, deployment::Deployment, permission::PermissionLevel,
|
||||
server::Server, stack::Stack, terminal::TerminalTarget,
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCode;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
periphery_client,
|
||||
query::get_all_tags,
|
||||
terminal::{
|
||||
create_container_terminal_inner,
|
||||
get_deployment_periphery_container,
|
||||
get_stack_service_periphery_container,
|
||||
},
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for CreateTerminal {
|
||||
#[instrument(
|
||||
"CreateTerminal",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
terminal = self.name,
|
||||
target = format!("{:?}", self.target),
|
||||
command = self.command,
|
||||
mode = format!("{:?}", self.mode),
|
||||
recreate = format!("{:?}", self.recreate),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
match self.target.clone() {
|
||||
TerminalTarget::Server { server } => {
|
||||
let server = server
|
||||
.context("Must provide 'target.params.server'")
|
||||
.status_code(StatusCode::BAD_REQUEST)?;
|
||||
create_server_terminal(self, server, user).await?;
|
||||
}
|
||||
TerminalTarget::Container { server, container } => {
|
||||
create_container_terminal(self, server, container, user)
|
||||
.await?;
|
||||
}
|
||||
TerminalTarget::Stack { stack, service } => {
|
||||
let service = service
|
||||
.context("Must provide 'target.params.service'")
|
||||
.status_code(StatusCode::BAD_REQUEST)?;
|
||||
create_stack_service_terminal(self, stack, service, user)
|
||||
.await?;
|
||||
}
|
||||
TerminalTarget::Deployment { deployment } => {
|
||||
create_deployment_terminal(self, deployment, user).await?;
|
||||
}
|
||||
};
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_server_terminal(
|
||||
CreateTerminal {
|
||||
name,
|
||||
command,
|
||||
recreate,
|
||||
target: _,
|
||||
mode: _,
|
||||
}: CreateTerminal,
|
||||
server: String,
|
||||
user: &User,
|
||||
) -> anyhow::Result<()> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::CreateServerTerminal {
|
||||
name,
|
||||
command,
|
||||
recreate,
|
||||
})
|
||||
.await
|
||||
.context("Failed to create Server Terminal on Periphery")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn create_container_terminal(
|
||||
req: CreateTerminal,
|
||||
server: String,
|
||||
container: String,
|
||||
user: &User,
|
||||
) -> anyhow::Result<()> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
create_container_terminal_inner(req, &periphery, container).await
|
||||
}
|
||||
|
||||
async fn create_stack_service_terminal(
|
||||
req: CreateTerminal,
|
||||
stack: String,
|
||||
service: String,
|
||||
user: &User,
|
||||
) -> anyhow::Result<()> {
|
||||
let (_, periphery, container) =
|
||||
get_stack_service_periphery_container(&stack, &service, user)
|
||||
.await?;
|
||||
create_container_terminal_inner(req, &periphery, container).await
|
||||
}
|
||||
|
||||
async fn create_deployment_terminal(
|
||||
req: CreateTerminal,
|
||||
deployment: String,
|
||||
user: &User,
|
||||
) -> anyhow::Result<()> {
|
||||
let (_, periphery, container) =
|
||||
get_deployment_periphery_container(&deployment, user).await?;
|
||||
create_container_terminal_inner(req, &periphery, container).await
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteTerminal {
|
||||
#[instrument(
|
||||
"DeleteTerminal",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
target = format!("{:?}", self.target),
|
||||
terminal = self.terminal,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
let server = match &self.target {
|
||||
TerminalTarget::Server { server } => {
|
||||
let server = server
|
||||
.as_ref()
|
||||
.context("Must provide 'target.params.server'")
|
||||
.status_code(StatusCode::BAD_REQUEST)?;
|
||||
get_check_permissions::<Server>(
|
||||
server,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?
|
||||
}
|
||||
TerminalTarget::Container { server, .. } => {
|
||||
get_check_permissions::<Server>(
|
||||
server,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?
|
||||
}
|
||||
TerminalTarget::Stack { stack, .. } => {
|
||||
let server = get_check_permissions::<Stack>(
|
||||
stack,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?
|
||||
.config
|
||||
.server_id;
|
||||
resource::get::<Server>(&server).await?
|
||||
}
|
||||
TerminalTarget::Deployment { deployment } => {
|
||||
let server = get_check_permissions::<Deployment>(
|
||||
deployment,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?
|
||||
.config
|
||||
.server_id;
|
||||
resource::get::<Server>(&server).await?
|
||||
}
|
||||
};
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::DeleteTerminal {
|
||||
target: self.target,
|
||||
terminal: self.terminal,
|
||||
})
|
||||
.await
|
||||
.context("Failed to delete terminal on Periphery")?;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteAllTerminals {
|
||||
#[instrument(
|
||||
"DeleteAllTerminals",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::DeleteAllTerminals {})
|
||||
.await
|
||||
.context("Failed to delete all terminals on Periphery")?;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for BatchDeleteAllTerminals {
|
||||
#[instrument(
|
||||
"BatchDeleteAllTerminals",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
query = format!("{:?}", self.query),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let all_tags = if self.query.tags.is_empty() {
|
||||
vec![]
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
|
||||
resource::list_full_for_user::<Server>(
|
||||
self.query,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|server| async move {
|
||||
let res = async {
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::DeleteAllTerminals {})
|
||||
.await
|
||||
.context("Failed to delete all terminals on Periphery")?;
|
||||
|
||||
anyhow::Ok(())
|
||||
}
|
||||
.await;
|
||||
if let Err(e) = res {
|
||||
warn!(
|
||||
"Failed to delete all terminals on {} ({}) | {e:#}",
|
||||
server.name, server.id
|
||||
)
|
||||
}
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
@@ -24,7 +24,14 @@ use super::WriteArgs;
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for CreateLocalUser {
|
||||
#[instrument(name = "CreateLocalUser", skip(admin, self), fields(admin_id = admin.id, username = self.username))]
|
||||
#[instrument(
|
||||
"CreateLocalUser",
|
||||
skip_all,
|
||||
fields(
|
||||
admin_id = admin.id,
|
||||
username = self.username
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
@@ -101,7 +108,14 @@ impl Resolve<WriteArgs> for CreateLocalUser {
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateUserUsername {
|
||||
#[instrument(name = "UpdateUserUsername", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"UpdateUserUsername",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
new_username = self.username,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -152,7 +166,11 @@ impl Resolve<WriteArgs> for UpdateUserUsername {
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateUserPassword {
|
||||
#[instrument(name = "UpdateUserPassword", skip(user, self), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"UpdateUserPassword",
|
||||
skip_all,
|
||||
fields(operator = user.id)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -175,7 +193,14 @@ impl Resolve<WriteArgs> for UpdateUserPassword {
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteUser {
|
||||
#[instrument(name = "DeleteUser", skip(admin), fields(user = self.user))]
|
||||
#[instrument(
|
||||
"DeleteUser",
|
||||
skip_all,
|
||||
fields(
|
||||
admin_id = admin.id,
|
||||
user_to_delete = self.user
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
|
||||
@@ -19,14 +19,21 @@ use crate::state::db_client;
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateUserGroup {
|
||||
#[instrument(name = "CreateUserGroup", skip(admin), fields(admin = admin.username))]
|
||||
#[instrument(
|
||||
"CreateUserGroup",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
group = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
@@ -57,14 +64,22 @@ impl Resolve<WriteArgs> for CreateUserGroup {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameUserGroup {
|
||||
#[instrument(name = "RenameUserGroup", skip(admin), fields(admin = admin.username))]
|
||||
#[instrument(
|
||||
"RenameUserGroup",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
group = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
@@ -86,14 +101,21 @@ impl Resolve<WriteArgs> for RenameUserGroup {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteUserGroup {
|
||||
#[instrument(name = "DeleteUserGroup", skip(admin), fields(admin = admin.username))]
|
||||
#[instrument(
|
||||
"DeleteUserGroup",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
group = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
@@ -122,14 +144,22 @@ impl Resolve<WriteArgs> for DeleteUserGroup {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for AddUserToUserGroup {
|
||||
#[instrument(name = "AddUserToUserGroup", skip(admin), fields(admin = admin.username))]
|
||||
#[instrument(
|
||||
"AddUserToUserGroup",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
group = self.user_group,
|
||||
user = self.user,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
@@ -169,14 +199,22 @@ impl Resolve<WriteArgs> for AddUserToUserGroup {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RemoveUserFromUserGroup {
|
||||
#[instrument(name = "RemoveUserFromUserGroup", skip(admin), fields(admin = admin.username))]
|
||||
#[instrument(
|
||||
"RemoveUserFromUserGroup",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
group = self.user_group,
|
||||
user = self.user,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
@@ -216,14 +254,22 @@ impl Resolve<WriteArgs> for RemoveUserFromUserGroup {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for SetUsersInUserGroup {
|
||||
#[instrument(name = "SetUsersInUserGroup", skip(admin), fields(admin = admin.username))]
|
||||
#[instrument(
|
||||
"SetUsersInUserGroup",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
group = self.user_group,
|
||||
users = format!("{:?}", self.users)
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
@@ -266,14 +312,22 @@ impl Resolve<WriteArgs> for SetUsersInUserGroup {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for SetEveryoneUserGroup {
|
||||
#[instrument(name = "SetEveryoneUserGroup", skip(admin), fields(admin = admin.username))]
|
||||
#[instrument(
|
||||
"SetEveryoneUserGroup",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
group = self.user_group,
|
||||
everyone = self.everyone,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UserGroup> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin-only")
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -19,7 +19,16 @@ use crate::{
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateVariable {
|
||||
#[instrument(name = "CreateVariable", skip(user, self), fields(name = &self.name))]
|
||||
#[instrument(
|
||||
"CreateVariable",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
variable = self.name,
|
||||
description = self.description,
|
||||
is_secret = self.is_secret,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -68,7 +77,14 @@ impl Resolve<WriteArgs> for CreateVariable {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateVariableValue {
|
||||
#[instrument(name = "UpdateVariableValue", skip(user, self), fields(name = &self.name))]
|
||||
#[instrument(
|
||||
"UpdateVariableValue",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
variable = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -125,7 +141,15 @@ impl Resolve<WriteArgs> for UpdateVariableValue {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateVariableDescription {
|
||||
#[instrument(name = "UpdateVariableDescription", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateVariableDescription",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
variable = self.name,
|
||||
description = self.description,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -149,7 +173,15 @@ impl Resolve<WriteArgs> for UpdateVariableDescription {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateVariableIsSecret {
|
||||
#[instrument(name = "UpdateVariableIsSecret", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateVariableIsSecret",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
variable = self.name,
|
||||
is_secret = self.is_secret,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -173,6 +205,14 @@ impl Resolve<WriteArgs> for UpdateVariableIsSecret {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteVariable {
|
||||
#[instrument(
|
||||
"DeleteVariable",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
variable = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
|
||||
@@ -1,17 +1,15 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use komodo_client::entities::config::core::{
|
||||
CoreConfig, OauthCredentials,
|
||||
use komodo_client::entities::{
|
||||
config::core::{CoreConfig, OauthCredentials},
|
||||
random_string,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use serde::{Deserialize, Serialize, de::DeserializeOwned};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
auth::STATE_PREFIX_LENGTH, config::core_config,
|
||||
helpers::random_string,
|
||||
};
|
||||
use crate::{auth::STATE_PREFIX_LENGTH, config::core_config};
|
||||
|
||||
pub fn github_oauth_client() -> &'static Option<GithubOauthClient> {
|
||||
static GITHUB_OAUTH_CLIENT: OnceLock<Option<GithubOauthClient>> =
|
||||
@@ -76,7 +74,6 @@ impl GithubOauthClient {
|
||||
.into()
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_login_redirect_url(
|
||||
&self,
|
||||
redirect: Option<String>,
|
||||
@@ -95,7 +92,6 @@ impl GithubOauthClient {
|
||||
redirect_url
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn check_state(&self, state: &str) -> bool {
|
||||
let mut contained = false;
|
||||
self.states.lock().await.retain(|s| {
|
||||
@@ -109,7 +105,6 @@ impl GithubOauthClient {
|
||||
contained
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_access_token(
|
||||
&self,
|
||||
code: &str,
|
||||
@@ -130,7 +125,6 @@ impl GithubOauthClient {
|
||||
.context("failed to get github access token using code")
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_github_user(
|
||||
&self,
|
||||
token: &str,
|
||||
@@ -141,7 +135,6 @@ impl GithubOauthClient {
|
||||
.context("failed to get github user using access token")
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
async fn get<R: DeserializeOwned>(
|
||||
&self,
|
||||
endpoint: &str,
|
||||
|
||||
@@ -5,7 +5,7 @@ use axum::{
|
||||
use database::mongo_indexed::Document;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::entities::{
|
||||
komodo_timestamp,
|
||||
komodo_timestamp, random_string,
|
||||
user::{User, UserConfig},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
@@ -14,7 +14,6 @@ use serror::AddStatusCode;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::random_string,
|
||||
state::{db_client, jwt_client},
|
||||
};
|
||||
|
||||
@@ -53,7 +52,6 @@ struct CallbackQuery {
|
||||
code: String,
|
||||
}
|
||||
|
||||
#[instrument(name = "GithubCallback", level = "debug")]
|
||||
async fn callback(
|
||||
Query(query): Query<CallbackQuery>,
|
||||
) -> anyhow::Result<Redirect> {
|
||||
|
||||
@@ -1,18 +1,16 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use jsonwebtoken::{DecodingKey, Validation, decode};
|
||||
use komodo_client::entities::config::core::{
|
||||
CoreConfig, OauthCredentials,
|
||||
use jsonwebtoken::dangerous::insecure_decode;
|
||||
use komodo_client::entities::{
|
||||
config::core::{CoreConfig, OauthCredentials},
|
||||
random_string,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use serde::{Deserialize, de::DeserializeOwned};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
auth::STATE_PREFIX_LENGTH, config::core_config,
|
||||
helpers::random_string,
|
||||
};
|
||||
use crate::{auth::STATE_PREFIX_LENGTH, config::core_config};
|
||||
|
||||
pub fn google_oauth_client() -> &'static Option<GoogleOauthClient> {
|
||||
static GOOGLE_OAUTH_CLIENT: OnceLock<Option<GoogleOauthClient>> =
|
||||
@@ -85,7 +83,6 @@ impl GoogleOauthClient {
|
||||
.into()
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_login_redirect_url(
|
||||
&self,
|
||||
redirect: Option<String>,
|
||||
@@ -104,7 +101,6 @@ impl GoogleOauthClient {
|
||||
redirect_url
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn check_state(&self, state: &str) -> bool {
|
||||
let mut contained = false;
|
||||
self.states.lock().await.retain(|s| {
|
||||
@@ -118,7 +114,6 @@ impl GoogleOauthClient {
|
||||
contained
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub async fn get_access_token(
|
||||
&self,
|
||||
code: &str,
|
||||
@@ -139,24 +134,15 @@ impl GoogleOauthClient {
|
||||
.context("failed to get google access token using code")
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
pub fn get_google_user(
|
||||
&self,
|
||||
id_token: &str,
|
||||
) -> anyhow::Result<GoogleUser> {
|
||||
let mut v = Validation::new(Default::default());
|
||||
v.insecure_disable_signature_validation();
|
||||
v.validate_aud = false;
|
||||
let res = decode::<GoogleUser>(
|
||||
id_token,
|
||||
&DecodingKey::from_secret(b""),
|
||||
&v,
|
||||
)
|
||||
.context("failed to decode google id token")?;
|
||||
let res = insecure_decode::<GoogleUser>(id_token)
|
||||
.context("failed to decode google id token")?;
|
||||
Ok(res.claims)
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
async fn post<R: DeserializeOwned>(
|
||||
&self,
|
||||
endpoint: &str,
|
||||
|
||||
@@ -5,14 +5,16 @@ use axum::{
|
||||
};
|
||||
use database::mongo_indexed::Document;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::entities::user::{User, UserConfig};
|
||||
use komodo_client::entities::{
|
||||
random_string,
|
||||
user::{User, UserConfig},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use serde::Deserialize;
|
||||
use serror::AddStatusCode;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::random_string,
|
||||
state::{db_client, jwt_client},
|
||||
};
|
||||
|
||||
@@ -52,7 +54,6 @@ struct CallbackQuery {
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
#[instrument(name = "GoogleCallback", level = "debug")]
|
||||
async fn callback(
|
||||
Query(query): Query<CallbackQuery>,
|
||||
) -> anyhow::Result<Redirect> {
|
||||
|
||||
@@ -9,16 +9,15 @@ use jsonwebtoken::{
|
||||
DecodingKey, EncodingKey, Header, Validation, decode, encode,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::auth::JwtResponse, entities::config::core::CoreConfig,
|
||||
api::auth::JwtResponse,
|
||||
entities::{config::core::CoreConfig, random_string},
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::helpers::random_string;
|
||||
|
||||
type ExchangeTokenMap = Mutex<HashMap<String, (JwtResponse, u128)>>;
|
||||
|
||||
#[derive(Serialize, Deserialize)]
|
||||
#[derive(Serialize, Deserialize, Clone)]
|
||||
pub struct JwtClaims {
|
||||
pub id: String,
|
||||
pub iat: u128,
|
||||
@@ -75,7 +74,6 @@ impl JwtClient {
|
||||
.context("failed to decode token claims")
|
||||
}
|
||||
|
||||
#[instrument(level = "debug", skip_all)]
|
||||
pub async fn create_exchange_token(
|
||||
&self,
|
||||
jwt: JwtResponse,
|
||||
@@ -91,7 +89,7 @@ impl JwtClient {
|
||||
);
|
||||
exchange_token
|
||||
}
|
||||
#[instrument(level = "debug", skip(self))]
|
||||
|
||||
pub async fn redeem_exchange_token(
|
||||
&self,
|
||||
exchange_token: &str,
|
||||
|
||||
@@ -22,7 +22,7 @@ use crate::{
|
||||
};
|
||||
|
||||
impl Resolve<AuthArgs> for SignUpLocalUser {
|
||||
#[instrument(name = "SignUpLocalUser", skip(self))]
|
||||
#[instrument("SignUpLocalUser", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &AuthArgs,
|
||||
@@ -104,7 +104,6 @@ impl Resolve<AuthArgs> for SignUpLocalUser {
|
||||
}
|
||||
|
||||
impl Resolve<AuthArgs> for LoginLocalUser {
|
||||
#[instrument(name = "LoginLocalUser", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &AuthArgs,
|
||||
|
||||
@@ -31,7 +31,6 @@ struct RedirectQuery {
|
||||
redirect: Option<String>,
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn auth_request(
|
||||
headers: HeaderMap,
|
||||
mut req: Request,
|
||||
@@ -44,7 +43,6 @@ pub async fn auth_request(
|
||||
Ok(next.run(req).await)
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn get_user_id_from_headers(
|
||||
headers: &HeaderMap,
|
||||
) -> anyhow::Result<String> {
|
||||
@@ -77,7 +75,6 @@ pub async fn get_user_id_from_headers(
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn authenticate_check_enabled(
|
||||
headers: &HeaderMap,
|
||||
) -> anyhow::Result<User> {
|
||||
@@ -90,7 +87,6 @@ pub async fn authenticate_check_enabled(
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn auth_jwt_get_user_id(
|
||||
jwt: &str,
|
||||
) -> anyhow::Result<String> {
|
||||
@@ -102,7 +98,6 @@ pub async fn auth_jwt_get_user_id(
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn auth_jwt_check_enabled(
|
||||
jwt: &str,
|
||||
) -> anyhow::Result<User> {
|
||||
@@ -110,7 +105,6 @@ pub async fn auth_jwt_check_enabled(
|
||||
check_enabled(user_id).await
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn auth_api_key_get_user_id(
|
||||
key: &str,
|
||||
secret: &str,
|
||||
@@ -135,7 +129,6 @@ pub async fn auth_api_key_get_user_id(
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn auth_api_key_check_enabled(
|
||||
key: &str,
|
||||
secret: &str,
|
||||
@@ -144,7 +137,6 @@ pub async fn auth_api_key_check_enabled(
|
||||
check_enabled(user_id).await
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn check_enabled(user_id: String) -> anyhow::Result<User> {
|
||||
let user = get_user(&user_id).await?;
|
||||
if user.enabled {
|
||||
|
||||
@@ -8,7 +8,7 @@ use client::oidc_client;
|
||||
use dashmap::DashMap;
|
||||
use database::mungos::mongodb::bson::{Document, doc};
|
||||
use komodo_client::entities::{
|
||||
komodo_timestamp,
|
||||
komodo_timestamp, random_string,
|
||||
user::{User, UserConfig},
|
||||
};
|
||||
use openidconnect::{
|
||||
@@ -23,7 +23,6 @@ use serror::AddStatusCode;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::random_string,
|
||||
state::{db_client, jwt_client},
|
||||
};
|
||||
|
||||
@@ -75,7 +74,6 @@ pub fn router() -> Router {
|
||||
)
|
||||
}
|
||||
|
||||
#[instrument(name = "OidcRedirect", level = "debug")]
|
||||
async fn login(
|
||||
Query(RedirectQuery { redirect }): Query<RedirectQuery>,
|
||||
) -> anyhow::Result<Redirect> {
|
||||
@@ -138,7 +136,6 @@ struct CallbackQuery {
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
#[instrument(name = "OidcCallback", level = "debug")]
|
||||
async fn callback(
|
||||
Query(query): Query<CallbackQuery>,
|
||||
) -> anyhow::Result<Redirect> {
|
||||
|
||||
@@ -57,7 +57,6 @@ impl aws_credential_types::provider::ProvideCredentials
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
async fn create_ec2_client(region: String) -> Client {
|
||||
let region = Region::new(region);
|
||||
let config = aws_config::defaults(BehaviorVersion::latest())
|
||||
@@ -68,7 +67,7 @@ async fn create_ec2_client(region: String) -> Client {
|
||||
Client::new(&config)
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
#[instrument("LaunchEc2Instance")]
|
||||
pub async fn launch_ec2_instance(
|
||||
name: &str,
|
||||
config: &AwsBuilderConfig,
|
||||
@@ -84,6 +83,8 @@ pub async fn launch_ec2_instance(
|
||||
assign_public_ip,
|
||||
use_public_ip,
|
||||
user_data,
|
||||
periphery_public_key: _,
|
||||
insecure_tls: _,
|
||||
port: _,
|
||||
use_https: _,
|
||||
git_providers: _,
|
||||
@@ -168,7 +169,7 @@ pub async fn launch_ec2_instance(
|
||||
const MAX_TERMINATION_TRIES: usize = 5;
|
||||
const TERMINATION_WAIT_SECS: u64 = 15;
|
||||
|
||||
#[instrument]
|
||||
#[instrument("TerminateEc2Instance")]
|
||||
pub async fn terminate_ec2_instance_with_retry(
|
||||
region: String,
|
||||
instance_id: &str,
|
||||
@@ -208,7 +209,7 @@ pub async fn terminate_ec2_instance_with_retry(
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
#[instrument(skip(client))]
|
||||
#[instrument("TerminateEc2InstanceInner", skip_all)]
|
||||
async fn terminate_ec2_instance_inner(
|
||||
client: &Client,
|
||||
instance_id: &str,
|
||||
@@ -227,7 +228,6 @@ async fn terminate_ec2_instance_inner(
|
||||
}
|
||||
|
||||
/// Automatically retries 5 times, waiting 2 sec in between
|
||||
#[instrument(level = "debug")]
|
||||
async fn get_ec2_instance_status(
|
||||
client: &Client,
|
||||
instance_id: &str,
|
||||
@@ -259,7 +259,6 @@ async fn get_ec2_instance_status(
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn get_ec2_instance_state_name(
|
||||
client: &Client,
|
||||
instance_id: &str,
|
||||
@@ -279,7 +278,6 @@ async fn get_ec2_instance_state_name(
|
||||
}
|
||||
|
||||
/// Automatically retries 5 times, waiting 2 sec in between
|
||||
#[instrument(level = "debug")]
|
||||
async fn get_ec2_instance_public_ip(
|
||||
client: &Client,
|
||||
instance_id: &str,
|
||||
|
||||
@@ -4,6 +4,8 @@ pub mod aws;
|
||||
pub enum BuildCleanupData {
|
||||
/// Nothing to clean up
|
||||
Server,
|
||||
/// Cleanup Periphery connection
|
||||
Url,
|
||||
/// Clean up AWS instance
|
||||
Aws { instance_id: String, region: String },
|
||||
}
|
||||
|
||||
@@ -9,24 +9,97 @@ use environment_file::{
|
||||
use komodo_client::entities::{
|
||||
config::{
|
||||
DatabaseConfig,
|
||||
core::{
|
||||
AwsCredentials, CoreConfig, Env, GithubWebhookAppConfig,
|
||||
GithubWebhookAppInstallationConfig, OauthCredentials,
|
||||
},
|
||||
core::{AwsCredentials, CoreConfig, Env, OauthCredentials},
|
||||
},
|
||||
logger::LogConfig,
|
||||
};
|
||||
use noise::key::{RotatableKeyPair, SpkiPublicKey};
|
||||
|
||||
/// Should call in startup to ensure Core errors without valid private key.
|
||||
pub fn core_keys() -> &'static RotatableKeyPair {
|
||||
static CORE_KEYS: OnceLock<RotatableKeyPair> = OnceLock::new();
|
||||
CORE_KEYS.get_or_init(|| {
|
||||
RotatableKeyPair::from_private_key_spec(
|
||||
&core_config().private_key,
|
||||
)
|
||||
.unwrap()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn core_connection_query() -> &'static String {
|
||||
static CORE_HOSTNAME: OnceLock<String> = OnceLock::new();
|
||||
CORE_HOSTNAME.get_or_init(|| {
|
||||
let host = url::Url::parse(&core_config().host)
|
||||
.context("Failed to parse config field 'host' as URL")
|
||||
.unwrap()
|
||||
.host()
|
||||
.context(
|
||||
"Failed to parse config field 'host' | missing host part",
|
||||
)
|
||||
.unwrap()
|
||||
.to_string();
|
||||
format!("core={}", urlencoding::encode(&host))
|
||||
})
|
||||
}
|
||||
|
||||
pub fn periphery_public_keys() -> Option<&'static [SpkiPublicKey]> {
|
||||
static PERIPHERY_PUBLIC_KEYS: OnceLock<Option<Vec<SpkiPublicKey>>> =
|
||||
OnceLock::new();
|
||||
PERIPHERY_PUBLIC_KEYS
|
||||
.get_or_init(|| {
|
||||
core_config().periphery_public_keys.as_ref().map(
|
||||
|public_keys| {
|
||||
public_keys
|
||||
.iter()
|
||||
.flat_map(|public_key| {
|
||||
let (path, maybe_pem) = if let Some(path) =
|
||||
public_key.strip_prefix("file:")
|
||||
{
|
||||
match std::fs::read_to_string(path).with_context(
|
||||
|| format!("Failed to read periphery public key at {path:?}"),
|
||||
) {
|
||||
Ok(public_key) => (Some(path), public_key),
|
||||
Err(e) => {
|
||||
warn!("{e:#}");
|
||||
return None;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
(None, public_key.clone())
|
||||
};
|
||||
match SpkiPublicKey::from_maybe_pem(&maybe_pem) {
|
||||
Ok(public_key) => Some(public_key),
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"Failed to read periphery public key{} | {e:#}",
|
||||
if let Some(path) = path {
|
||||
format!("at {path:?}")
|
||||
} else {
|
||||
String::new()
|
||||
}
|
||||
);
|
||||
None
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect()
|
||||
},
|
||||
)
|
||||
})
|
||||
.as_deref()
|
||||
}
|
||||
|
||||
pub fn core_config() -> &'static CoreConfig {
|
||||
static CORE_CONFIG: OnceLock<CoreConfig> = OnceLock::new();
|
||||
CORE_CONFIG.get_or_init(|| {
|
||||
let env: Env = match envy::from_env()
|
||||
.context("Failed to parse Komodo Core environment") {
|
||||
Ok(env) => env,
|
||||
Err(e) => {
|
||||
panic!("{e:?}");
|
||||
}
|
||||
};
|
||||
.context("Failed to parse Komodo Core environment")
|
||||
{
|
||||
Ok(env) => env,
|
||||
Err(e) => {
|
||||
panic!("{e:?}");
|
||||
}
|
||||
};
|
||||
let config = if env.komodo_config_paths.is_empty() {
|
||||
println!(
|
||||
"{}: No config paths found, using default config",
|
||||
@@ -34,7 +107,8 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
);
|
||||
CoreConfig::default()
|
||||
} else {
|
||||
let config_keywords = env.komodo_config_keywords
|
||||
let config_keywords = env
|
||||
.komodo_config_keywords
|
||||
.iter()
|
||||
.map(String::as_str)
|
||||
.collect::<Vec<_>>();
|
||||
@@ -44,7 +118,8 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
"Config File Keywords".dimmed(),
|
||||
);
|
||||
(ConfigLoader {
|
||||
paths: &env.komodo_config_paths
|
||||
paths: &env
|
||||
.komodo_config_paths
|
||||
.iter()
|
||||
.map(PathBuf::as_path)
|
||||
.collect::<Vec<_>>(),
|
||||
@@ -53,55 +128,53 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
merge_nested: env.komodo_merge_nested_config,
|
||||
extend_array: env.komodo_extend_config_arrays,
|
||||
debug_print: env.komodo_config_debug,
|
||||
}).load::<CoreConfig>()
|
||||
})
|
||||
.load::<CoreConfig>()
|
||||
.expect("Failed at parsing config from paths")
|
||||
};
|
||||
|
||||
let installations = match (
|
||||
maybe_read_list_from_file(
|
||||
env.komodo_github_webhook_app_installations_ids_file,
|
||||
env.komodo_github_webhook_app_installations_ids
|
||||
),
|
||||
env.komodo_github_webhook_app_installations_namespaces
|
||||
) {
|
||||
(Some(ids), Some(namespaces)) => {
|
||||
if ids.len() != namespaces.len() {
|
||||
panic!("KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS length and KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES length mismatch. Got {ids:?} and {namespaces:?}")
|
||||
}
|
||||
ids
|
||||
.into_iter()
|
||||
.zip(namespaces)
|
||||
.map(|(id, namespace)| GithubWebhookAppInstallationConfig {
|
||||
id,
|
||||
namespace
|
||||
})
|
||||
.collect()
|
||||
},
|
||||
(Some(_), None) | (None, Some(_)) => {
|
||||
panic!("Got only one of KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_IDS or KOMODO_GITHUB_WEBHOOK_APP_INSTALLATIONS_NAMESPACES, both MUST be provided");
|
||||
}
|
||||
(None, None) => {
|
||||
config.github_webhook_app.installations
|
||||
}
|
||||
};
|
||||
|
||||
// recreating CoreConfig here makes sure apply all env overrides applied.
|
||||
CoreConfig {
|
||||
// Secret things overridden with file
|
||||
jwt_secret: maybe_read_item_from_file(env.komodo_jwt_secret_file, env.komodo_jwt_secret).unwrap_or(config.jwt_secret),
|
||||
passkey: maybe_read_item_from_file(env.komodo_passkey_file, env.komodo_passkey)
|
||||
.unwrap_or(config.passkey),
|
||||
webhook_secret: maybe_read_item_from_file(env.komodo_webhook_secret_file, env.komodo_webhook_secret)
|
||||
.unwrap_or(config.webhook_secret),
|
||||
private_key: maybe_read_item_from_file(
|
||||
env.komodo_private_key_file,
|
||||
env.komodo_private_key,
|
||||
)
|
||||
.unwrap_or(config.private_key),
|
||||
passkey: maybe_read_item_from_file(
|
||||
env.komodo_passkey_file,
|
||||
env.komodo_passkey,
|
||||
)
|
||||
.or(config.passkey),
|
||||
jwt_secret: maybe_read_item_from_file(
|
||||
env.komodo_jwt_secret_file,
|
||||
env.komodo_jwt_secret,
|
||||
)
|
||||
.unwrap_or(config.jwt_secret),
|
||||
webhook_secret: maybe_read_item_from_file(
|
||||
env.komodo_webhook_secret_file,
|
||||
env.komodo_webhook_secret,
|
||||
)
|
||||
.unwrap_or(config.webhook_secret),
|
||||
database: DatabaseConfig {
|
||||
uri: maybe_read_item_from_file(env.komodo_database_uri_file,env.komodo_database_uri).unwrap_or(config.database.uri),
|
||||
address: env.komodo_database_address.unwrap_or(config.database.address),
|
||||
username: maybe_read_item_from_file(env.komodo_database_username_file,env
|
||||
.komodo_database_username)
|
||||
.unwrap_or(config.database.username),
|
||||
password: maybe_read_item_from_file(env.komodo_database_password_file,env
|
||||
.komodo_database_password)
|
||||
.unwrap_or(config.database.password),
|
||||
uri: maybe_read_item_from_file(
|
||||
env.komodo_database_uri_file,
|
||||
env.komodo_database_uri,
|
||||
)
|
||||
.unwrap_or(config.database.uri),
|
||||
address: env
|
||||
.komodo_database_address
|
||||
.unwrap_or(config.database.address),
|
||||
username: maybe_read_item_from_file(
|
||||
env.komodo_database_username_file,
|
||||
env.komodo_database_username,
|
||||
)
|
||||
.unwrap_or(config.database.username),
|
||||
password: maybe_read_item_from_file(
|
||||
env.komodo_database_password_file,
|
||||
env.komodo_database_password,
|
||||
)
|
||||
.unwrap_or(config.database.password),
|
||||
app_name: env
|
||||
.komodo_database_app_name
|
||||
.unwrap_or(config.database.app_name),
|
||||
@@ -111,64 +184,82 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
},
|
||||
init_admin_username: maybe_read_item_from_file(
|
||||
env.komodo_init_admin_username_file,
|
||||
env.komodo_init_admin_username
|
||||
).or(config.init_admin_username),
|
||||
env.komodo_init_admin_username,
|
||||
)
|
||||
.or(config.init_admin_username),
|
||||
init_admin_password: maybe_read_item_from_file(
|
||||
env.komodo_init_admin_password_file,
|
||||
env.komodo_init_admin_password
|
||||
).unwrap_or(config.init_admin_password),
|
||||
oidc_enabled: env.komodo_oidc_enabled.unwrap_or(config.oidc_enabled),
|
||||
oidc_provider: env.komodo_oidc_provider.unwrap_or(config.oidc_provider),
|
||||
oidc_redirect_host: env.komodo_oidc_redirect_host.unwrap_or(config.oidc_redirect_host),
|
||||
oidc_client_id: maybe_read_item_from_file(env.komodo_oidc_client_id_file,env
|
||||
.komodo_oidc_client_id)
|
||||
.unwrap_or(config.oidc_client_id),
|
||||
oidc_client_secret: maybe_read_item_from_file(env.komodo_oidc_client_secret_file,env
|
||||
.komodo_oidc_client_secret)
|
||||
.unwrap_or(config.oidc_client_secret),
|
||||
oidc_use_full_email: env.komodo_oidc_use_full_email
|
||||
env.komodo_init_admin_password,
|
||||
)
|
||||
.unwrap_or(config.init_admin_password),
|
||||
oidc_enabled: env
|
||||
.komodo_oidc_enabled
|
||||
.unwrap_or(config.oidc_enabled),
|
||||
oidc_provider: env
|
||||
.komodo_oidc_provider
|
||||
.unwrap_or(config.oidc_provider),
|
||||
oidc_redirect_host: env
|
||||
.komodo_oidc_redirect_host
|
||||
.unwrap_or(config.oidc_redirect_host),
|
||||
oidc_client_id: maybe_read_item_from_file(
|
||||
env.komodo_oidc_client_id_file,
|
||||
env.komodo_oidc_client_id,
|
||||
)
|
||||
.unwrap_or(config.oidc_client_id),
|
||||
oidc_client_secret: maybe_read_item_from_file(
|
||||
env.komodo_oidc_client_secret_file,
|
||||
env.komodo_oidc_client_secret,
|
||||
)
|
||||
.unwrap_or(config.oidc_client_secret),
|
||||
oidc_use_full_email: env
|
||||
.komodo_oidc_use_full_email
|
||||
.unwrap_or(config.oidc_use_full_email),
|
||||
oidc_additional_audiences: maybe_read_list_from_file(env.komodo_oidc_additional_audiences_file,env
|
||||
.komodo_oidc_additional_audiences)
|
||||
.unwrap_or(config.oidc_additional_audiences),
|
||||
oidc_additional_audiences: maybe_read_list_from_file(
|
||||
env.komodo_oidc_additional_audiences_file,
|
||||
env.komodo_oidc_additional_audiences,
|
||||
)
|
||||
.unwrap_or(config.oidc_additional_audiences),
|
||||
google_oauth: OauthCredentials {
|
||||
enabled: env
|
||||
.komodo_google_oauth_enabled
|
||||
.unwrap_or(config.google_oauth.enabled),
|
||||
id: maybe_read_item_from_file(env.komodo_google_oauth_id_file,env
|
||||
.komodo_google_oauth_id)
|
||||
.unwrap_or(config.google_oauth.id),
|
||||
secret: maybe_read_item_from_file(env.komodo_google_oauth_secret_file,env
|
||||
.komodo_google_oauth_secret)
|
||||
.unwrap_or(config.google_oauth.secret),
|
||||
id: maybe_read_item_from_file(
|
||||
env.komodo_google_oauth_id_file,
|
||||
env.komodo_google_oauth_id,
|
||||
)
|
||||
.unwrap_or(config.google_oauth.id),
|
||||
secret: maybe_read_item_from_file(
|
||||
env.komodo_google_oauth_secret_file,
|
||||
env.komodo_google_oauth_secret,
|
||||
)
|
||||
.unwrap_or(config.google_oauth.secret),
|
||||
},
|
||||
github_oauth: OauthCredentials {
|
||||
enabled: env
|
||||
.komodo_github_oauth_enabled
|
||||
.unwrap_or(config.github_oauth.enabled),
|
||||
id: maybe_read_item_from_file(env.komodo_github_oauth_id_file,env
|
||||
.komodo_github_oauth_id)
|
||||
.unwrap_or(config.github_oauth.id),
|
||||
secret: maybe_read_item_from_file(env.komodo_github_oauth_secret_file,env
|
||||
.komodo_github_oauth_secret)
|
||||
.unwrap_or(config.github_oauth.secret),
|
||||
id: maybe_read_item_from_file(
|
||||
env.komodo_github_oauth_id_file,
|
||||
env.komodo_github_oauth_id,
|
||||
)
|
||||
.unwrap_or(config.github_oauth.id),
|
||||
secret: maybe_read_item_from_file(
|
||||
env.komodo_github_oauth_secret_file,
|
||||
env.komodo_github_oauth_secret,
|
||||
)
|
||||
.unwrap_or(config.github_oauth.secret),
|
||||
},
|
||||
aws: AwsCredentials {
|
||||
access_key_id: maybe_read_item_from_file(env.komodo_aws_access_key_id_file, env
|
||||
.komodo_aws_access_key_id)
|
||||
.unwrap_or(config.aws.access_key_id),
|
||||
secret_access_key: maybe_read_item_from_file(env.komodo_aws_secret_access_key_file, env
|
||||
.komodo_aws_secret_access_key)
|
||||
.unwrap_or(config.aws.secret_access_key),
|
||||
},
|
||||
github_webhook_app: GithubWebhookAppConfig {
|
||||
app_id: maybe_read_item_from_file(env.komodo_github_webhook_app_app_id_file, env
|
||||
.komodo_github_webhook_app_app_id)
|
||||
.unwrap_or(config.github_webhook_app.app_id),
|
||||
pk_path: env
|
||||
.komodo_github_webhook_app_pk_path
|
||||
.unwrap_or(config.github_webhook_app.pk_path),
|
||||
installations,
|
||||
access_key_id: maybe_read_item_from_file(
|
||||
env.komodo_aws_access_key_id_file,
|
||||
env.komodo_aws_access_key_id,
|
||||
)
|
||||
.unwrap_or(config.aws.access_key_id),
|
||||
secret_access_key: maybe_read_item_from_file(
|
||||
env.komodo_aws_secret_access_key_file,
|
||||
env.komodo_aws_secret_access_key,
|
||||
)
|
||||
.unwrap_or(config.aws.secret_access_key),
|
||||
},
|
||||
|
||||
// Non secrets
|
||||
@@ -177,12 +268,19 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
port: env.komodo_port.unwrap_or(config.port),
|
||||
bind_ip: env.komodo_bind_ip.unwrap_or(config.bind_ip),
|
||||
timezone: env.komodo_timezone.unwrap_or(config.timezone),
|
||||
first_server: env.komodo_first_server.or(config.first_server),
|
||||
first_server_name: env.komodo_first_server_name.unwrap_or(config.first_server_name),
|
||||
frontend_path: env.komodo_frontend_path.unwrap_or(config.frontend_path),
|
||||
jwt_ttl: env
|
||||
.komodo_jwt_ttl
|
||||
.unwrap_or(config.jwt_ttl),
|
||||
periphery_public_keys: env
|
||||
.komodo_periphery_public_keys
|
||||
.or(config.periphery_public_keys),
|
||||
first_server_address: env
|
||||
.komodo_first_server_address
|
||||
.or(config.first_server_address),
|
||||
first_server_name: env
|
||||
.komodo_first_server_name
|
||||
.or(config.first_server_name),
|
||||
frontend_path: env
|
||||
.komodo_frontend_path
|
||||
.unwrap_or(config.frontend_path),
|
||||
jwt_ttl: env.komodo_jwt_ttl.unwrap_or(config.jwt_ttl),
|
||||
sync_directory: env
|
||||
.komodo_sync_directory
|
||||
.unwrap_or(config.sync_directory),
|
||||
@@ -213,24 +311,31 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
ui_write_disabled: env
|
||||
.komodo_ui_write_disabled
|
||||
.unwrap_or(config.ui_write_disabled),
|
||||
disable_confirm_dialog: env.komodo_disable_confirm_dialog
|
||||
disable_confirm_dialog: env
|
||||
.komodo_disable_confirm_dialog
|
||||
.unwrap_or(config.disable_confirm_dialog),
|
||||
disable_websocket_reconnect: env.komodo_disable_websocket_reconnect
|
||||
disable_websocket_reconnect: env
|
||||
.komodo_disable_websocket_reconnect
|
||||
.unwrap_or(config.disable_websocket_reconnect),
|
||||
enable_new_users: env.komodo_enable_new_users
|
||||
enable_new_users: env
|
||||
.komodo_enable_new_users
|
||||
.unwrap_or(config.enable_new_users),
|
||||
disable_user_registration: env.komodo_disable_user_registration
|
||||
disable_user_registration: env
|
||||
.komodo_disable_user_registration
|
||||
.unwrap_or(config.disable_user_registration),
|
||||
disable_non_admin_create: env.komodo_disable_non_admin_create
|
||||
disable_non_admin_create: env
|
||||
.komodo_disable_non_admin_create
|
||||
.unwrap_or(config.disable_non_admin_create),
|
||||
disable_init_resources: env.komodo_disable_init_resources
|
||||
disable_init_resources: env
|
||||
.komodo_disable_init_resources
|
||||
.unwrap_or(config.disable_init_resources),
|
||||
enable_fancy_toml: env.komodo_enable_fancy_toml
|
||||
enable_fancy_toml: env
|
||||
.komodo_enable_fancy_toml
|
||||
.unwrap_or(config.enable_fancy_toml),
|
||||
lock_login_credentials_for: env.komodo_lock_login_credentials_for
|
||||
lock_login_credentials_for: env
|
||||
.komodo_lock_login_credentials_for
|
||||
.unwrap_or(config.lock_login_credentials_for),
|
||||
local_auth: env.komodo_local_auth
|
||||
.unwrap_or(config.local_auth),
|
||||
local_auth: env.komodo_local_auth.unwrap_or(config.local_auth),
|
||||
logging: LogConfig {
|
||||
level: env
|
||||
.komodo_logging_level
|
||||
@@ -238,23 +343,41 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
stdio: env
|
||||
.komodo_logging_stdio
|
||||
.unwrap_or(config.logging.stdio),
|
||||
pretty: env.komodo_logging_pretty
|
||||
pretty: env
|
||||
.komodo_logging_pretty
|
||||
.unwrap_or(config.logging.pretty),
|
||||
location: env.komodo_logging_location
|
||||
location: env
|
||||
.komodo_logging_location
|
||||
.unwrap_or(config.logging.location),
|
||||
ansi: env.komodo_logging_ansi.unwrap_or(config.logging.ansi),
|
||||
otlp_endpoint: env
|
||||
.komodo_logging_otlp_endpoint
|
||||
.unwrap_or(config.logging.otlp_endpoint),
|
||||
opentelemetry_service_name: env
|
||||
.komodo_logging_opentelemetry_service_name
|
||||
.unwrap_or(config.logging.opentelemetry_service_name),
|
||||
opentelemetry_scope_name: env
|
||||
.komodo_logging_opentelemetry_scope_name
|
||||
.unwrap_or(config.logging.opentelemetry_scope_name),
|
||||
},
|
||||
pretty_startup_config: env.komodo_pretty_startup_config.unwrap_or(config.pretty_startup_config),
|
||||
unsafe_unsanitized_startup_config: env.komodo_unsafe_unsanitized_startup_config.unwrap_or(config.unsafe_unsanitized_startup_config),
|
||||
internet_interface: env.komodo_internet_interface.unwrap_or(config.internet_interface),
|
||||
ssl_enabled: env.komodo_ssl_enabled.unwrap_or(config.ssl_enabled),
|
||||
ssl_key_file: env.komodo_ssl_key_file.unwrap_or(config.ssl_key_file),
|
||||
ssl_cert_file: env.komodo_ssl_cert_file.unwrap_or(config.ssl_cert_file),
|
||||
pretty_startup_config: env
|
||||
.komodo_pretty_startup_config
|
||||
.unwrap_or(config.pretty_startup_config),
|
||||
unsafe_unsanitized_startup_config: env
|
||||
.komodo_unsafe_unsanitized_startup_config
|
||||
.unwrap_or(config.unsafe_unsanitized_startup_config),
|
||||
internet_interface: env
|
||||
.komodo_internet_interface
|
||||
.unwrap_or(config.internet_interface),
|
||||
ssl_enabled: env
|
||||
.komodo_ssl_enabled
|
||||
.unwrap_or(config.ssl_enabled),
|
||||
ssl_key_file: env
|
||||
.komodo_ssl_key_file
|
||||
.unwrap_or(config.ssl_key_file),
|
||||
ssl_cert_file: env
|
||||
.komodo_ssl_cert_file
|
||||
.unwrap_or(config.ssl_cert_file),
|
||||
|
||||
// These can't be overridden on env
|
||||
secrets: config.secrets,
|
||||
|
||||
179
bin/core/src/connection/client.rs
Normal file
179
bin/core/src/connection/client.rs
Normal file
@@ -0,0 +1,179 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use periphery_client::{
|
||||
CONNECTION_RETRY_SECONDS, transport::LoginMessage,
|
||||
};
|
||||
use transport::{
|
||||
auth::{
|
||||
AddressConnectionIdentifiers, ClientLoginFlow,
|
||||
ConnectionIdentifiers,
|
||||
},
|
||||
fix_ws_address,
|
||||
websocket::{
|
||||
Websocket, WebsocketExt as _, login::LoginWebsocketExt,
|
||||
tungstenite::TungsteniteWebsocket,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
config::{core_config, core_connection_query},
|
||||
periphery::PeripheryClient,
|
||||
state::periphery_connections,
|
||||
};
|
||||
|
||||
use super::{PeripheryConnection, PeripheryConnectionArgs};
|
||||
|
||||
impl PeripheryConnectionArgs<'_> {
|
||||
pub async fn spawn_client_connection(
|
||||
self,
|
||||
id: String,
|
||||
insecure: bool,
|
||||
) -> anyhow::Result<PeripheryClient> {
|
||||
let Some(address) = self.address else {
|
||||
return Err(anyhow!(
|
||||
"Cannot spawn client connection with empty address"
|
||||
));
|
||||
};
|
||||
|
||||
let address = fix_ws_address(address);
|
||||
let identifiers =
|
||||
AddressConnectionIdentifiers::extract(&address)?;
|
||||
let endpoint = format!("{address}/?{}", core_connection_query());
|
||||
|
||||
let (connection, mut receiver) =
|
||||
periphery_connections().insert(id.clone(), self).await;
|
||||
|
||||
let responses = connection.responses.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let ws = tokio::select! {
|
||||
ws = TungsteniteWebsocket::connect_maybe_tls_insecure(
|
||||
&endpoint,
|
||||
insecure && endpoint.starts_with("wss"),
|
||||
) => ws,
|
||||
_ = connection.cancel.cancelled() => {
|
||||
break
|
||||
}
|
||||
};
|
||||
|
||||
let (mut socket, accept) = match ws {
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
connection.set_error(e.error).await;
|
||||
tokio::time::sleep(Duration::from_secs(
|
||||
CONNECTION_RETRY_SECONDS,
|
||||
))
|
||||
.await;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let identifiers = identifiers.build(
|
||||
accept.as_bytes(),
|
||||
core_connection_query().as_bytes(),
|
||||
);
|
||||
|
||||
if let Err(e) =
|
||||
connection.client_login(&mut socket, identifiers).await
|
||||
{
|
||||
connection.set_error(e).await;
|
||||
tokio::time::sleep(Duration::from_secs(
|
||||
CONNECTION_RETRY_SECONDS,
|
||||
))
|
||||
.await;
|
||||
continue;
|
||||
};
|
||||
|
||||
connection.handle_socket(socket, &mut receiver).await
|
||||
}
|
||||
});
|
||||
|
||||
Ok(PeripheryClient { id, responses })
|
||||
}
|
||||
}
|
||||
|
||||
impl PeripheryConnection {
|
||||
/// Custom Core -> Periphery side only login wrapper
|
||||
/// to implement passkey support for backward compatibility
|
||||
#[instrument(
|
||||
"PeripheryLogin",
|
||||
skip(self, socket, identifiers),
|
||||
fields(
|
||||
server_id = self.args.id,
|
||||
address = self.args.address,
|
||||
direction = "CoreToPeriphery"
|
||||
)
|
||||
)]
|
||||
async fn client_login(
|
||||
&self,
|
||||
socket: &mut TungsteniteWebsocket,
|
||||
identifiers: ConnectionIdentifiers<'_>,
|
||||
) -> anyhow::Result<()> {
|
||||
// Get the required auth type
|
||||
let v1_passkey_flow =
|
||||
socket
|
||||
.recv_login_v1_passkey_flow()
|
||||
.await
|
||||
.context("Failed to receive Login V1PasskeyFlow message")?;
|
||||
|
||||
if v1_passkey_flow {
|
||||
handle_passkey_login(socket, self.args.passkey.as_deref()).await
|
||||
} else {
|
||||
self
|
||||
.handle_login::<_, ClientLoginFlow>(socket, identifiers)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("V1PasskeyPeripheryLoginFlow", skip(socket, passkey))]
|
||||
async fn handle_passkey_login(
|
||||
socket: &mut TungsteniteWebsocket,
|
||||
// for legacy auth
|
||||
passkey: Option<&str>,
|
||||
) -> anyhow::Result<()> {
|
||||
let res = async {
|
||||
let passkey = if let Some(passkey) = passkey {
|
||||
passkey.as_bytes().to_vec()
|
||||
} else {
|
||||
core_config()
|
||||
.passkey
|
||||
.as_deref()
|
||||
.context("Periphery requires passkey auth")?
|
||||
.as_bytes()
|
||||
.to_vec()
|
||||
};
|
||||
|
||||
socket
|
||||
.send_message(LoginMessage::V1Passkey(passkey))
|
||||
.await
|
||||
.context("Failed to send Login V1Passkey message")?;
|
||||
|
||||
// Receive login state message and return based on value
|
||||
socket
|
||||
.recv_login_success()
|
||||
.await
|
||||
.context("Failed to receive Login Success message")?;
|
||||
|
||||
anyhow::Ok(())
|
||||
}
|
||||
.await;
|
||||
if let Err(e) = res {
|
||||
if let Err(e) = socket
|
||||
.send_login_error(&e)
|
||||
.await
|
||||
.context("Failed to send login failed to client")
|
||||
{
|
||||
// Log additional error
|
||||
warn!("{e:#}");
|
||||
}
|
||||
// Close socket
|
||||
let _ = socket.close().await;
|
||||
// Return the original error
|
||||
Err(e)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
545
bin/core/src/connection/mod.rs
Normal file
545
bin/core/src/connection/mod.rs
Normal file
@@ -0,0 +1,545 @@
|
||||
use std::{
|
||||
sync::{
|
||||
Arc,
|
||||
atomic::{self, AtomicBool},
|
||||
},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::anyhow;
|
||||
use cache::CloneCache;
|
||||
use database::mungos::{by_id::update_one_by_id, mongodb::bson::doc};
|
||||
use encoding::{
|
||||
CastBytes as _, Decode as _, EncodedJsonMessage, EncodedResponse,
|
||||
WithChannel,
|
||||
};
|
||||
use komodo_client::entities::{
|
||||
builder::{AwsBuilderConfig, UrlBuilderConfig},
|
||||
optional_str,
|
||||
server::Server,
|
||||
};
|
||||
use periphery_client::transport::{
|
||||
EncodedTransportMessage, ResponseMessage, TransportMessage,
|
||||
};
|
||||
use serror::serror_into_anyhow_error;
|
||||
use tokio::sync::RwLock;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use transport::{
|
||||
auth::{
|
||||
ConnectionIdentifiers, LoginFlow, LoginFlowArgs,
|
||||
PublicKeyValidator,
|
||||
},
|
||||
channel::{BufferedReceiver, Sender, buffered_channel},
|
||||
websocket::{
|
||||
Websocket, WebsocketReceiver as _, WebsocketReceiverExt,
|
||||
WebsocketSender as _,
|
||||
},
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
config::{core_keys, periphery_public_keys},
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
pub mod client;
|
||||
pub mod server;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct PeripheryConnections(
|
||||
CloneCache<String, Arc<PeripheryConnection>>,
|
||||
);
|
||||
|
||||
impl PeripheryConnections {
|
||||
/// Insert a recreated connection.
|
||||
/// Ensures the fields which must be persisted between
|
||||
/// connection recreation are carried over.
|
||||
pub async fn insert(
|
||||
&self,
|
||||
server_id: String,
|
||||
args: PeripheryConnectionArgs<'_>,
|
||||
) -> (
|
||||
Arc<PeripheryConnection>,
|
||||
BufferedReceiver<EncodedTransportMessage>,
|
||||
) {
|
||||
let (connection, receiver) = if let Some(existing_connection) =
|
||||
self.0.remove(&server_id).await
|
||||
{
|
||||
existing_connection.with_new_args(args)
|
||||
} else {
|
||||
PeripheryConnection::new(args)
|
||||
};
|
||||
|
||||
self.0.insert(server_id, connection.clone()).await;
|
||||
|
||||
(connection, receiver)
|
||||
}
|
||||
|
||||
pub async fn get(
|
||||
&self,
|
||||
server_id: &String,
|
||||
) -> Option<Arc<PeripheryConnection>> {
|
||||
self.0.get(server_id).await
|
||||
}
|
||||
|
||||
/// Remove and cancel connection
|
||||
pub async fn remove(
|
||||
&self,
|
||||
server_id: &String,
|
||||
) -> Option<Arc<PeripheryConnection>> {
|
||||
self
|
||||
.0
|
||||
.remove(server_id)
|
||||
.await
|
||||
.inspect(|connection| connection.cancel())
|
||||
}
|
||||
}
|
||||
|
||||
/// The configurable args of a connection
|
||||
#[derive(Debug, Clone, Copy, PartialEq)]
|
||||
pub struct PeripheryConnectionArgs<'a> {
|
||||
/// Usually the server id
|
||||
pub id: &'a str,
|
||||
pub address: Option<&'a str>,
|
||||
periphery_public_key: Option<&'a str>,
|
||||
/// V1 legacy support.
|
||||
/// Only possible for Core -> Periphery.
|
||||
passkey: Option<&'a str>,
|
||||
}
|
||||
|
||||
impl PublicKeyValidator for PeripheryConnectionArgs<'_> {
|
||||
type ValidationResult = String;
|
||||
#[instrument("ValidatePeripheryPublicKey", skip(self))]
|
||||
async fn validate(
|
||||
&self,
|
||||
public_key: String,
|
||||
) -> anyhow::Result<Self::ValidationResult> {
|
||||
let invalid_error = || {
|
||||
spawn_update_attempted_public_key(
|
||||
self.id.to_string(),
|
||||
Some(public_key.clone()),
|
||||
);
|
||||
anyhow!("{public_key} is invalid")
|
||||
.context(
|
||||
"Ensure public key matches configured Periphery Public Key",
|
||||
)
|
||||
.context("Core failed to validate Periphery public key")
|
||||
};
|
||||
let core_to_periphery = self.address.is_some();
|
||||
match (self.periphery_public_key, core_to_periphery) {
|
||||
// The key matches expected.
|
||||
(Some(expected), _) if public_key == expected => Ok(public_key),
|
||||
// Explicit auth failed.
|
||||
(Some(_), _) => Err(invalid_error()),
|
||||
// Core -> Periphery connections with no explicit
|
||||
// Periphery public key are not validated.
|
||||
(None, true) => Ok(public_key),
|
||||
// Periphery -> Core connections with no explicit
|
||||
// Periphery public key can fall back to Core config `periphery_public_keys` if defined.
|
||||
(None, false) => {
|
||||
let expected =
|
||||
periphery_public_keys().ok_or_else(invalid_error)?;
|
||||
if expected
|
||||
.iter()
|
||||
.any(|expected| public_key == expected.as_str())
|
||||
{
|
||||
Ok(public_key)
|
||||
} else {
|
||||
Err(invalid_error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> PeripheryConnectionArgs<'a> {
|
||||
pub fn from_server(server: &'a Server) -> Self {
|
||||
Self {
|
||||
id: &server.id,
|
||||
address: optional_str(&server.config.address),
|
||||
periphery_public_key: optional_str(&server.info.public_key),
|
||||
passkey: optional_str(&server.config.passkey),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_url_builder(
|
||||
id: &'a str,
|
||||
config: &'a UrlBuilderConfig,
|
||||
) -> Self {
|
||||
Self {
|
||||
id,
|
||||
address: optional_str(&config.address),
|
||||
periphery_public_key: optional_str(
|
||||
&config.periphery_public_key,
|
||||
),
|
||||
passkey: optional_str(&config.passkey),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_aws_builder(
|
||||
id: &'a str,
|
||||
address: &'a str,
|
||||
config: &'a AwsBuilderConfig,
|
||||
) -> Self {
|
||||
Self {
|
||||
id,
|
||||
address: Some(address),
|
||||
periphery_public_key: optional_str(
|
||||
&config.periphery_public_key,
|
||||
),
|
||||
passkey: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_owned(self) -> OwnedPeripheryConnectionArgs {
|
||||
OwnedPeripheryConnectionArgs {
|
||||
id: self.id.to_string(),
|
||||
address: self.address.map(str::to_string),
|
||||
periphery_public_key: self
|
||||
.periphery_public_key
|
||||
.map(str::to_string),
|
||||
passkey: self.passkey.map(str::to_string),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn matches<'b>(
|
||||
self,
|
||||
args: impl Into<PeripheryConnectionArgs<'b>>,
|
||||
) -> bool {
|
||||
self == args.into()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct OwnedPeripheryConnectionArgs {
|
||||
/// Usually the Server id.
|
||||
pub id: String,
|
||||
/// Specify outbound connection address.
|
||||
/// Inbound connections have this as None
|
||||
pub address: Option<String>,
|
||||
/// The public key to expect Periphery to have.
|
||||
/// If None, must have 'periphery_public_keys' set
|
||||
/// in Core config, or will error
|
||||
pub periphery_public_key: Option<String>,
|
||||
/// V1 legacy support.
|
||||
/// Only possible for Core -> Periphery connection.
|
||||
pub passkey: Option<String>,
|
||||
}
|
||||
|
||||
impl OwnedPeripheryConnectionArgs {
|
||||
pub fn borrow(&self) -> PeripheryConnectionArgs<'_> {
|
||||
PeripheryConnectionArgs {
|
||||
id: &self.id,
|
||||
address: self.address.as_deref(),
|
||||
periphery_public_key: self.periphery_public_key.as_deref(),
|
||||
passkey: self.passkey.as_deref(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PeripheryConnectionArgs<'_>>
|
||||
for OwnedPeripheryConnectionArgs
|
||||
{
|
||||
fn from(value: PeripheryConnectionArgs<'_>) -> Self {
|
||||
value.to_owned()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<&'a OwnedPeripheryConnectionArgs>
|
||||
for PeripheryConnectionArgs<'a>
|
||||
{
|
||||
fn from(value: &'a OwnedPeripheryConnectionArgs) -> Self {
|
||||
value.borrow()
|
||||
}
|
||||
}
|
||||
|
||||
/// Sends None as InProgress ping.
|
||||
pub type ResponseChannels =
|
||||
CloneCache<Uuid, Sender<EncodedResponse<EncodedJsonMessage>>>;
|
||||
|
||||
pub type TerminalChannels =
|
||||
CloneCache<Uuid, Sender<anyhow::Result<Vec<u8>>>>;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct PeripheryConnection {
|
||||
/// The connection args
|
||||
pub args: OwnedPeripheryConnectionArgs,
|
||||
/// Send and receive bytes over the connection socket.
|
||||
pub sender: Sender<EncodedTransportMessage>,
|
||||
/// Cancel the connection
|
||||
pub cancel: CancellationToken,
|
||||
/// Whether Periphery is currently connected.
|
||||
pub connected: AtomicBool,
|
||||
// These fields must be maintained if new connection replaces old
|
||||
// at the same server id.
|
||||
/// Stores latest connection error
|
||||
pub error: Arc<RwLock<Option<serror::Serror>>>,
|
||||
/// Forward bytes from Periphery to response channel handlers.
|
||||
pub responses: Arc<ResponseChannels>,
|
||||
/// Forward bytes from Periphery to terminal channel handlers.
|
||||
pub terminals: Arc<TerminalChannels>,
|
||||
}
|
||||
|
||||
impl PeripheryConnection {
|
||||
pub fn new(
|
||||
args: impl Into<OwnedPeripheryConnectionArgs>,
|
||||
) -> (
|
||||
Arc<PeripheryConnection>,
|
||||
BufferedReceiver<EncodedTransportMessage>,
|
||||
) {
|
||||
let (sender, receiever) = buffered_channel();
|
||||
(
|
||||
PeripheryConnection {
|
||||
sender,
|
||||
args: args.into(),
|
||||
cancel: CancellationToken::new(),
|
||||
connected: AtomicBool::new(false),
|
||||
error: Default::default(),
|
||||
responses: Default::default(),
|
||||
terminals: Default::default(),
|
||||
}
|
||||
.into(),
|
||||
receiever,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn with_new_args(
|
||||
&self,
|
||||
args: impl Into<OwnedPeripheryConnectionArgs>,
|
||||
) -> (
|
||||
Arc<PeripheryConnection>,
|
||||
BufferedReceiver<EncodedTransportMessage>,
|
||||
) {
|
||||
// Ensure this connection is cancelled.
|
||||
self.cancel();
|
||||
let (sender, receiever) = buffered_channel();
|
||||
(
|
||||
PeripheryConnection {
|
||||
sender,
|
||||
args: args.into(),
|
||||
cancel: CancellationToken::new(),
|
||||
connected: AtomicBool::new(false),
|
||||
error: self.error.clone(),
|
||||
responses: self.responses.clone(),
|
||||
terminals: self.terminals.clone(),
|
||||
}
|
||||
.into(),
|
||||
receiever,
|
||||
)
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
"StandardPeripheryLoginFlow",
|
||||
skip(self, socket, identifiers),
|
||||
fields(expected_public_key = self.args.periphery_public_key)
|
||||
)]
|
||||
pub async fn handle_login<W: Websocket, L: LoginFlow>(
|
||||
&self,
|
||||
socket: &mut W,
|
||||
identifiers: ConnectionIdentifiers<'_>,
|
||||
) -> anyhow::Result<()> {
|
||||
L::login(LoginFlowArgs {
|
||||
socket,
|
||||
identifiers,
|
||||
private_key: core_keys().load().private.as_str(),
|
||||
public_key_validator: self.args.borrow(),
|
||||
})
|
||||
.await?;
|
||||
// Clear attempted public key after successful login
|
||||
spawn_update_attempted_public_key(self.args.id.clone(), None);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Drive an established Periphery websocket until it closes:
/// forwards outbound messages from `receiver` to the socket, and
/// dispatches inbound messages via [`Self::handle_incoming_message`].
/// Marks the connection as connected on entry and disconnected on exit.
pub async fn handle_socket<W: Websocket>(
  &self,
  socket: W,
  receiver: &mut BufferedReceiver<EncodedTransportMessage>,
) {
  // Child of the connection-level token: cancelling the connection
  // stops both loops below, and either loop exiting cancels the other.
  let cancel = self.cancel.child_token();

  self.set_connected(true);
  // Start the session with a clean error slate.
  self.clear_error().await;

  let (mut ws_write, mut ws_read) = socket.split();

  ws_read.set_cancel(cancel.clone());
  receiver.set_cancel(cancel.clone());

  // Outbound half: pull messages from `receiver` and write them to
  // the socket. The 5s recv timeout doubles as the ping interval.
  let forward_writes = async {
    loop {
      let message = match tokio::time::timeout(
        Duration::from_secs(5),
        receiver.recv(),
      )
      .await
      {
        Ok(Ok(message)) => message,
        // Receiver closed or cancelled: stop forwarding.
        Ok(Err(_)) => break,
        // Handle sending Ping
        Err(_) => {
          if let Err(e) = ws_write.ping().await {
            self.set_error(e).await;
            break;
          }
          continue;
        }
      };
      match ws_write.send(message.into_bytes()).await {
        // Send succeeded: clear the receiver's retry buffer.
        Ok(_) => receiver.clear_buffer(),
        Err(e) => {
          self.set_error(e).await;
          break;
        }
      }
    }
    // Cancel again if not already
    let _ = ws_write.close().await;
    cancel.cancel();
  };

  // Inbound half: read messages off the socket and dispatch them.
  let handle_reads = async {
    loop {
      match ws_read.recv_message().await {
        Ok(message) => self.handle_incoming_message(message).await,
        Err(e) => {
          self.set_error(e).await;
          break;
        }
      }
    }
    // Cancel again if not already
    cancel.cancel();
  };

  // Run both halves concurrently until both finish; each cancels the
  // shared token on exit, which unblocks the other side.
  tokio::join!(forward_writes, handle_reads);

  self.set_connected(false);
}
|
||||
|
||||
/// Dispatch one inbound transport message to the matching in-memory
/// channel: `Response` messages go to `self.responses`, `Terminal`
/// messages to `self.terminals`. Decode failures, missing channels,
/// and unexpected message kinds are logged and otherwise dropped.
pub async fn handle_incoming_message(
  &self,
  message: TransportMessage,
) {
  match message {
    // API response payload: forward to the waiting response channel.
    TransportMessage::Response(data) => {
      match data.decode().map(ResponseMessage::into_inner) {
        Ok(WithChannel { channel, data }) => {
          // The caller that sent the request should have registered
          // a response channel under this id.
          let Some(response_channel) =
            self.responses.get(&channel).await
          else {
            warn!(
              "Failed to forward Response message | No response channel found at {channel}"
            );
            return;
          };
          if let Err(e) = response_channel.send(data).await {
            warn!(
              "Failed to forward Response | Response channel failure at {channel} | {e:#}"
            );
          }
        }
        Err(e) => {
          warn!("Failed to read Response message | {e:#}");
        }
      }
    }
    // Terminal payload: forward to the terminal channel by id.
    TransportMessage::Terminal(data) => match data.decode() {
      Ok(WithChannel {
        channel: channel_id,
        data,
      }) => {
        let Some(channel) = self.terminals.get(&channel_id).await
        else {
          warn!(
            "Failed to forward Terminal message | No terminal channel found at {channel_id}"
          );
          return;
        };
        if let Err(e) = channel.send(data).await {
          warn!(
            "Failed to forward Terminal message | Channel failure at {channel_id} | {e:#}"
          );
        }
      }
      Err(e) => {
        warn!("Failed to read Terminal message | {e:#}");
      }
    },
    // Any other message kind is unexpected on this connection.
    other => {
      warn!("Received unexpected transport message | {other:?}");
    }
  }
}
|
||||
|
||||
pub fn set_connected(&self, connected: bool) {
|
||||
self.connected.store(connected, atomic::Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub fn connected(&self) -> bool {
|
||||
self.connected.load(atomic::Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Polls connected 3 times (500ms in between) before bailing.
|
||||
pub async fn bail_if_not_connected(&self) -> anyhow::Result<()> {
|
||||
const POLL_TIMES: usize = 3;
|
||||
for i in 0..POLL_TIMES {
|
||||
if self.connected() {
|
||||
return Ok(());
|
||||
}
|
||||
if i < POLL_TIMES - 1 {
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
}
|
||||
}
|
||||
if let Some(e) = self.error().await {
|
||||
Err(serror_into_anyhow_error(e))
|
||||
} else {
|
||||
Err(anyhow!("Server is not currently connected"))
|
||||
}
|
||||
}
|
||||
|
||||
/// Clone out the currently stored connection error, if any.
pub async fn error(&self) -> Option<serror::Serror> {
  let guard = self.error.read().await;
  guard.clone()
}
|
||||
|
||||
/// Store `e` as the connection's current error, replacing any
/// previously stored one.
pub async fn set_error(&self, e: anyhow::Error) {
  *self.error.write().await = Some(e.into());
}
|
||||
|
||||
pub async fn clear_error(&self) {
|
||||
let mut error = self.error.write().await;
|
||||
*error = None;
|
||||
}
|
||||
|
||||
pub fn cancel(&self) {
|
||||
self.cancel.cancel();
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn task to set the 'attempted_public_key'
|
||||
/// for easy manual connection acceptance later on.
|
||||
fn spawn_update_attempted_public_key(
|
||||
id: String,
|
||||
public_key: impl Into<Option<String>>,
|
||||
) {
|
||||
let public_key = public_key.into();
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = update_one_by_id(
|
||||
&db_client().servers,
|
||||
&id,
|
||||
doc! {
|
||||
"$set": {
|
||||
"info.attempted_public_key": &public_key.as_deref().unwrap_or_default(),
|
||||
}
|
||||
},
|
||||
None,
|
||||
)
|
||||
.await
|
||||
{
|
||||
warn!(
|
||||
"Failed to update attempted public_key for Server {id} | {e:?}"
|
||||
);
|
||||
};
|
||||
});
|
||||
}
|
||||
369
bin/core/src/connection/server.rs
Normal file
369
bin/core/src/connection/server.rs
Normal file
@@ -0,0 +1,369 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::{
|
||||
extract::{Query, WebSocketUpgrade},
|
||||
http::{HeaderMap, StatusCode},
|
||||
response::Response,
|
||||
};
|
||||
use database::mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use komodo_client::{
|
||||
api::write::{CreateBuilder, CreateServer, UpdateResourceMeta},
|
||||
entities::{
|
||||
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
|
||||
komodo_timestamp,
|
||||
onboarding_key::OnboardingKey,
|
||||
server::{PartialServerConfig, Server},
|
||||
user::system_user,
|
||||
},
|
||||
};
|
||||
use periphery_client::{
|
||||
api::PeripheryConnectionQuery, transport::LoginMessage,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use serror::{AddStatusCode, AddStatusCodeError};
|
||||
use tracing::Instrument;
|
||||
use transport::{
|
||||
auth::{
|
||||
HeaderConnectionIdentifiers, LoginFlow, LoginFlowArgs,
|
||||
PublicKeyValidator, ServerLoginFlow,
|
||||
},
|
||||
websocket::{
|
||||
Websocket, WebsocketExt as _, axum::AxumWebsocket,
|
||||
login::LoginWebsocketExt,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
api::write::WriteArgs,
|
||||
config::core_keys,
|
||||
helpers::query::id_or_name_filter,
|
||||
resource::KomodoResource,
|
||||
state::{db_client, periphery_connections},
|
||||
};
|
||||
|
||||
use super::PeripheryConnectionArgs;
|
||||
|
||||
pub async fn handler(
|
||||
Query(PeripheryConnectionQuery {
|
||||
server: server_query,
|
||||
}): Query<PeripheryConnectionQuery>,
|
||||
mut headers: HeaderMap,
|
||||
ws: WebSocketUpgrade,
|
||||
) -> serror::Result<Response> {
|
||||
let identifiers =
|
||||
HeaderConnectionIdentifiers::extract(&mut headers)
|
||||
.status_code(StatusCode::UNAUTHORIZED)?;
|
||||
|
||||
if server_query.is_empty() {
|
||||
return Err(
|
||||
anyhow!("Must provide non-empty server specifier")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
);
|
||||
}
|
||||
|
||||
// Handle connection vs. onboarding flow.
|
||||
match Server::coll()
|
||||
.find_one(id_or_name_filter(&server_query))
|
||||
.await
|
||||
.context("Failed to query database for Server")?
|
||||
{
|
||||
Some(server) => {
|
||||
existing_server_handler(server_query, server, identifiers, ws)
|
||||
.await
|
||||
}
|
||||
None if ObjectId::from_str(&server_query).is_err() => {
|
||||
onboard_server_handler(server_query, identifiers, ws).await
|
||||
}
|
||||
None => Err(
|
||||
anyhow!("Must provide name based Server specifier for onboarding flow, name cannot be valid ObjectId (hex)")
|
||||
.status_code(StatusCode::UNAUTHORIZED),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// Periphery -> Core connection flow for a Server that already exists.
/// Validates the Server's config, registers the connection, then
/// upgrades the websocket and runs the login handshake followed by
/// the main socket loop.
async fn existing_server_handler(
  server_query: String,
  server: Server,
  identifiers: HeaderConnectionIdentifiers,
  ws: WebSocketUpgrade,
) -> serror::Result<Response> {
  if !server.config.enabled {
    return Err(anyhow!("Server is Disabled."))
      .status_code(StatusCode::BAD_REQUEST);
  }

  // A configured address means Core dials out to Periphery; inbound
  // Periphery -> Core connections are rejected in that mode.
  if !server.config.address.is_empty() {
    return Err(anyhow!(
      "Server is configured to use a Core -> Periphery connection."
    ))
    .status_code(StatusCode::BAD_REQUEST);
  }

  let connections = periphery_connections();

  // Ensure connected server can't get bumped off the connection.
  // Treat this as authorization issue.
  if let Some(existing_connection) = connections.get(&server.id).await
    && existing_connection.connected()
  {
    return Err(
      anyhow!("A Server '{server_query}' is already connected")
        .status_code(StatusCode::UNAUTHORIZED),
    );
  }

  // Register (or replace) the connection entry for this Server id,
  // getting back the connection handle and the outbound receiver.
  let (connection, mut receiver) = periphery_connections()
    .insert(
      server.id.clone(),
      PeripheryConnectionArgs::from_server(&server),
    )
    .await;

  Ok(ws.on_upgrade(|socket| async move {
    let query =
      format!("server={}", urlencoding::encode(&server_query));
    let mut socket = AxumWebsocket(socket);

    // Tell the client this is NOT the onboarding flow.
    if let Err(e) = socket
      .send_message(LoginMessage::OnboardingFlow(false))
      .await
      .context("Failed to send Login OnboardingFlow false message")
    {
      connection.set_error(e).await;
      return;
    };

    let span = info_span!(
      "PeripheryLogin",
      server_id = server.id,
      direction = "PeripheryToCore"
    );
    let login = async {
      connection
        .handle_login::<_, ServerLoginFlow>(
          &mut socket,
          identifiers.build(query.as_bytes()),
        )
        .await
    }
    .instrument(span)
    .await;

    if let Err(e) = login {
      connection.set_error(e).await;
      return;
    }

    // Login complete: run the main read/write loop until disconnect.
    connection.handle_socket(socket, &mut receiver).await
  }))
}
|
||||
|
||||
/// Onboarding flow for a Server that does not yet exist in the
/// database: authenticates with an onboarding key, receives the new
/// Server's public key, creates the Server (and optionally a Builder),
/// then closes the socket so the client reconnects through the
/// existing-server flow.
async fn onboard_server_handler(
  server_query: String,
  identifiers: HeaderConnectionIdentifiers,
  ws: WebSocketUpgrade,
) -> serror::Result<Response> {
  Ok(ws.on_upgrade(|socket| async move {
    let query =
      format!("server={}", urlencoding::encode(&server_query));
    let mut socket = AxumWebsocket(socket);

    // Tell the client this IS the onboarding flow.
    if let Err(e) = socket.send_message(LoginMessage::OnboardingFlow(true)).await.context(
      "Failed to send Login OnboardingFlow true message",
    ).context("Server onboarding error") {
      warn!("{e:#}");
      return;
    };

    // Login here validates against stored onboarding keys
    // (CreationKeyValidator) and yields the matched key on success.
    let onboarding_key = match ServerLoginFlow::login(LoginFlowArgs {
      socket: &mut socket,
      identifiers: identifiers.build(query.as_bytes()),
      private_key: core_keys().load().private.as_str(),
      public_key_validator: CreationKeyValidator,
    })
    .await
    {
      Ok(onboarding_key) => onboarding_key,
      Err(e) => {
        debug!("Server {server_query} failed to onboard | {e:#}");
        return;
      }
    };

    // Post onboarding login 1: Receive public key
    let public_key = match socket
      .recv_login_public_key()
      .await
    {
      Ok(public_key) => public_key,
      Err(e) => {
        warn!("Server {server_query} failed to onboard | failed to receive Server public key | {e:#}");
        return;
      }
    };

    // Create the Server (and optionally a Builder). On failure, try
    // to report the error to the client before bailing.
    let server_id = match create_server_maybe_builder(
      server_query,
      public_key.into_inner(),
      onboarding_key.copy_server,
      onboarding_key.tags,
      onboarding_key.create_builder
    ).await {
      Ok(server_id) => server_id,
      Err(e) => {
        warn!("{e:#}");
        if let Err(e) = socket
          .send_login_error(&e)
          .await
          .context("Failed to send Server creation failed to client")
        {
          // Log additional error
          warn!("{e:#}");
        }
        return;
      }
    };

    if let Err(e) = socket
      .send_message(LoginMessage::Success)
      .await
      .context("Failed to send Login Onboarding Successful message")
    {
      // Log additional error
      warn!("{e:#}");
    }

    // Server created, close and trigger reconnect
    // and handling using existing server handler.
    let _ = socket.close().await;

    // Add the server to onboarding key "Onboarded"
    let res = db_client()
      .onboarding_keys
      .update_one(
        doc! { "public_key": &onboarding_key.public_key },
        doc! { "$push": { "onboarded": server_id } },
      ).await;
    if let Err(e) = res {
      warn!("Failed to update onboarding key 'onboarded' | {e:?}");
    }
  }))
}
|
||||
|
||||
async fn create_server_maybe_builder(
|
||||
server_query: String,
|
||||
public_key: String,
|
||||
copy_server: String,
|
||||
tags: Vec<String>,
|
||||
create_builder: bool,
|
||||
) -> anyhow::Result<String> {
|
||||
let config = if copy_server.is_empty() {
|
||||
PartialServerConfig {
|
||||
enabled: Some(true),
|
||||
..Default::default()
|
||||
}
|
||||
} else {
|
||||
let config = match db_client()
|
||||
.servers
|
||||
.find_one(id_or_name_filter(©_server))
|
||||
.await
|
||||
{
|
||||
Ok(Some(server)) => server.config,
|
||||
Ok(None) => {
|
||||
warn!(
|
||||
"Server onboarding: Failed to find Server {}",
|
||||
copy_server
|
||||
);
|
||||
Default::default()
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(
|
||||
"Failed to query database for onboarding key 'copy_server' | {e:?}"
|
||||
);
|
||||
Default::default()
|
||||
}
|
||||
};
|
||||
PartialServerConfig {
|
||||
enabled: Some(true),
|
||||
address: None,
|
||||
..config.into()
|
||||
}
|
||||
};
|
||||
|
||||
let args = WriteArgs {
|
||||
user: system_user().to_owned(),
|
||||
};
|
||||
|
||||
let server = CreateServer {
|
||||
name: server_query.clone(),
|
||||
config,
|
||||
public_key: Some(public_key),
|
||||
}
|
||||
.resolve(&args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Server onboarding flow failed at Server creation")?;
|
||||
|
||||
// Don't need to fail, only warn on this
|
||||
if let Err(e) = (UpdateResourceMeta {
|
||||
target: (&server).into(),
|
||||
tags: Some(tags),
|
||||
description: None,
|
||||
template: None,
|
||||
})
|
||||
.resolve(&args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Server onboarding flow failed at Server creation")
|
||||
{
|
||||
warn!("{e:#}");
|
||||
};
|
||||
|
||||
if create_builder {
|
||||
// Don't need to fail, only warn on this
|
||||
if let Err(e) = (CreateBuilder {
|
||||
name: server_query,
|
||||
config: PartialBuilderConfig::Server(
|
||||
PartialServerBuilderConfig {
|
||||
server_id: Some(server.id.clone()),
|
||||
},
|
||||
),
|
||||
})
|
||||
.resolve(&args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("Server onboarding flow failed at Builder creation")
|
||||
{
|
||||
warn!("{e:#}");
|
||||
};
|
||||
}
|
||||
|
||||
Ok(server.id)
|
||||
}
|
||||
|
||||
struct CreationKeyValidator;
|
||||
|
||||
impl PublicKeyValidator for CreationKeyValidator {
|
||||
type ValidationResult = OnboardingKey;
|
||||
async fn validate(
|
||||
&self,
|
||||
public_key: String,
|
||||
) -> anyhow::Result<Self::ValidationResult> {
|
||||
let onboarding_key = db_client()
|
||||
.onboarding_keys
|
||||
.find_one(doc! { "public_key": &public_key })
|
||||
.await
|
||||
.context("Failed to query database for Server onboarding keys")?
|
||||
.context("Matching Server onboarding key not found")?;
|
||||
// Check enabled and not expired.
|
||||
if onboarding_key.enabled
|
||||
&& (onboarding_key.expires == 0
|
||||
|| onboarding_key.expires > komodo_timestamp())
|
||||
{
|
||||
Ok(onboarding_key)
|
||||
} else {
|
||||
Err(anyhow!("Onboarding key is invalid"))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use anyhow::anyhow;
|
||||
use cache::CloneCache;
|
||||
use komodo_client::{
|
||||
busy::Busy,
|
||||
entities::{
|
||||
@@ -8,24 +9,24 @@ use komodo_client::{
|
||||
deployment::DeploymentActionState,
|
||||
procedure::ProcedureActionState, repo::RepoActionState,
|
||||
server::ServerActionState, stack::StackActionState,
|
||||
sync::ResourceSyncActionState,
|
||||
swarm::SwarmActionState, sync::ResourceSyncActionState,
|
||||
},
|
||||
};
|
||||
|
||||
use super::cache::Cache;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ActionStates {
|
||||
pub server: Cache<String, Arc<ActionState<ServerActionState>>>,
|
||||
pub stack: Cache<String, Arc<ActionState<StackActionState>>>,
|
||||
pub swarm: CloneCache<String, Arc<ActionState<SwarmActionState>>>,
|
||||
pub server: CloneCache<String, Arc<ActionState<ServerActionState>>>,
|
||||
pub stack: CloneCache<String, Arc<ActionState<StackActionState>>>,
|
||||
pub deployment:
|
||||
Cache<String, Arc<ActionState<DeploymentActionState>>>,
|
||||
pub build: Cache<String, Arc<ActionState<BuildActionState>>>,
|
||||
pub repo: Cache<String, Arc<ActionState<RepoActionState>>>,
|
||||
CloneCache<String, Arc<ActionState<DeploymentActionState>>>,
|
||||
pub build: CloneCache<String, Arc<ActionState<BuildActionState>>>,
|
||||
pub repo: CloneCache<String, Arc<ActionState<RepoActionState>>>,
|
||||
pub procedure:
|
||||
Cache<String, Arc<ActionState<ProcedureActionState>>>,
|
||||
pub action: Cache<String, Arc<ActionState<ActionActionState>>>,
|
||||
pub sync: Cache<String, Arc<ActionState<ResourceSyncActionState>>>,
|
||||
CloneCache<String, Arc<ActionState<ProcedureActionState>>>,
|
||||
pub action: CloneCache<String, Arc<ActionState<ActionActionState>>>,
|
||||
pub sync:
|
||||
CloneCache<String, Arc<ActionState<ResourceSyncActionState>>>,
|
||||
}
|
||||
|
||||
/// Need to be able to check "busy" with write lock acquired.
|
||||
|
||||
@@ -3,11 +3,12 @@ use std::collections::HashMap;
|
||||
use komodo_client::entities::{
|
||||
action::Action, alerter::Alerter, build::Build, builder::Builder,
|
||||
deployment::Deployment, procedure::Procedure, repo::Repo,
|
||||
server::Server, stack::Stack, sync::ResourceSync,
|
||||
server::Server, stack::Stack, swarm::Swarm, sync::ResourceSync,
|
||||
};
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct AllResourcesById {
|
||||
pub swarms: HashMap<String, Swarm>,
|
||||
pub servers: HashMap<String, Server>,
|
||||
pub deployments: HashMap<String, Deployment>,
|
||||
pub stacks: HashMap<String, Stack>,
|
||||
@@ -27,6 +28,10 @@ impl AllResourcesById {
|
||||
let id_to_tags = ↦
|
||||
let match_tags = &[];
|
||||
Ok(Self {
|
||||
swarms: crate::resource::get_id_to_resource_map::<Swarm>(
|
||||
id_to_tags, match_tags,
|
||||
)
|
||||
.await?,
|
||||
servers: crate::resource::get_id_to_resource_map::<Server>(
|
||||
id_to_tags, match_tags,
|
||||
)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user