forked from github-starred/komodo
Compare commits
293 Commits
v1.17.4-de
...
v2.0.0-dev
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1df36c4266 | ||
|
|
36f7ad33c7 | ||
|
|
ec34b2c139 | ||
|
|
d14c28d1f2 | ||
|
|
68f7a0e9ce | ||
|
|
50f0376f0a | ||
|
|
bbd53747ad | ||
|
|
6a2adf1f83 | ||
|
|
128b15b94f | ||
|
|
8d74b377b7 | ||
|
|
d7e972e5c6 | ||
|
|
e5cb4aac5a | ||
|
|
d0f62f8326 | ||
|
|
47c4091a4b | ||
|
|
973480e2b3 | ||
|
|
b9e1cc87d2 | ||
|
|
05d20c8603 | ||
|
|
fe2d68a001 | ||
|
|
26fd5b2a6d | ||
|
|
76457bcb61 | ||
|
|
ebd2c2238d | ||
|
|
b7fc1bef7b | ||
|
|
50b9f2e1bf | ||
|
|
41ce86f6ab | ||
|
|
7a21c01e52 | ||
|
|
e63e282510 | ||
|
|
5456b36c18 | ||
|
|
fcfb58a7e9 | ||
|
|
2203004a74 | ||
|
|
996fb49823 | ||
|
|
35d22c77a2 | ||
|
|
44ab89600f | ||
|
|
0900e48cb8 | ||
|
|
c530a46a27 | ||
|
|
f69c8db3ea | ||
|
|
48f2f651e1 | ||
|
|
bdb5b4185e | ||
|
|
42a7b8c19b | ||
|
|
ded17e4840 | ||
|
|
80fb1e6889 | ||
|
|
1dc861f538 | ||
|
|
3da63395fd | ||
|
|
c40cbc4d77 | ||
|
|
05e352e88c | ||
|
|
5884c09fb8 | ||
|
|
f8add38043 | ||
|
|
501f734e8b | ||
|
|
de62732ac8 | ||
|
|
bfa61058cd | ||
|
|
72ca6d9910 | ||
|
|
4d1ac32ad3 | ||
|
|
927e5959fa | ||
|
|
37ccc6e1ef | ||
|
|
deaa8754f3 | ||
|
|
dd8ac67c72 | ||
|
|
be4457c9cf | ||
|
|
1868421815 | ||
|
|
366f7a12b4 | ||
|
|
75119370df | ||
|
|
9e85b9d4c8 | ||
|
|
8afbbf23dc | ||
|
|
770a1116a1 | ||
|
|
0b4aebbc24 | ||
|
|
f1696e26e4 | ||
|
|
1a7b682301 | ||
|
|
b0110b05aa | ||
|
|
561b490f26 | ||
|
|
cac1f0b42e | ||
|
|
28886fb304 | ||
|
|
fb84d4cf7d | ||
|
|
31e9624556 | ||
|
|
3864bb7115 | ||
|
|
cea8601246 | ||
|
|
a546364bf3 | ||
|
|
c8c62ea562 | ||
|
|
845e8780c7 | ||
|
|
db60347566 | ||
|
|
c3ea0239d6 | ||
|
|
e9d13449bf | ||
|
|
2daa92a639 | ||
|
|
6473080078 | ||
|
|
d3957f65dc | ||
|
|
cb34969f1e | ||
|
|
55a0a8cd05 | ||
|
|
89f08372c6 | ||
|
|
6a3ce2d426 | ||
|
|
4928378d46 | ||
|
|
eea222cfba | ||
|
|
6e9cc2dc77 | ||
|
|
55d45084d0 | ||
|
|
9657a44049 | ||
|
|
51fa9ae3c2 | ||
|
|
5fd256444e | ||
|
|
059716f178 | ||
|
|
0bee1fe2c5 | ||
|
|
1e58c1a958 | ||
|
|
ed1431db0a | ||
|
|
dc769ff159 | ||
|
|
098f23ac4c | ||
|
|
03f577d22f | ||
|
|
95ca217362 | ||
|
|
6d61045764 | ||
|
|
34e075eaf3 | ||
|
|
232dc0bb4e | ||
|
|
0cc0ee2aab | ||
|
|
edebe925ff | ||
|
|
5fd45bbc7b | ||
|
|
0a490dadb2 | ||
|
|
23847c15bc | ||
|
|
0d238aee4f | ||
|
|
98ad6cf5fa | ||
|
|
e35b81630b | ||
|
|
1215852fe4 | ||
|
|
4164b76ff5 | ||
|
|
26a9daffeb | ||
|
|
8bb9f16e9b | ||
|
|
b6eaf76497 | ||
|
|
073893da0e | ||
|
|
e71547f1c2 | ||
|
|
1991627990 | ||
|
|
3434d827a3 | ||
|
|
1ef8b9878a | ||
|
|
07ddaa8377 | ||
|
|
142c08cde4 | ||
|
|
1aa1422faa | ||
|
|
1394e8a6b1 | ||
|
|
420ee10211 | ||
|
|
e918461dc5 | ||
|
|
4dc9ca27be | ||
|
|
f49b186f2f | ||
|
|
6e039b41f1 | ||
|
|
e7cd77b022 | ||
|
|
556cbd04c7 | ||
|
|
4e3d181466 | ||
|
|
5d4326f46f | ||
|
|
4bb486ad0a | ||
|
|
d29c5112d8 | ||
|
|
d41315b8a4 | ||
|
|
847404388c | ||
|
|
eef8ec59b8 | ||
|
|
9eb32f9ff5 | ||
|
|
859bfe67ef | ||
|
|
21ea469cd4 | ||
|
|
7fb902b892 | ||
|
|
c9c4ac47ee | ||
|
|
f228cd31f3 | ||
|
|
4feecb4b97 | ||
|
|
e2680d0942 | ||
|
|
7422c0730d | ||
|
|
37ac0dc7e3 | ||
|
|
dccaca1df4 | ||
|
|
886aea4c36 | ||
|
|
cbca070bae | ||
|
|
b4bdd401f6 | ||
|
|
e546166240 | ||
|
|
21689ce0ad | ||
|
|
941787db64 | ||
|
|
d4b1aacac3 | ||
|
|
30f89461bf | ||
|
|
a42d1397e9 | ||
|
|
b29313c28f | ||
|
|
08a246a90c | ||
|
|
1a08df28d0 | ||
|
|
a226ffc256 | ||
|
|
b385ee5ec3 | ||
|
|
c78c34357d | ||
|
|
4b7c692f00 | ||
|
|
1ac98a096e | ||
|
|
281a2dc1ce | ||
|
|
0fe91378a6 | ||
|
|
11e76d1cf2 | ||
|
|
a3bcd71105 | ||
|
|
3ecc56dd76 | ||
|
|
7239cbb19b | ||
|
|
a0540f7011 | ||
|
|
37aea7605e | ||
|
|
78be913541 | ||
|
|
c34f5ebf49 | ||
|
|
e5822cefb8 | ||
|
|
4baab194cf | ||
|
|
a896583da6 | ||
|
|
7b2674c38b | ||
|
|
d1e32989e3 | ||
|
|
e802bb3882 | ||
|
|
27a38b1bf5 | ||
|
|
2bc8a754be | ||
|
|
7a2a54bec1 | ||
|
|
6a15150d59 | ||
|
|
1b1dca76da | ||
|
|
a032f0f4ff | ||
|
|
2749d49435 | ||
|
|
d88e42ef2d | ||
|
|
a370e7d121 | ||
|
|
d139ad2b3d | ||
|
|
8d2d180398 | ||
|
|
37ca4ca986 | ||
|
|
33e73b8543 | ||
|
|
cf6e36e90c | ||
|
|
9eb8b32f4a | ||
|
|
b400add6f1 | ||
|
|
24adb89d25 | ||
|
|
4674b2badb | ||
|
|
65d1a69cb9 | ||
|
|
0da5718991 | ||
|
|
6b26cd120c | ||
|
|
28e1bb19a4 | ||
|
|
166107ac07 | ||
|
|
d77201880f | ||
|
|
1d7629e9b2 | ||
|
|
198f690ca5 | ||
|
|
531c79a144 | ||
|
|
d685862713 | ||
|
|
af0f245b5b | ||
|
|
cba36861b7 | ||
|
|
2c2c1d47b4 | ||
|
|
3a6b997241 | ||
|
|
7122f79b9d | ||
|
|
9bcee8122b | ||
|
|
a49c98946e | ||
|
|
7d222a7241 | ||
|
|
33501dac3e | ||
|
|
4675dfa736 | ||
|
|
0be51dc784 | ||
|
|
52453d1320 | ||
|
|
25da97ac1a | ||
|
|
02db5a11d3 | ||
|
|
89a5272246 | ||
|
|
ae51ea1ad6 | ||
|
|
3bdb4bea16 | ||
|
|
677bb14b5d | ||
|
|
6700700a80 | ||
|
|
996d4aa129 | ||
|
|
75894a7282 | ||
|
|
2a065edcf1 | ||
|
|
6f3703acfb | ||
|
|
59e989ecdf | ||
|
|
951ff34a9e | ||
|
|
2d83105500 | ||
|
|
3d455f5142 | ||
|
|
01de8c4a9b | ||
|
|
d5de338561 | ||
|
|
58c1afb8ef | ||
|
|
230f357b5a | ||
|
|
991c95fff0 | ||
|
|
f6243fe6b1 | ||
|
|
9feeccba6e | ||
|
|
673c7f3a6b | ||
|
|
39f900d651 | ||
|
|
8a06a0d6ce | ||
|
|
7789ee4f4a | ||
|
|
0472b6a7f7 | ||
|
|
d1d2227d36 | ||
|
|
cea7c5fc5e | ||
|
|
34a9f8eb9e | ||
|
|
494d01aeed | ||
|
|
084e2fec23 | ||
|
|
98d72fc908 | ||
|
|
20ac04fae5 | ||
|
|
a65fd4dca7 | ||
|
|
0873104b5a | ||
|
|
9a7b6ebd51 | ||
|
|
a4153fa28b | ||
|
|
e732da3b05 | ||
|
|
75ffbd559b | ||
|
|
cae80b43e5 | ||
|
|
d924a8ace4 | ||
|
|
dcfad5dc4e | ||
|
|
134d1697e9 | ||
|
|
3094d0036a | ||
|
|
ee5fd55cdb | ||
|
|
0ca126ff23 | ||
|
|
2fa9d9ecce | ||
|
|
118ae9b92c | ||
|
|
2205a81e79 | ||
|
|
e2280f38df | ||
|
|
545196d7eb | ||
|
|
23f8ecc1d9 | ||
|
|
4d401d7f20 | ||
|
|
4165e25332 | ||
|
|
4cc0817b0f | ||
|
|
51cf1e2b05 | ||
|
|
5309c70929 | ||
|
|
1278c62859 | ||
|
|
6d6acdbc0b | ||
|
|
d22000331e | ||
|
|
31034e5b34 | ||
|
|
a43e1f3f52 | ||
|
|
7a3b2b542d | ||
|
|
8d516d6d5f | ||
|
|
3e0d1befbd | ||
|
|
5dc609b206 | ||
|
|
f1127007c3 | ||
|
|
765e5a0df1 |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,6 +1,7 @@
|
||||
target
|
||||
node_modules
|
||||
dist
|
||||
deno.lock
|
||||
.env
|
||||
.env.development
|
||||
.DS_Store
|
||||
@@ -9,5 +10,4 @@ dist
|
||||
/frontend/build
|
||||
/lib/ts_client/build
|
||||
|
||||
creds.toml
|
||||
.dev
|
||||
|
||||
1
.kminclude
Normal file
1
.kminclude
Normal file
@@ -0,0 +1 @@
|
||||
.dev
|
||||
10
.vscode/resolver.code-snippets
vendored
10
.vscode/resolver.code-snippets
vendored
@@ -3,8 +3,8 @@
|
||||
"scope": "rust",
|
||||
"prefix": "resolve",
|
||||
"body": [
|
||||
"impl Resolve<${1}, User> for State {",
|
||||
"\tasync fn resolve(&self, ${1} { ${0} }: ${1}, _: User) -> anyhow::Result<${2}> {",
|
||||
"impl Resolve<${0}> for ${1} {",
|
||||
"\tasync fn resolve(self, _: &${0}) -> Result<Self::Response, Self::Error> {",
|
||||
"\t\ttodo!()",
|
||||
"\t}",
|
||||
"}"
|
||||
@@ -15,9 +15,9 @@
|
||||
"prefix": "static",
|
||||
"body": [
|
||||
"fn ${1}() -> &'static ${2} {",
|
||||
"\tstatic ${3}: OnceLock<${2}> = OnceLock::new();",
|
||||
"\t${3}.get_or_init(|| {",
|
||||
"\t\t${0}",
|
||||
"\tstatic ${0}: OnceLock<${2}> = OnceLock::new();",
|
||||
"\t${0}.get_or_init(|| {",
|
||||
"\t\ttodo!()",
|
||||
"\t})",
|
||||
"}"
|
||||
]
|
||||
|
||||
2925
Cargo.lock
generated
2925
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
126
Cargo.toml
126
Cargo.toml
@@ -8,117 +8,141 @@ members = [
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "1.17.3"
|
||||
version = "2.0.0-dev-59"
|
||||
edition = "2024"
|
||||
authors = ["mbecker20 <becker.maxh@gmail.com>"]
|
||||
license = "GPL-3.0-or-later"
|
||||
repository = "https://github.com/moghtech/komodo"
|
||||
homepage = "https://komo.do"
|
||||
|
||||
[profile.release]
|
||||
strip = "debuginfo"
|
||||
|
||||
[workspace.dependencies]
|
||||
# LOCAL
|
||||
komodo_client = { path = "client/core/rs" }
|
||||
periphery_client = { path = "client/periphery/rs" }
|
||||
environment_file = { path = "lib/environment_file" }
|
||||
environment = { path = "lib/environment" }
|
||||
interpolate = { path = "lib/interpolate" }
|
||||
secret_file = { path = "lib/secret_file" }
|
||||
formatting = { path = "lib/formatting" }
|
||||
transport = { path = "lib/transport" }
|
||||
database = { path = "lib/database" }
|
||||
encoding = { path = "lib/encoding" }
|
||||
response = { path = "lib/response" }
|
||||
command = { path = "lib/command" }
|
||||
config = { path = "lib/config" }
|
||||
logger = { path = "lib/logger" }
|
||||
cache = { path = "lib/cache" }
|
||||
noise = { path = "lib/noise" }
|
||||
git = { path = "lib/git" }
|
||||
|
||||
# MOGH
|
||||
run_command = { version = "0.0.6", features = ["async_tokio"] }
|
||||
serror = { version = "0.5.0", default-features = false }
|
||||
slack = { version = "0.4.0", package = "slack_client_rs", default-features = false, features = ["rustls"] }
|
||||
serror = { version = "0.5.3", default-features = false }
|
||||
slack = { version = "1.2.1", package = "slack_client_rs", default-features = false, features = ["rustls"] }
|
||||
derive_default_builder = "0.1.8"
|
||||
derive_empty_traits = "0.1.0"
|
||||
merge_config_files = "0.1.5"
|
||||
async_timing_util = "1.0.0"
|
||||
async_timing_util = "1.1.0"
|
||||
partial_derive2 = "0.4.3"
|
||||
derive_variants = "1.0.0"
|
||||
mongo_indexed = "2.0.1"
|
||||
mongo_indexed = "2.0.2"
|
||||
resolver_api = "3.0.0"
|
||||
toml_pretty = "1.1.2"
|
||||
mungos = "3.2.0"
|
||||
svi = "1.0.1"
|
||||
toml_pretty = "1.2.0"
|
||||
mungos = "3.2.2"
|
||||
svi = "1.2.0"
|
||||
|
||||
# ASYNC
|
||||
reqwest = { version = "0.12.15", default-features = false, features = ["json", "rustls-tls-native-roots"] }
|
||||
tokio = { version = "1.44.1", features = ["full"] }
|
||||
tokio-util = "0.7.14"
|
||||
reqwest = { version = "0.12.24", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
|
||||
tokio = { version = "1.47.1", features = ["full"] }
|
||||
tokio-util = { version = "0.7.16", features = ["io", "codec"] }
|
||||
tokio-stream = { version = "0.1.17", features = ["sync"] }
|
||||
pin-project-lite = "0.2.16"
|
||||
futures = "0.3.31"
|
||||
futures-util = "0.3.31"
|
||||
arc-swap = "1.7.1"
|
||||
|
||||
# SERVER
|
||||
axum-extra = { version = "0.10.0", features = ["typed-header"] }
|
||||
tower-http = { version = "0.6.2", features = ["fs", "cors"] }
|
||||
tokio-tungstenite = { version = "0.28.0", features = ["rustls-tls-native-roots"] }
|
||||
axum-extra = { version = "0.10.3", features = ["typed-header"] }
|
||||
tower-http = { version = "0.6.6", features = ["fs", "cors"] }
|
||||
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
|
||||
axum = { version = "0.8.1", features = ["ws", "json", "macros"] }
|
||||
tokio-tungstenite = "0.26.2"
|
||||
axum = { version = "0.8.6", features = ["ws", "json", "macros"] }
|
||||
|
||||
# SER/DE
|
||||
ordered_hash_map = { version = "0.4.0", features = ["serde"] }
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
strum = { version = "0.27.1", features = ["derive"] }
|
||||
serde_json = "1.0.140"
|
||||
serde_yaml = "0.9.34"
|
||||
toml = "0.8.20"
|
||||
ipnetwork = { version = "0.21.1", features = ["serde"] }
|
||||
indexmap = { version = "2.11.4", features = ["serde"] }
|
||||
serde = { version = "1.0.227", features = ["derive"] }
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
bson = { version = "2.15.0" } # must keep in sync with mongodb version
|
||||
serde_yaml_ng = "0.10.0"
|
||||
serde_json = "1.0.145"
|
||||
serde_qs = "0.15.0"
|
||||
toml = "0.9.8"
|
||||
url = "2.5.7"
|
||||
|
||||
# ERROR
|
||||
anyhow = "1.0.98"
|
||||
thiserror = "2.0.12"
|
||||
anyhow = "1.0.100"
|
||||
thiserror = "2.0.17"
|
||||
|
||||
# LOGGING
|
||||
opentelemetry-otlp = { version = "0.29.0", features = ["tls-roots", "reqwest-rustls"] }
|
||||
opentelemetry_sdk = { version = "0.29.0", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.19", features = ["json"] }
|
||||
opentelemetry-semantic-conventions = "0.29.0"
|
||||
tracing-opentelemetry = "0.30.0"
|
||||
opentelemetry = "0.29.0"
|
||||
opentelemetry-otlp = { version = "0.31.0", features = ["tls-roots", "reqwest-rustls"] }
|
||||
opentelemetry_sdk = { version = "0.31.0", features = ["rt-tokio"] }
|
||||
tracing-subscriber = { version = "0.3.20", features = ["json"] }
|
||||
opentelemetry-semantic-conventions = "0.31.0"
|
||||
tracing-opentelemetry = "0.32.0"
|
||||
opentelemetry = "0.31.0"
|
||||
tracing = "0.1.41"
|
||||
|
||||
# CONFIG
|
||||
clap = { version = "4.5.36", features = ["derive"] }
|
||||
clap = { version = "4.5.49", features = ["derive"] }
|
||||
dotenvy = "0.15.7"
|
||||
envy = "0.4.2"
|
||||
|
||||
# CRYPTO / AUTH
|
||||
uuid = { version = "1.16.0", features = ["v4", "fast-rng", "serde"] }
|
||||
jsonwebtoken = { version = "9.3.1", default-features = false }
|
||||
openidconnect = "4.0.0"
|
||||
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "serde"] }
|
||||
jsonwebtoken = { version = "10.0.0", features = ["aws_lc_rs"] } # locked back with octorust
|
||||
rustls = { version = "0.23.32", features = ["aws-lc-rs"] }
|
||||
pem-rfc7468 = { version = "0.7.0", features = ["alloc"] }
|
||||
openidconnect = "4.0.1"
|
||||
urlencoding = "2.1.3"
|
||||
nom_pem = "4.0.0"
|
||||
bcrypt = "0.17.0"
|
||||
bcrypt = "0.17.1"
|
||||
base64 = "0.22.1"
|
||||
rustls = "0.23.26"
|
||||
pkcs8 = "0.10.2"
|
||||
snow = "0.10.0"
|
||||
hmac = "0.12.1"
|
||||
sha2 = "0.10.8"
|
||||
rand = "0.9.0"
|
||||
sha1 = "0.10.6"
|
||||
sha2 = "0.10.9"
|
||||
rand = "0.9.2"
|
||||
hex = "0.4.3"
|
||||
spki = "0.7.3"
|
||||
der = "0.7.10"
|
||||
|
||||
# SYSTEM
|
||||
bollard = "0.18.1"
|
||||
sysinfo = "0.34.2"
|
||||
portable-pty = "0.9.0"
|
||||
bollard = "0.19.3"
|
||||
sysinfo = "0.37.1"
|
||||
|
||||
# CLOUD
|
||||
aws-config = "1.6.1"
|
||||
aws-sdk-ec2 = "1.121.1"
|
||||
aws-credential-types = "1.2.2"
|
||||
aws-config = "1.8.8"
|
||||
aws-sdk-ec2 = "1.173.0"
|
||||
aws-credential-types = "1.2.8"
|
||||
|
||||
## CRON
|
||||
english-to-cron = "0.1.4"
|
||||
chrono-tz = "0.10.3"
|
||||
chrono = "0.4.40"
|
||||
croner = "2.1.0"
|
||||
english-to-cron = "0.1.6"
|
||||
chrono-tz = "0.10.4"
|
||||
chrono = "0.4.42"
|
||||
croner = "3.0.0"
|
||||
|
||||
# MISC
|
||||
async-compression = { version = "0.4.32", features = ["tokio", "gzip"] }
|
||||
derive_builder = "0.20.2"
|
||||
shell-escape = "0.1.5"
|
||||
comfy-table = "7.2.1"
|
||||
typeshare = "1.0.4"
|
||||
octorust = "0.10.0"
|
||||
dashmap = "6.1.0"
|
||||
wildcard = "0.3.0"
|
||||
colored = "3.0.0"
|
||||
regex = "1.11.1"
|
||||
bson = "2.14.0"
|
||||
regex = "1.12.2"
|
||||
bytes = "1.10.1"
|
||||
2
action/build.ts
Normal file
2
action/build.ts
Normal file
@@ -0,0 +1,2 @@
|
||||
import { run } from "./run.ts";
|
||||
await run("build-komodo");
|
||||
5
action/deno.json
Normal file
5
action/deno.json
Normal file
@@ -0,0 +1,5 @@
|
||||
{
|
||||
"imports": {
|
||||
"@std/toml": "jsr:@std/toml"
|
||||
}
|
||||
}
|
||||
2
action/deploy.ts
Executable file
2
action/deploy.ts
Executable file
@@ -0,0 +1,2 @@
|
||||
import { run } from "./run.ts";
|
||||
await run("deploy-komodo");
|
||||
52
action/run.ts
Normal file
52
action/run.ts
Normal file
@@ -0,0 +1,52 @@
|
||||
import * as TOML from "@std/toml";
|
||||
|
||||
export const run = async (action: string) => {
|
||||
const branch = await new Deno.Command("bash", {
|
||||
args: ["-c", "git rev-parse --abbrev-ref HEAD"],
|
||||
})
|
||||
.output()
|
||||
.then((r) => new TextDecoder("utf-8").decode(r.stdout).trim());
|
||||
|
||||
const cargo_toml_str = await Deno.readTextFile("Cargo.toml");
|
||||
const prev_version = (
|
||||
TOML.parse(cargo_toml_str) as {
|
||||
workspace: { package: { version: string } };
|
||||
}
|
||||
).workspace.package.version;
|
||||
|
||||
const [version, tag, count] = prev_version.split("-");
|
||||
const next_count = Number(count) + 1;
|
||||
|
||||
const next_version = `${version}-${tag}-${next_count}`;
|
||||
|
||||
await Deno.writeTextFile(
|
||||
"Cargo.toml",
|
||||
cargo_toml_str.replace(
|
||||
`version = "${prev_version}"`,
|
||||
`version = "${next_version}"`
|
||||
)
|
||||
);
|
||||
|
||||
// Cargo check first here to make sure lock file is updated before commit.
|
||||
const cmd = `
|
||||
cargo check
|
||||
echo ""
|
||||
|
||||
git add --all
|
||||
git commit --all --message "deploy ${version}-${tag}-${next_count}"
|
||||
|
||||
echo ""
|
||||
git push
|
||||
echo ""
|
||||
|
||||
km run -y action ${action} "KOMODO_BRANCH=${branch}&KOMODO_VERSION=${version}&KOMODO_TAG=${tag}-${next_count}"
|
||||
`
|
||||
.split("\n")
|
||||
.map((line) => line.trim())
|
||||
.filter((line) => line.length > 0 && !line.startsWith("//"))
|
||||
.join(" && ");
|
||||
|
||||
new Deno.Command("bash", {
|
||||
args: ["-c", cmd],
|
||||
}).spawn();
|
||||
};
|
||||
@@ -1,7 +1,8 @@
|
||||
## Builds the Komodo Core and Periphery binaries
|
||||
## Builds the Komodo Core, Periphery, and Util binaries
|
||||
## for a specific architecture.
|
||||
|
||||
FROM rust:1.86.0-bullseye AS builder
|
||||
FROM rust:1.90.0-bullseye AS builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -10,18 +11,22 @@ COPY ./client/core/rs ./client/core/rs
|
||||
COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/core ./bin/core
|
||||
COPY ./bin/periphery ./bin/periphery
|
||||
COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile bin
|
||||
RUN \
|
||||
cargo build -p komodo_core --release && \
|
||||
cargo build -p komodo_periphery --release
|
||||
cargo build -p komodo_periphery --release && \
|
||||
cargo build -p komodo_cli --release && \
|
||||
cargo strip
|
||||
|
||||
# Copy just the binaries to scratch image
|
||||
FROM scratch
|
||||
|
||||
COPY --from=builder /builder/target/release/core /core
|
||||
COPY --from=builder /builder/target/release/periphery /periphery
|
||||
COPY --from=builder /builder/target/release/km /km
|
||||
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Binaries"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
36
bin/chef.binaries.Dockerfile
Normal file
36
bin/chef.binaries.Dockerfile
Normal file
@@ -0,0 +1,36 @@
|
||||
## Builds the Komodo Core, Periphery, and Util binaries
|
||||
## for a specific architecture.
|
||||
|
||||
## Uses chef for dependency caching to help speed up back-to-back builds.
|
||||
|
||||
FROM lukemathwalker/cargo-chef:latest-rust-1.90.0-bullseye AS chef
|
||||
WORKDIR /builder
|
||||
|
||||
# Plan just the RECIPE to see if things have changed
|
||||
FROM chef AS planner
|
||||
COPY . .
|
||||
RUN cargo chef prepare --recipe-path recipe.json
|
||||
|
||||
FROM chef AS builder
|
||||
RUN cargo install cargo-strip
|
||||
COPY --from=planner /builder/recipe.json recipe.json
|
||||
# Build JUST dependencies - cached layer
|
||||
RUN cargo chef cook --release --recipe-path recipe.json
|
||||
# NOW copy again (this time into builder) and build app
|
||||
COPY . .
|
||||
RUN \
|
||||
cargo build --release --bin core && \
|
||||
cargo build --release --bin periphery && \
|
||||
cargo build --release --bin km && \
|
||||
cargo strip
|
||||
|
||||
# Copy just the binaries to scratch image
|
||||
FROM scratch
|
||||
|
||||
COPY --from=builder /builder/target/release/core /core
|
||||
COPY --from=builder /builder/target/release/periphery /periphery
|
||||
COPY --from=builder /builder/target/release/km /km
|
||||
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Binaries"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -1,30 +1,37 @@
|
||||
[package]
|
||||
name = "komodo_cli"
|
||||
description = "Command line tool to execute Komodo actions"
|
||||
description = "Command line tool for Komodo"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors.workspace = true
|
||||
license.workspace = true
|
||||
homepage.workspace = true
|
||||
repository.workspace = true
|
||||
homepage.workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "komodo"
|
||||
name = "km"
|
||||
path = "src/main.rs"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
# local
|
||||
# komodo_client = "1.16.12"
|
||||
environment_file.workspace = true
|
||||
komodo_client.workspace = true
|
||||
database.workspace = true
|
||||
config.workspace = true
|
||||
logger.workspace = true
|
||||
noise.workspace = true
|
||||
# external
|
||||
tracing-subscriber.workspace = true
|
||||
merge_config_files.workspace = true
|
||||
futures.workspace = true
|
||||
futures-util.workspace = true
|
||||
comfy-table.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_qs.workspace = true
|
||||
wildcard.workspace = true
|
||||
tracing.workspace = true
|
||||
colored.workspace = true
|
||||
dotenvy.workspace = true
|
||||
anyhow.workspace = true
|
||||
chrono.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
clap.workspace = true
|
||||
envy.workspace = true
|
||||
25
bin/cli/aio.Dockerfile
Normal file
25
bin/cli/aio.Dockerfile
Normal file
@@ -0,0 +1,25 @@
|
||||
FROM rust:1.90.0-bullseye AS builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY ./lib ./lib
|
||||
COPY ./client/core/rs ./client/core/rs
|
||||
COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile bin
|
||||
RUN cargo build -p komodo_cli --release && cargo strip
|
||||
|
||||
# Copy binaries to distroless base
|
||||
FROM gcr.io/distroless/cc
|
||||
|
||||
COPY --from=builder /builder/target/release/km /usr/local/bin/km
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
135
bin/cli/docs/copy-database.md
Normal file
135
bin/cli/docs/copy-database.md
Normal file
@@ -0,0 +1,135 @@
|
||||
# Copy Database Utility
|
||||
|
||||
Copy the Komodo database contents between running, mongo-compatible databases.
|
||||
Can be used to move between MongoDB / FerretDB, or upgrade from FerretDB v1 to v2.
|
||||
|
||||
```yaml
|
||||
services:
|
||||
|
||||
copy_database:
|
||||
image: ghcr.io/moghtech/komodo-cli
|
||||
command: km database copy -y
|
||||
environment:
|
||||
KOMODO_DATABASE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@source:27017
|
||||
KOMODO_DATABASE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
KOMODO_CLI_DATABASE_TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@target:27017
|
||||
KOMODO_CLI_DATABASE_TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
|
||||
```
|
||||
|
||||
## FerretDB v2 Update Guide
|
||||
|
||||
Up to Komodo 1.17.5, users who wanted to use Postgres / Sqlite were instructed to deploy FerretDB v1.
|
||||
Now that v2 is out however, v1 will go largely unsupported. Users are recommended to migrate to v2 for
|
||||
the best performance and ongoing support / updates, however the internal data structures
|
||||
have changed and this cannot be done in-place.
|
||||
|
||||
Also note that FerretDB v2 no longer supports Sqlite, and only supports
|
||||
a [customized Postgres distribution](https://docs.ferretdb.io/installation/documentdb/docker/).
|
||||
Nonetheless, it remains a solid option for hosts which [do not support mongo](https://github.com/moghtech/komodo/issues/59).
|
||||
|
||||
Also note, the same basic process outlined below can also be used to move between MongoDB and FerretDB, just replace FerretDB v2
|
||||
with the database you wish to move to.
|
||||
|
||||
### **Step 1**: *Add* the new database to the top of your existing Komodo compose file.
|
||||
|
||||
**Don't forget to also add the new volumes.**
|
||||
|
||||
```yaml
|
||||
## In Komodo compose.yaml
|
||||
services:
|
||||
postgres2:
|
||||
# Recommended: Pin to a specific version
|
||||
# https://github.com/FerretDB/documentdb/pkgs/container/postgres-documentdb
|
||||
image: ghcr.io/ferretdb/postgres-documentdb
|
||||
labels:
|
||||
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
|
||||
restart: unless-stopped
|
||||
# ports:
|
||||
# - 5432:5432
|
||||
volumes:
|
||||
- postgres-data:/var/lib/postgresql/data
|
||||
environment:
|
||||
POSTGRES_USER: ${KOMODO_DB_USERNAME}
|
||||
POSTGRES_PASSWORD: ${KOMODO_DB_PASSWORD}
|
||||
POSTGRES_DB: postgres # Do not change
|
||||
|
||||
ferretdb2:
|
||||
# Recommended: Pin to a specific version
|
||||
# https://github.com/FerretDB/FerretDB/pkgs/container/ferretdb
|
||||
image: ghcr.io/ferretdb/ferretdb
|
||||
labels:
|
||||
komodo.skip: # Prevent Komodo from stopping with StopAllContainers
|
||||
restart: unless-stopped
|
||||
depends_on:
|
||||
- postgres2
|
||||
# ports:
|
||||
# - 27017:27017
|
||||
volumes:
|
||||
- ferretdb-state:/state
|
||||
environment:
|
||||
FERRETDB_POSTGRESQL_URL: postgres://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@postgres2:5432/postgres
|
||||
|
||||
...(unchanged)
|
||||
|
||||
volumes:
|
||||
...(unchanged)
|
||||
postgres-data:
|
||||
ferretdb-state:
|
||||
```
|
||||
|
||||
### **Step 2**: *Add* the database copy utility to Komodo compose file.
|
||||
|
||||
The SOURCE_URI points to the existing database, ie the old FerretDB v1, and it depends
|
||||
on whether it was deployed using Postgres or Sqlite. The example below uses the Postgres one,
|
||||
but if you use Sqlite it should just be something like `mongodb://ferretdb:27017`.
|
||||
|
||||
```yaml
|
||||
## In Komodo compose.yaml
|
||||
services:
|
||||
...(new database)
|
||||
|
||||
copy_database:
|
||||
image: ghcr.io/moghtech/komodo-cli
|
||||
command: km database copy -y
|
||||
environment:
|
||||
KOMODO_DATABASE_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb:27017/${KOMODO_DATABASE_DB_NAME:-komodo}?authMechanism=PLAIN
|
||||
KOMODO_DATABASE_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
KOMODO_CLI_DATABASE_TARGET_URI: mongodb://${KOMODO_DB_USERNAME}:${KOMODO_DB_PASSWORD}@ferretdb2:27017
|
||||
KOMODO_CLI_DATABASE_TARGET_DB_NAME: ${KOMODO_DATABASE_DB_NAME:-komodo}
|
||||
|
||||
...(unchanged)
|
||||
```
|
||||
|
||||
### **Step 3**: *Compose Up* the new additions
|
||||
|
||||
Run `docker compose -p komodo --env-file compose.env -f xxxxx.compose.yaml up -d`, filling in the name of your compose.yaml.
|
||||
This will start up both the old and new database, and copy the data to the new one.
|
||||
|
||||
Wait a few moments for the `copy_database` service to finish. When it exits,
|
||||
confirm the logs show the data was moved successfully, and move on to the next step.
|
||||
|
||||
### **Step 4**: Point Komodo Core to the new database
|
||||
|
||||
In your Komodo compose.yaml, first *comment out* the `copy_database` service and old ferretdb v1 service/s.
|
||||
Then update the `core` service environment to point to `ferretdb2`.
|
||||
|
||||
```yaml
|
||||
services:
|
||||
...
|
||||
|
||||
core:
|
||||
...(unchanged)
|
||||
environment:
|
||||
KOMODO_DATABASE_ADDRESS: ferretdb2:27017
|
||||
KOMODO_DATABASE_USERNAME: ${KOMODO_DB_USERNAME}
|
||||
KOMODO_DATABASE_PASSWORD: ${KOMODO_DB_PASSWORD}
|
||||
```
|
||||
|
||||
### **Step 5**: Final *Compose Up*
|
||||
|
||||
Repeat the same `docker compose` command as before to apply the changes, and then try navigating to your Komodo web page.
|
||||
If it works, congrats, **you are done**. You can clean up the compose file if you would like, removing the old volumes etc.
|
||||
|
||||
If it does not work, check the logs for any obvious issues, and if necessary you can undo the previous steps
|
||||
to go back to using the previous database.
|
||||
29
bin/cli/multi-arch.Dockerfile
Normal file
29
bin/cli/multi-arch.Dockerfile
Normal file
@@ -0,0 +1,29 @@
|
||||
## Assumes the latest binaries for x86_64 and aarch64 are already built (by binaries.Dockerfile).
|
||||
## Since theres no heavy build here, QEMU multi-arch builds are fine for this image.
|
||||
|
||||
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:latest
|
||||
ARG X86_64_BINARIES=${BINARIES_IMAGE}-x86_64
|
||||
ARG AARCH64_BINARIES=${BINARIES_IMAGE}-aarch64
|
||||
|
||||
# This is required to work with COPY --from
|
||||
FROM ${X86_64_BINARIES} AS x86_64
|
||||
FROM ${AARCH64_BINARIES} AS aarch64
|
||||
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
## Copy both binaries initially, but only keep appropriate one for the TARGETPLATFORM.
|
||||
COPY --from=x86_64 /km /app/arch/linux/amd64
|
||||
COPY --from=aarch64 /km /app/arch/linux/arm64
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
RUN mv /app/arch/${TARGETPLATFORM} /usr/local/bin/km && rm -r /app/arch
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
4
bin/cli/runfile.toml
Normal file
4
bin/cli/runfile.toml
Normal file
@@ -0,0 +1,4 @@
|
||||
[install-cli]
|
||||
alias = "ic"
|
||||
description = "installs the komodo-cli, available on the command line as 'km'"
|
||||
cmd = "cargo install --path ."
|
||||
18
bin/cli/single-arch.Dockerfile
Normal file
18
bin/cli/single-arch.Dockerfile
Normal file
@@ -0,0 +1,18 @@
|
||||
## Assumes the latest binaries for the required arch are already built (by binaries.Dockerfile).
|
||||
|
||||
ARG BINARIES_IMAGE=ghcr.io/moghtech/komodo-binaries:latest
|
||||
|
||||
# This is required to work with COPY --from
|
||||
FROM ${BINARIES_IMAGE} AS binaries
|
||||
|
||||
FROM gcr.io/distroless/cc
|
||||
|
||||
COPY --from=binaries /km /usr/local/bin/km
|
||||
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
|
||||
CMD [ "km" ]
|
||||
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo CLI"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
@@ -1,55 +0,0 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
use komodo_client::api::execute::Execution;
|
||||
use serde::Deserialize;
|
||||
|
||||
#[derive(Parser, Debug)]
|
||||
#[command(version, about, long_about = None)]
|
||||
pub struct CliArgs {
|
||||
/// Sync or Exec
|
||||
#[command(subcommand)]
|
||||
pub command: Command,
|
||||
|
||||
/// The path to a creds file.
|
||||
///
|
||||
/// Note: If each of `url`, `key` and `secret` are passed,
|
||||
/// no file is required at this path.
|
||||
#[arg(long, default_value_t = default_creds())]
|
||||
pub creds: String,
|
||||
|
||||
/// Pass url in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub url: Option<String>,
|
||||
/// Pass api key in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub key: Option<String>,
|
||||
/// Pass api secret in args instead of creds file
|
||||
#[arg(long)]
|
||||
pub secret: Option<String>,
|
||||
|
||||
/// Always continue on user confirmation prompts.
|
||||
#[arg(long, short, default_value_t = false)]
|
||||
pub yes: bool,
|
||||
}
|
||||
|
||||
fn default_creds() -> String {
|
||||
let home =
|
||||
std::env::var("HOME").unwrap_or_else(|_| String::from("/root"));
|
||||
format!("{home}/.config/komodo/creds.toml")
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Subcommand)]
|
||||
pub enum Command {
|
||||
/// Runs an execution
|
||||
Execute {
|
||||
#[command(subcommand)]
|
||||
execution: Execution,
|
||||
},
|
||||
// Room for more
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize)]
|
||||
pub struct CredsFile {
|
||||
pub url: String,
|
||||
pub key: String,
|
||||
pub secret: String,
|
||||
}
|
||||
312
bin/cli/src/command/container.rs
Normal file
312
bin/cli/src/command/container.rs
Normal file
@@ -0,0 +1,312 @@
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use comfy_table::{Attribute, Cell, Color};
|
||||
use futures_util::{
|
||||
FutureExt, TryStreamExt, stream::FuturesUnordered,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
InspectDockerContainer, ListAllDockerContainers, ListServers,
|
||||
},
|
||||
entities::{
|
||||
config::cli::args::container::{
|
||||
Container, ContainerCommand, InspectContainer,
|
||||
},
|
||||
docker::{
|
||||
self,
|
||||
container::{ContainerListItem, ContainerStateStatusEnum},
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
command::{
|
||||
PrintTable, clamp_sha, matches_wildcards, parse_wildcards,
|
||||
print_items,
|
||||
},
|
||||
config::cli_config,
|
||||
};
|
||||
|
||||
pub async fn handle(container: &Container) -> anyhow::Result<()> {
|
||||
match &container.command {
|
||||
None => list_containers(container).await,
|
||||
Some(ContainerCommand::Inspect(inspect)) => {
|
||||
inspect_container(inspect).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_containers(
|
||||
Container {
|
||||
all,
|
||||
down,
|
||||
links,
|
||||
reverse,
|
||||
containers: names,
|
||||
images,
|
||||
networks,
|
||||
servers,
|
||||
format,
|
||||
command: _,
|
||||
}: &Container,
|
||||
) -> anyhow::Result<()> {
|
||||
let client = super::komodo_client().await?;
|
||||
let (server_map, containers) = tokio::try_join!(
|
||||
client
|
||||
.read(ListServers::default())
|
||||
.map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|s| (s.id.clone(), s))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
client.read(ListAllDockerContainers {
|
||||
servers: Default::default()
|
||||
}),
|
||||
)?;
|
||||
|
||||
// (Option<Server Name>, Container)
|
||||
let containers = containers.into_iter().map(|c| {
|
||||
let server = if let Some(server_id) = c.server_id.as_ref()
|
||||
&& let Some(server) = server_map.get(server_id)
|
||||
{
|
||||
server
|
||||
} else {
|
||||
return (None, c);
|
||||
};
|
||||
(Some(server.name.as_str()), c)
|
||||
});
|
||||
|
||||
let names = parse_wildcards(names);
|
||||
let servers = parse_wildcards(servers);
|
||||
let images = parse_wildcards(images);
|
||||
let networks = parse_wildcards(networks);
|
||||
|
||||
let mut containers = containers
|
||||
.into_iter()
|
||||
.filter(|(server_name, c)| {
|
||||
let state_check = if *all {
|
||||
true
|
||||
} else if *down {
|
||||
!matches!(c.state, ContainerStateStatusEnum::Running)
|
||||
} else {
|
||||
matches!(c.state, ContainerStateStatusEnum::Running)
|
||||
};
|
||||
let network_check = matches_wildcards(
|
||||
&networks,
|
||||
&c.network_mode
|
||||
.as_deref()
|
||||
.map(|n| vec![n])
|
||||
.unwrap_or_default(),
|
||||
) || matches_wildcards(
|
||||
&networks,
|
||||
&c.networks.iter().map(String::as_str).collect::<Vec<_>>(),
|
||||
);
|
||||
state_check
|
||||
&& network_check
|
||||
&& matches_wildcards(&names, &[c.name.as_str()])
|
||||
&& matches_wildcards(
|
||||
&servers,
|
||||
&server_name
|
||||
.as_deref()
|
||||
.map(|i| vec![i])
|
||||
.unwrap_or_default(),
|
||||
)
|
||||
&& matches_wildcards(
|
||||
&images,
|
||||
&c.image.as_deref().map(|i| vec![i]).unwrap_or_default(),
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
containers.sort_by(|(a_s, a), (b_s, b)| {
|
||||
a.state
|
||||
.cmp(&b.state)
|
||||
.then(a.name.cmp(&b.name))
|
||||
.then(a_s.cmp(b_s))
|
||||
.then(a.network_mode.cmp(&b.network_mode))
|
||||
.then(a.image.cmp(&b.image))
|
||||
});
|
||||
if *reverse {
|
||||
containers.reverse();
|
||||
}
|
||||
print_items(containers, *format, *links)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn inspect_container(
|
||||
inspect: &InspectContainer,
|
||||
) -> anyhow::Result<()> {
|
||||
let client = super::komodo_client().await?;
|
||||
let (server_map, mut containers) = tokio::try_join!(
|
||||
client
|
||||
.read(ListServers::default())
|
||||
.map(|res| res.map(|res| res
|
||||
.into_iter()
|
||||
.map(|s| (s.id.clone(), s))
|
||||
.collect::<HashMap<_, _>>())),
|
||||
client.read(ListAllDockerContainers {
|
||||
servers: Default::default()
|
||||
}),
|
||||
)?;
|
||||
|
||||
containers.iter_mut().for_each(|c| {
|
||||
let Some(server_id) = c.server_id.as_ref() else {
|
||||
return;
|
||||
};
|
||||
let Some(server) = server_map.get(server_id) else {
|
||||
c.server_id = Some(String::from("Unknown"));
|
||||
return;
|
||||
};
|
||||
c.server_id = Some(server.name.clone());
|
||||
});
|
||||
|
||||
let names = [inspect.container.to_string()];
|
||||
let names = parse_wildcards(&names);
|
||||
let servers = parse_wildcards(&inspect.servers);
|
||||
|
||||
let mut containers = containers
|
||||
.into_iter()
|
||||
.filter(|c| {
|
||||
matches_wildcards(&names, &[c.name.as_str()])
|
||||
&& matches_wildcards(
|
||||
&servers,
|
||||
&c.server_id
|
||||
.as_deref()
|
||||
.map(|i| vec![i])
|
||||
.unwrap_or_default(),
|
||||
)
|
||||
})
|
||||
.map(|c| async move {
|
||||
client
|
||||
.read(InspectDockerContainer {
|
||||
container: c.name,
|
||||
server: c.server_id.context("No server...")?,
|
||||
})
|
||||
.await
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await?;
|
||||
|
||||
containers.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
|
||||
match containers.len() {
|
||||
0 => {
|
||||
println!(
|
||||
"{}: Did not find any containers matching '{}'",
|
||||
"INFO".green(),
|
||||
inspect.container.bold()
|
||||
);
|
||||
}
|
||||
1 => {
|
||||
println!("{}", serialize_container(inspect, &containers[0])?);
|
||||
}
|
||||
_ => {
|
||||
let containers = containers
|
||||
.iter()
|
||||
.map(|c| serialize_container(inspect, c))
|
||||
.collect::<anyhow::Result<Vec<_>>>()?
|
||||
.join("\n");
|
||||
println!("{containers}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn serialize_container(
|
||||
inspect: &InspectContainer,
|
||||
container: &docker::container::Container,
|
||||
) -> anyhow::Result<String> {
|
||||
let res = if inspect.state {
|
||||
serde_json::to_string_pretty(&container.state)
|
||||
} else if inspect.mounts {
|
||||
serde_json::to_string_pretty(&container.mounts)
|
||||
} else if inspect.host_config {
|
||||
serde_json::to_string_pretty(&container.host_config)
|
||||
} else if inspect.config {
|
||||
serde_json::to_string_pretty(&container.config)
|
||||
} else if inspect.network_settings {
|
||||
serde_json::to_string_pretty(&container.network_settings)
|
||||
} else {
|
||||
serde_json::to_string_pretty(container)
|
||||
}
|
||||
.context("Failed to serialize items to JSON")?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
// (Option<Server Name>, Container)
|
||||
impl PrintTable for (Option<&'_ str>, ContainerListItem) {
|
||||
fn header(links: bool) -> &'static [&'static str] {
|
||||
if links {
|
||||
&[
|
||||
"Container",
|
||||
"State",
|
||||
"Server",
|
||||
"Ports",
|
||||
"Networks",
|
||||
"Image",
|
||||
"Link",
|
||||
]
|
||||
} else {
|
||||
&["Container", "State", "Server", "Ports", "Networks", "Image"]
|
||||
}
|
||||
}
|
||||
fn row(self, links: bool) -> Vec<Cell> {
|
||||
let color = match self.1.state {
|
||||
ContainerStateStatusEnum::Running => Color::Green,
|
||||
ContainerStateStatusEnum::Paused => Color::DarkYellow,
|
||||
ContainerStateStatusEnum::Empty => Color::Grey,
|
||||
_ => Color::Red,
|
||||
};
|
||||
let mut networks = HashSet::new();
|
||||
if let Some(network) = self.1.network_mode {
|
||||
networks.insert(network);
|
||||
}
|
||||
for network in self.1.networks {
|
||||
networks.insert(network);
|
||||
}
|
||||
let mut networks = networks.into_iter().collect::<Vec<_>>();
|
||||
networks.sort();
|
||||
let mut ports = self
|
||||
.1
|
||||
.ports
|
||||
.into_iter()
|
||||
.flat_map(|p| p.public_port.map(|p| p.to_string()))
|
||||
.collect::<HashSet<_>>()
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
ports.sort();
|
||||
let ports = if ports.is_empty() {
|
||||
Cell::new("")
|
||||
} else {
|
||||
Cell::new(format!(":{}", ports.join(", :")))
|
||||
};
|
||||
|
||||
let image = self.1.image.as_deref().unwrap_or("Unknown");
|
||||
let mut res = vec![
|
||||
Cell::new(self.1.name.clone()).add_attribute(Attribute::Bold),
|
||||
Cell::new(self.1.state.to_string())
|
||||
.fg(color)
|
||||
.add_attribute(Attribute::Bold),
|
||||
Cell::new(self.0.unwrap_or("Unknown")),
|
||||
ports,
|
||||
Cell::new(networks.join(", ")),
|
||||
Cell::new(clamp_sha(image)),
|
||||
];
|
||||
if !links {
|
||||
return res;
|
||||
}
|
||||
let link = if let Some(server_id) = self.1.server_id {
|
||||
format!(
|
||||
"{}/servers/{server_id}/container/{}",
|
||||
cli_config().host,
|
||||
self.1.name
|
||||
)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
res.push(Cell::new(link));
|
||||
res
|
||||
}
|
||||
}
|
||||
366
bin/cli/src/command/database.rs
Normal file
366
bin/cli/src/command/database.rs
Normal file
@@ -0,0 +1,366 @@
|
||||
use std::path::Path;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use database::mungos::mongodb::bson::{Document, doc};
|
||||
use komodo_client::entities::{
|
||||
config::cli::args::database::DatabaseCommand, optional_string,
|
||||
};
|
||||
|
||||
use crate::{command::sanitize_uri, config::cli_config};
|
||||
|
||||
pub async fn handle(command: &DatabaseCommand) -> anyhow::Result<()> {
|
||||
match command {
|
||||
DatabaseCommand::Backup { yes, .. } => backup(*yes).await,
|
||||
DatabaseCommand::Restore {
|
||||
restore_folder,
|
||||
index,
|
||||
yes,
|
||||
..
|
||||
} => restore(restore_folder.as_deref(), *index, *yes).await,
|
||||
DatabaseCommand::Prune { yes, .. } => prune(*yes).await,
|
||||
DatabaseCommand::Copy { yes, index, .. } => {
|
||||
copy(*index, *yes).await
|
||||
}
|
||||
DatabaseCommand::V1Downgrade { yes } => v1_downgrade(*yes).await,
|
||||
}
|
||||
}
|
||||
|
||||
async fn backup(yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Backup".green().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Backup all database contents to gzip compressed files."
|
||||
.dimmed()
|
||||
);
|
||||
if let Some(uri) = optional_string(&config.database.uri) {
|
||||
println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) = optional_string(&config.database.address) {
|
||||
println!("{}: {address}", " - Source Address".dimmed());
|
||||
}
|
||||
if let Some(username) = optional_string(&config.database.username) {
|
||||
println!("{}: {username}", " - Source Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}\n",
|
||||
" - Source Db Name".dimmed(),
|
||||
config.database.db_name,
|
||||
);
|
||||
println!(
|
||||
"{}: {:?}",
|
||||
" - Backups Folder".dimmed(),
|
||||
config.backups_folder
|
||||
);
|
||||
if config.max_backups == 0 {
|
||||
println!(
|
||||
"{}{}",
|
||||
" - Backup pruning".dimmed(),
|
||||
"disabled".red().dimmed()
|
||||
);
|
||||
} else {
|
||||
println!("{}: {}", " - Max Backups".dimmed(), config.max_backups);
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start backup", yes)?;
|
||||
|
||||
let db = database::init(&config.database).await?;
|
||||
|
||||
database::utils::backup(&db, &config.backups_folder).await?;
|
||||
|
||||
// Early return if backup pruning disabled
|
||||
if config.max_backups == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Know that new backup was taken successfully at this point,
|
||||
// safe to prune old backup folders
|
||||
|
||||
prune_inner().await
|
||||
}
|
||||
|
||||
async fn restore(
|
||||
restore_folder: Option<&Path>,
|
||||
index: bool,
|
||||
yes: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Restore".purple().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Restores database contents from gzip compressed files."
|
||||
.dimmed()
|
||||
);
|
||||
if let Some(uri) = optional_string(&config.database_target.uri) {
|
||||
println!("{}: {}", " - Target URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) =
|
||||
optional_string(&config.database_target.address)
|
||||
{
|
||||
println!("{}: {address}", " - Target Address".dimmed());
|
||||
}
|
||||
if let Some(username) =
|
||||
optional_string(&config.database_target.username)
|
||||
{
|
||||
println!("{}: {username}", " - Target Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Name".dimmed(),
|
||||
config.database_target.db_name,
|
||||
);
|
||||
if !index {
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Indexing".dimmed(),
|
||||
"DISABLED".red(),
|
||||
);
|
||||
}
|
||||
println!(
|
||||
"\n{}: {:?}",
|
||||
" - Backups Folder".dimmed(),
|
||||
config.backups_folder
|
||||
);
|
||||
if let Some(restore_folder) = restore_folder {
|
||||
println!("{}: {restore_folder:?}", " - Restore Folder".dimmed());
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start restore", yes)?;
|
||||
|
||||
let db = if index {
|
||||
database::Client::new(&config.database_target).await?.db
|
||||
} else {
|
||||
database::init(&config.database_target).await?
|
||||
};
|
||||
|
||||
database::utils::restore(
|
||||
&db,
|
||||
&config.backups_folder,
|
||||
restore_folder,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn prune(yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Backup Prune".cyan().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Prunes database backup folders when greater than the configured amount."
|
||||
.dimmed()
|
||||
);
|
||||
println!(
|
||||
"{}: {:?}",
|
||||
" - Backups Folder".dimmed(),
|
||||
config.backups_folder
|
||||
);
|
||||
if config.max_backups == 0 {
|
||||
println!(
|
||||
"{}{}",
|
||||
" - Backup pruning".dimmed(),
|
||||
"disabled".red().dimmed()
|
||||
);
|
||||
} else {
|
||||
println!("{}: {}", " - Max Backups".dimmed(), config.max_backups);
|
||||
}
|
||||
|
||||
// Early return if backup pruning disabled
|
||||
if config.max_backups == 0 {
|
||||
info!(
|
||||
"Backup pruning is disabled, enabled using 'max_backups' (KOMODO_CLI_MAX_BACKUPS)"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start backup prune", yes)?;
|
||||
|
||||
prune_inner().await
|
||||
}
|
||||
|
||||
async fn prune_inner() -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
let mut backups_dir =
|
||||
match tokio::fs::read_dir(&config.backups_folder)
|
||||
.await
|
||||
.context("Failed to read backups folder for prune")
|
||||
{
|
||||
Ok(backups_dir) => backups_dir,
|
||||
Err(e) => {
|
||||
warn!("{e:#}");
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
let mut backup_folders = Vec::new();
|
||||
loop {
|
||||
match backups_dir.next_entry().await {
|
||||
Ok(Some(entry)) => {
|
||||
let Ok(metadata) = entry.metadata().await else {
|
||||
continue;
|
||||
};
|
||||
if metadata.is_dir() {
|
||||
backup_folders.push(entry.path());
|
||||
}
|
||||
}
|
||||
Ok(None) => break,
|
||||
Err(_) => {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Ordered from oldest -> newest
|
||||
backup_folders.sort();
|
||||
|
||||
let max_backups = config.max_backups as usize;
|
||||
let backup_folders_len = backup_folders.len();
|
||||
|
||||
// Early return if under the backup count threshold
|
||||
if backup_folders_len <= max_backups {
|
||||
info!("No backups to prune");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let to_delete =
|
||||
&backup_folders[..(backup_folders_len - max_backups)];
|
||||
|
||||
info!("Pruning old backups: {to_delete:?}");
|
||||
|
||||
for path in to_delete {
|
||||
if let Err(e) =
|
||||
tokio::fs::remove_dir_all(path).await.with_context(|| {
|
||||
format!("Failed to delete backup folder at {path:?}")
|
||||
})
|
||||
{
|
||||
warn!("{e:#}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn copy(index: bool, yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} Utility 🦎",
|
||||
"Komodo".bold(),
|
||||
"Copy".blue().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Copies database contents to another database.".dimmed()
|
||||
);
|
||||
|
||||
if let Some(uri) = optional_string(&config.database.uri) {
|
||||
println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) = optional_string(&config.database.address) {
|
||||
println!("{}: {address}", " - Source Address".dimmed());
|
||||
}
|
||||
if let Some(username) = optional_string(&config.database.username) {
|
||||
println!("{}: {username}", " - Source Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}\n",
|
||||
" - Source Db Name".dimmed(),
|
||||
config.database.db_name,
|
||||
);
|
||||
|
||||
if let Some(uri) = optional_string(&config.database_target.uri) {
|
||||
println!("{}: {}", " - Target URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) =
|
||||
optional_string(&config.database_target.address)
|
||||
{
|
||||
println!("{}: {address}", " - Target Address".dimmed());
|
||||
}
|
||||
if let Some(username) =
|
||||
optional_string(&config.database_target.username)
|
||||
{
|
||||
println!("{}: {username}", " - Target Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Name".dimmed(),
|
||||
config.database_target.db_name,
|
||||
);
|
||||
if !index {
|
||||
println!(
|
||||
"{}: {}",
|
||||
" - Target Db Indexing".dimmed(),
|
||||
"DISABLED".red(),
|
||||
);
|
||||
}
|
||||
|
||||
crate::command::wait_for_enter("start copy", yes)?;
|
||||
|
||||
let source_db = database::init(&config.database).await?;
|
||||
let target_db = if index {
|
||||
database::Client::new(&config.database_target).await?.db
|
||||
} else {
|
||||
database::init(&config.database_target).await?
|
||||
};
|
||||
|
||||
database::utils::copy(&source_db, &target_db).await
|
||||
}
|
||||
|
||||
async fn v1_downgrade(yes: bool) -> anyhow::Result<()> {
|
||||
let config = cli_config();
|
||||
|
||||
println!(
|
||||
"\n🦎 {} Database {} 🦎",
|
||||
"Komodo".bold(),
|
||||
"V1 Downgrade".purple().bold()
|
||||
);
|
||||
println!(
|
||||
"\n{}\n",
|
||||
" - Downgrade the database to V1 compatible data structures."
|
||||
.dimmed()
|
||||
);
|
||||
if let Some(uri) = optional_string(&config.database.uri) {
|
||||
println!("{}: {}", " - URI".dimmed(), sanitize_uri(&uri));
|
||||
}
|
||||
if let Some(address) = optional_string(&config.database.address) {
|
||||
println!("{}: {address}", " - Address".dimmed());
|
||||
}
|
||||
if let Some(username) = optional_string(&config.database.username) {
|
||||
println!("{}: {username}", " - Username".dimmed());
|
||||
}
|
||||
println!(
|
||||
"{}: {}\n",
|
||||
" - Db Name".dimmed(),
|
||||
config.database.db_name,
|
||||
);
|
||||
|
||||
crate::command::wait_for_enter("run downgrade", yes)?;
|
||||
|
||||
let db = database::init(&config.database).await?;
|
||||
|
||||
db.collection::<Document>("Server")
|
||||
.update_many(doc! {}, doc! { "$set": { "info": null } })
|
||||
.await
|
||||
.context("Failed to downgrade Server schema")?;
|
||||
|
||||
info!(
|
||||
"V1 Downgrade complete. Ready to downgrade to komodo-core:1 ✅"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
586
bin/cli/src/command/execute.rs
Normal file
586
bin/cli/src/command/execute.rs
Normal file
@@ -0,0 +1,586 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use colored::Colorize;
|
||||
use futures_util::{StreamExt, stream::FuturesUnordered};
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
BatchExecutionResponse, BatchExecutionResponseItem, Execution,
|
||||
},
|
||||
entities::{resource_link, update::Update},
|
||||
};
|
||||
|
||||
use crate::config::cli_config;
|
||||
|
||||
enum ExecutionResult {
|
||||
Single(Box<Update>),
|
||||
Batch(BatchExecutionResponse),
|
||||
}
|
||||
|
||||
pub async fn handle(
|
||||
execution: &Execution,
|
||||
yes: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
if matches!(execution, Execution::None(_)) {
|
||||
println!("Got 'none' execution. Doing nothing...");
|
||||
tokio::time::sleep(Duration::from_secs(3)).await;
|
||||
println!("Finished doing nothing. Exiting...");
|
||||
std::process::exit(0);
|
||||
}
|
||||
|
||||
println!("\n{}: Execution", "Mode".dimmed());
|
||||
match execution {
|
||||
Execution::None(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunAction(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunAction(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunProcedure(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunProcedure(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CancelBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Deploy(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeploy(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDestroyDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CloneRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchCloneRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchPullRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BuildRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchBuildRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CancelRepoBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteNetwork(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneNetworks(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteImage(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneImages(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteVolume(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneVolumes(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneDockerBuilders(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneBuildx(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneSystem(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunSync(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CommitSync(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeployStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeployStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeployStackIfChanged(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeployStackIfChanged(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchPullStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDestroyStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunStackService(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::TestAlerter(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::SendAlert(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::ClearRepoCache(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BackupCoreDatabase(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::GlobalAutoUpdate(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RotateAllServerKeys(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RotateCoreKeys(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Sleep(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
}
|
||||
|
||||
super::wait_for_enter("run execution", yes)?;
|
||||
|
||||
info!("Running Execution...");
|
||||
|
||||
let client = super::komodo_client().await?;
|
||||
|
||||
let res = match execution.clone() {
|
||||
Execution::RunAction(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchRunAction(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::RunProcedure(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchRunProcedure(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::RunBuild(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchRunBuild(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::CancelBuild(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::Deploy(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDeploy(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::PullDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StartDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DestroyDeployment(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDestroyDeployment(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::CloneRepo(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchCloneRepo(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::PullRepo(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchPullRepo(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::BuildRepo(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchBuildRepo(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::CancelRepoBuild(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StartContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DestroyContainer(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StartAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopAllContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneContainers(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeleteNetwork(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneNetworks(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeleteImage(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneImages(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeleteVolume(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneVolumes(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneDockerBuilders(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneBuildx(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PruneSystem(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RunSync(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::CommitSync(request) => client
|
||||
.write(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DeployStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDeployStack(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::DeployStackIfChanged(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDeployStackIfChanged(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::PullStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchPullStack(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::StartStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RestartStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::PauseStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::UnpauseStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::StopStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::DestroyStack(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BatchDestroyStack(request) => {
|
||||
client.execute(request).await.map(ExecutionResult::Batch)
|
||||
}
|
||||
Execution::RunStackService(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::TestAlerter(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::SendAlert(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::ClearRepoCache(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::BackupCoreDatabase(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::GlobalAutoUpdate(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RotateAllServerKeys(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::RotateCoreKeys(request) => client
|
||||
.execute(request)
|
||||
.await
|
||||
.map(|u| ExecutionResult::Single(u.into())),
|
||||
Execution::Sleep(request) => {
|
||||
let duration =
|
||||
Duration::from_millis(request.duration_ms as u64);
|
||||
tokio::time::sleep(duration).await;
|
||||
println!("Finished sleeping!");
|
||||
std::process::exit(0)
|
||||
}
|
||||
Execution::None(_) => unreachable!(),
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(ExecutionResult::Single(update)) => {
|
||||
poll_update_until_complete(&update).await
|
||||
}
|
||||
Ok(ExecutionResult::Batch(updates)) => {
|
||||
let mut handles = updates
|
||||
.iter()
|
||||
.map(|update| async move {
|
||||
match update {
|
||||
BatchExecutionResponseItem::Ok(update) => {
|
||||
poll_update_until_complete(update).await
|
||||
}
|
||||
BatchExecutionResponseItem::Err(e) => {
|
||||
error!("{e:#?}");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>();
|
||||
while let Some(res) = handles.next().await {
|
||||
match res {
|
||||
Ok(()) => {}
|
||||
Err(e) => {
|
||||
error!("{e:#?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("{e:#?}");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn poll_update_until_complete(
|
||||
update: &Update,
|
||||
) -> anyhow::Result<()> {
|
||||
let link = if update.id.is_empty() {
|
||||
let (resource_type, id) = update.target.extract_variant_id();
|
||||
resource_link(&cli_config().host, resource_type, id)
|
||||
} else {
|
||||
format!("{}/updates/{}", cli_config().host, update.id)
|
||||
};
|
||||
println!("Link: '{}'", link.bold());
|
||||
|
||||
let client = super::komodo_client().await?;
|
||||
|
||||
let timer = tokio::time::Instant::now();
|
||||
let update = client.poll_update_until_complete(&update.id).await?;
|
||||
if update.success {
|
||||
println!(
|
||||
"FINISHED in {}: {}",
|
||||
format!("{:.1?}", timer.elapsed()).bold(),
|
||||
"EXECUTION SUCCESSFUL".green(),
|
||||
);
|
||||
} else {
|
||||
eprintln!(
|
||||
"FINISHED in {}: {}",
|
||||
format!("{:.1?}", timer.elapsed()).bold(),
|
||||
"EXECUTION FAILED".red(),
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
1171
bin/cli/src/command/list.rs
Normal file
1171
bin/cli/src/command/list.rs
Normal file
File diff suppressed because it is too large
Load Diff
181
bin/cli/src/command/mod.rs
Normal file
181
bin/cli/src/command/mod.rs
Normal file
@@ -0,0 +1,181 @@
|
||||
use std::io::Read;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use chrono::TimeZone;
|
||||
use colored::Colorize;
|
||||
use comfy_table::{Attribute, Cell, Table};
|
||||
use komodo_client::{
|
||||
KomodoClient,
|
||||
entities::config::cli::{CliTableBorders, args::CliFormat},
|
||||
};
|
||||
use serde::Serialize;
|
||||
use tokio::sync::OnceCell;
|
||||
use wildcard::Wildcard;
|
||||
|
||||
use crate::config::cli_config;
|
||||
|
||||
pub mod container;
|
||||
pub mod database;
|
||||
pub mod execute;
|
||||
pub mod list;
|
||||
pub mod update;
|
||||
|
||||
async fn komodo_client() -> anyhow::Result<&'static KomodoClient> {
|
||||
static KOMODO_CLIENT: OnceCell<KomodoClient> =
|
||||
OnceCell::const_new();
|
||||
KOMODO_CLIENT
|
||||
.get_or_try_init(|| async {
|
||||
let config = cli_config();
|
||||
let (Some(key), Some(secret)) =
|
||||
(&config.cli_key, &config.cli_secret)
|
||||
else {
|
||||
return Err(anyhow!(
|
||||
"Must provide both cli_key and cli_secret"
|
||||
));
|
||||
};
|
||||
KomodoClient::new(&config.host, key, secret)
|
||||
.with_healthcheck()
|
||||
.await
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
fn wait_for_enter(
|
||||
press_enter_to: &str,
|
||||
skip: bool,
|
||||
) -> anyhow::Result<()> {
|
||||
if skip {
|
||||
println!();
|
||||
return Ok(());
|
||||
}
|
||||
println!(
|
||||
"\nPress {} to {}\n",
|
||||
"ENTER".green(),
|
||||
press_enter_to.bold()
|
||||
);
|
||||
let buffer = &mut [0u8];
|
||||
std::io::stdin()
|
||||
.read_exact(buffer)
|
||||
.context("failed to read ENTER")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sanitizes uris of the form:
/// `protocol://username:password@address`
/// by replacing the password (or the whole userinfo when there is no
/// `username:password` split) with `*****`.
fn sanitize_uri(uri: &str) -> String {
  // protocol: `mongodb`
  // credentials_address: `username:password@address`
  let Some((protocol, credentials_address)) = uri.split_once("://")
  else {
    // If no protocol, return as-is
    return uri.to_string();
  };

  // credentials: `username:password`
  // Split on the LAST '@': in URI userinfo the host follows the
  // final '@', so this keeps a password containing '@' fully
  // redacted (split_once on the first '@' would leak its tail).
  let Some((credentials, address)) =
    credentials_address.rsplit_once('@')
  else {
    // If no credentials, return as-is
    return uri.to_string();
  };

  // The username ends at the first ':'; everything after it is the
  // password and is replaced.
  match credentials.split_once(':') {
    Some((username, _)) => {
      format!("{protocol}://{username}:*****@{address}")
    }
    None => {
      format!("{protocol}://*****@{address}")
    }
  }
}
|
||||
|
||||
/// Renders `items` either as a formatted terminal table or as pretty
/// JSON, depending on `format`. `links` is forwarded to the
/// `PrintTable` impl to control whether link columns are included.
fn print_items<T: PrintTable + Serialize>(
  items: Vec<T>,
  format: CliFormat,
  links: bool,
) -> anyhow::Result<()> {
  match format {
    CliFormat::Table => {
      let mut table = Table::new();
      // Map the configured border style to a comfy_table preset.
      // When unset, default to horizontal separators only.
      let preset = {
        use comfy_table::presets::*;
        match cli_config().table_borders {
          None | Some(CliTableBorders::Horizontal) => {
            UTF8_HORIZONTAL_ONLY
          }
          Some(CliTableBorders::Vertical) => UTF8_FULL_CONDENSED,
          Some(CliTableBorders::Inside) => UTF8_NO_BORDERS,
          Some(CliTableBorders::Outside) => UTF8_BORDERS_ONLY,
          Some(CliTableBorders::All) => UTF8_FULL,
        }
      };
      // Bold every header cell.
      table.load_preset(preset).set_header(
        T::header(links)
          .iter()
          .map(|h| Cell::new(h).add_attribute(Attribute::Bold)),
      );
      for item in items {
        table.add_row(item.row(links));
      }
      println!("{table}");
    }
    CliFormat::Json => {
      println!(
        "{}",
        serde_json::to_string_pretty(&items)
          .context("Failed to serialize items to JSON")?
      );
    }
  }
  Ok(())
}
|
||||
|
||||
/// Column / row provider used by `print_items` for table output.
trait PrintTable {
  /// Column headers. `links` indicates whether link columns
  /// should be included.
  fn header(links: bool) -> &'static [&'static str];
  /// Consumes the item and renders it as one table row.
  fn row(self, links: bool) -> Vec<Cell>;
}
|
||||
|
||||
fn parse_wildcards(items: &[String]) -> Vec<Wildcard<'_>> {
|
||||
items
|
||||
.iter()
|
||||
.flat_map(|i| {
|
||||
Wildcard::new(i.as_bytes()).inspect_err(|e| {
|
||||
warn!("Failed to parse wildcard: {i} | {e:?}")
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
}
|
||||
|
||||
fn matches_wildcards(
|
||||
wildcards: &[Wildcard<'_>],
|
||||
items: &[&str],
|
||||
) -> bool {
|
||||
if wildcards.is_empty() {
|
||||
return true;
|
||||
}
|
||||
items.iter().any(|item| {
|
||||
wildcards.iter().any(|wc| wc.is_match(item.as_bytes()))
|
||||
})
|
||||
}
|
||||
|
||||
fn format_timetamp(ts: i64) -> anyhow::Result<String> {
|
||||
let ts = chrono::Local
|
||||
.timestamp_millis_opt(ts)
|
||||
.single()
|
||||
.context("Invalid ts")?
|
||||
.format("%m/%d %H:%M:%S")
|
||||
.to_string();
|
||||
Ok(ts)
|
||||
}
|
||||
|
||||
/// Truncates long `sha256:`-prefixed digests to a 20-character
/// preview followed by `...`; all other strings are returned
/// unchanged.
fn clamp_sha(maybe_sha: &str) -> String {
  // Guard the slice length: a malformed `sha256:` string shorter
  // than 20 bytes would otherwise panic on `[0..20]`.
  if maybe_sha.starts_with("sha256:") && maybe_sha.len() >= 20 {
    maybe_sha[0..20].to_string() + "..."
  } else {
    maybe_sha.to_string()
  }
}
|
||||
|
||||
// fn text_link(link: &str, text: &str) -> String {
|
||||
// format!("\x1b]8;;{link}\x07{text}\x1b]8;;\x07")
|
||||
// }
|
||||
43
bin/cli/src/command/update/mod.rs
Normal file
43
bin/cli/src/command/update/mod.rs
Normal file
@@ -0,0 +1,43 @@
|
||||
use komodo_client::entities::{
|
||||
build::PartialBuildConfig,
|
||||
config::cli::args::update::UpdateCommand,
|
||||
deployment::PartialDeploymentConfig, repo::PartialRepoConfig,
|
||||
server::PartialServerConfig, stack::PartialStackConfig,
|
||||
sync::PartialResourceSyncConfig,
|
||||
};
|
||||
|
||||
mod resource;
|
||||
mod user;
|
||||
mod variable;
|
||||
|
||||
/// Dispatches an `update` subcommand to the matching handler:
/// resource config updates go through the generic
/// `resource::update`, while variables and users have dedicated
/// flows.
pub async fn handle(command: &UpdateCommand) -> anyhow::Result<()> {
  match command {
    UpdateCommand::Build(update) => {
      resource::update::<PartialBuildConfig>(update).await
    }
    UpdateCommand::Deployment(update) => {
      resource::update::<PartialDeploymentConfig>(update).await
    }
    UpdateCommand::Repo(update) => {
      resource::update::<PartialRepoConfig>(update).await
    }
    UpdateCommand::Server(update) => {
      resource::update::<PartialServerConfig>(update).await
    }
    UpdateCommand::Stack(update) => {
      resource::update::<PartialStackConfig>(update).await
    }
    UpdateCommand::Sync(update) => {
      resource::update::<PartialResourceSyncConfig>(update).await
    }
    UpdateCommand::Variable {
      name,
      value,
      secret,
      yes,
    } => variable::update(name, value, *secret, *yes).await,
    UpdateCommand::User { username, command } => {
      user::update(username, command).await
    }
  }
}
|
||||
152
bin/cli/src/command/update/resource.rs
Normal file
152
bin/cli/src/command/update/resource.rs
Normal file
@@ -0,0 +1,152 @@
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
UpdateBuild, UpdateDeployment, UpdateRepo, UpdateResourceSync,
|
||||
UpdateServer, UpdateStack,
|
||||
},
|
||||
entities::{
|
||||
build::PartialBuildConfig,
|
||||
config::cli::args::update::UpdateResource,
|
||||
deployment::PartialDeploymentConfig, repo::PartialRepoConfig,
|
||||
server::PartialServerConfig, stack::PartialStackConfig,
|
||||
sync::PartialResourceSyncConfig,
|
||||
},
|
||||
};
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
|
||||
/// Generic resource-update flow: parses the querystring-style
/// `update` into the partial config type `T`, prints a preview,
/// waits for user confirmation (unless `yes`), then applies it to
/// the resource named by `resource`.
pub async fn update<
  T: std::fmt::Debug + Serialize + DeserializeOwned + ResourceUpdate,
>(
  UpdateResource {
    resource,
    update,
    yes,
  }: &UpdateResource,
) -> anyhow::Result<()> {
  println!("\n{}: Update {}\n", "Mode".dimmed(), T::resource_type());
  println!(" - {}: {resource}", "Name".dimmed());

  // `update` is querystring formatted, eg. `key=value&other=2`.
  let config = serde_qs::from_str::<T>(update)
    .context("Failed to deserialize config")?;

  // Prefer pretty JSON for the preview; fall back to Debug output
  // if the config cannot be serialized.
  match serde_json::to_string_pretty(&config) {
    Ok(config) => {
      println!(" - {}: {config}", "Update".dimmed());
    }
    Err(_) => {
      println!(" - {}: {config:#?}", "Update".dimmed());
    }
  }

  crate::command::wait_for_enter("update resource", *yes)?;

  config.apply(resource).await
}
|
||||
|
||||
/// Per-resource-type glue for the generic `update` flow.
pub trait ResourceUpdate {
  /// Human readable resource type name, eg. "Build".
  fn resource_type() -> &'static str;
  /// Sends this partial config as an update for the named resource.
  async fn apply(self, resource: &str) -> anyhow::Result<()>;
}
|
||||
|
||||
impl ResourceUpdate for PartialBuildConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Build"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateBuild {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update build config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceUpdate for PartialDeploymentConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Deployment"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateDeployment {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update deployment config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceUpdate for PartialRepoConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Repo"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateRepo {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update repo config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceUpdate for PartialServerConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Server"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateServer {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update server config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceUpdate for PartialStackConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Stack"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateStack {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update stack config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceUpdate for PartialResourceSyncConfig {
|
||||
fn resource_type() -> &'static str {
|
||||
"Sync"
|
||||
}
|
||||
async fn apply(self, resource: &str) -> anyhow::Result<()> {
|
||||
let client = crate::command::komodo_client().await?;
|
||||
client
|
||||
.write(UpdateResourceSync {
|
||||
id: resource.to_string(),
|
||||
config: self,
|
||||
})
|
||||
.await
|
||||
.context("Failed to update sync config")?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
122
bin/cli/src/command/update/user.rs
Normal file
122
bin/cli/src/command/update/user.rs
Normal file
@@ -0,0 +1,122 @@
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::entities::{
|
||||
config::{
|
||||
cli::args::{CliEnabled, update::UpdateUserCommand},
|
||||
empty_or_redacted,
|
||||
},
|
||||
optional_string,
|
||||
};
|
||||
|
||||
use crate::{command::sanitize_uri, config::cli_config};
|
||||
|
||||
/// Dispatches a user update subcommand (password or super-admin
/// toggle) for the given username.
pub async fn update(
  username: &str,
  command: &UpdateUserCommand,
) -> anyhow::Result<()> {
  match command {
    UpdateUserCommand::Password {
      password,
      unsanitized,
      yes,
    } => {
      update_password(username, password, *unsanitized, *yes).await
    }
    UpdateUserCommand::SuperAdmin { enabled, yes } => {
      update_super_admin(username, *enabled, *yes).await
    }
  }
}
|
||||
|
||||
/// Sets a new password for `username` directly on the database
/// (not through the API). Prints a preview (redacting the password
/// unless `unsanitized`) and waits for confirmation unless `yes`.
async fn update_password(
  username: &str,
  password: &str,
  unsanitized: bool,
  yes: bool,
) -> anyhow::Result<()> {
  println!("\n{}: Update Password\n", "Mode".dimmed());
  println!(" - {}: {username}", "Username".dimmed());
  if unsanitized {
    // Explicitly requested: show the password in the clear.
    println!(" - {}: {password}", "Password".dimmed());
  } else {
    println!(
      " - {}: {}",
      "Password".dimmed(),
      empty_or_redacted(password)
    );
  }

  crate::command::wait_for_enter("update password", yes)?;

  info!("Updating password...");

  let db = database::Client::new(&cli_config().database).await?;

  // Resolve the user first so a missing username is a clear error.
  let user = db
    .users
    .find_one(doc! { "username": username })
    .await
    .context("Failed to query database for user")?
    .context("No user found with given username")?;

  db.set_user_password(&user, password).await?;

  info!("Password updated ✅");

  Ok(())
}
|
||||
|
||||
/// Enables or disables super-admin for `username` directly on the
/// database. Prints the target database connection details
/// (sanitizing the URI) and waits for confirmation unless `yes`.
async fn update_super_admin(
  username: &str,
  super_admin: CliEnabled,
  yes: bool,
) -> anyhow::Result<()> {
  let config = cli_config();

  println!("\n{}: Update Super Admin\n", "Mode".dimmed());
  println!(" - {}: {username}", "Username".dimmed());
  println!(" - {}: {super_admin}\n", "Super Admin".dimmed());

  // Show where the write will go, so the operator can confirm the
  // correct database before proceeding. Credentials are redacted.
  if let Some(uri) = optional_string(&config.database.uri) {
    println!("{}: {}", " - Source URI".dimmed(), sanitize_uri(&uri));
  }
  if let Some(address) = optional_string(&config.database.address) {
    println!("{}: {address}", " - Source Address".dimmed());
  }
  if let Some(username) = optional_string(&config.database.username) {
    println!("{}: {username}", " - Source Username".dimmed());
  }
  println!(
    "{}: {}",
    " - Source Db Name".dimmed(),
    config.database.db_name,
  );

  crate::command::wait_for_enter("update super admin", yes)?;

  info!("Updating super admin...");

  let db = database::Client::new(&config.database).await?;

  // Make sure the user exists first before saying it is successful.
  let user = db
    .users
    .find_one(doc! { "username": username })
    .await
    .context("Failed to query database for user")?
    .context("No user found with given username")?;

  let super_admin: bool = super_admin.into();
  db.users
    .update_one(
      doc! { "username": user.username },
      doc! { "$set": { "super_admin": super_admin } },
    )
    .await
    .context("Failed to update user super admin on db")?;

  info!("Super admin updated ✅");

  Ok(())
}
|
||||
70
bin/cli/src/command/update/variable.rs
Normal file
70
bin/cli/src/command/update/variable.rs
Normal file
@@ -0,0 +1,70 @@
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
use komodo_client::api::{
|
||||
read::GetVariable,
|
||||
write::{
|
||||
CreateVariable, UpdateVariableIsSecret, UpdateVariableValue,
|
||||
},
|
||||
};
|
||||
|
||||
/// Upserts a variable: creates it if it does not exist, otherwise
/// updates its value and (when `secret` is provided and differs)
/// its is-secret flag. Waits for confirmation unless `yes`.
pub async fn update(
  name: &str,
  value: &str,
  secret: Option<bool>,
  yes: bool,
) -> anyhow::Result<()> {
  println!("\n{}: Update Variable\n", "Mode".dimmed());
  println!(" - {}: {name}", "Name".dimmed());
  println!(" - {}: {value}", "Value".dimmed());
  if let Some(secret) = secret {
    println!(" - {}: {secret}", "Is Secret".dimmed());
  }

  crate::command::wait_for_enter("update variable", yes)?;

  let client = crate::command::komodo_client().await?;

  // NOTE(review): any read error (not only "variable not found",
  // e.g. a transient network failure) takes the create path below —
  // confirm this is intended.
  let Ok(existing) = client
    .read(GetVariable {
      name: name.to_string(),
    })
    .await
  else {
    // Create the variable
    client
      .write(CreateVariable {
        name: name.to_string(),
        value: value.to_string(),
        is_secret: secret.unwrap_or_default(),
        description: Default::default(),
      })
      .await
      .context("Failed to create variable")?;
    info!("Variable created ✅");
    return Ok(());
  };

  client
    .write(UpdateVariableValue {
      name: name.to_string(),
      value: value.to_string(),
    })
    .await
    .context("Failed to update variable 'value'")?;
  info!("Variable 'value' updated ✅");

  // Only touch the is_secret flag when explicitly requested.
  let Some(secret) = secret else { return Ok(()) };

  if secret != existing.is_secret {
    client
      .write(UpdateVariableIsSecret {
        name: name.to_string(),
        is_secret: secret,
      })
      .await
      .context("Failed to update variable 'is_secret'")?;
    info!("Variable 'is_secret' updated to {secret} ✅");
  }

  Ok(())
}
|
||||
280
bin/cli/src/config.rs
Normal file
280
bin/cli/src/config.rs
Normal file
@@ -0,0 +1,280 @@
|
||||
use std::{path::PathBuf, sync::OnceLock};
|
||||
|
||||
use anyhow::Context;
|
||||
use clap::Parser;
|
||||
use colored::Colorize;
|
||||
use environment_file::maybe_read_item_from_file;
|
||||
use komodo_client::entities::{
|
||||
config::{
|
||||
DatabaseConfig,
|
||||
cli::{
|
||||
CliConfig, Env,
|
||||
args::{CliArgs, Command, Execute, database::DatabaseCommand},
|
||||
},
|
||||
},
|
||||
logger::LogConfig,
|
||||
};
|
||||
|
||||
pub fn cli_args() -> &'static CliArgs {
|
||||
static CLI_ARGS: OnceLock<CliArgs> = OnceLock::new();
|
||||
CLI_ARGS.get_or_init(CliArgs::parse)
|
||||
}
|
||||
|
||||
pub fn cli_env() -> &'static Env {
|
||||
static CLI_ARGS: OnceLock<Env> = OnceLock::new();
|
||||
CLI_ARGS.get_or_init(|| {
|
||||
match envy::from_env()
|
||||
.context("Failed to parse Komodo CLI environment")
|
||||
{
|
||||
Ok(env) => env,
|
||||
Err(e) => {
|
||||
panic!("{e:?}")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn cli_config() -> &'static CliConfig {
|
||||
static CLI_CONFIG: OnceLock<CliConfig> = OnceLock::new();
|
||||
CLI_CONFIG.get_or_init(|| {
|
||||
let args = cli_args();
|
||||
let env = cli_env().clone();
|
||||
let config_paths = args
|
||||
.config_path
|
||||
.clone()
|
||||
.unwrap_or(env.komodo_cli_config_paths);
|
||||
let debug_startup =
|
||||
args.debug_startup.unwrap_or(env.komodo_cli_debug_startup);
|
||||
|
||||
if debug_startup {
|
||||
println!(
|
||||
"{}: Komodo CLI version: {}",
|
||||
"DEBUG".cyan(),
|
||||
env!("CARGO_PKG_VERSION").blue().bold()
|
||||
);
|
||||
println!(
|
||||
"{}: {}: {config_paths:?}",
|
||||
"DEBUG".cyan(),
|
||||
"Config Paths".dimmed(),
|
||||
);
|
||||
}
|
||||
|
||||
let config_keywords = args
|
||||
.config_keyword
|
||||
.clone()
|
||||
.unwrap_or(env.komodo_cli_config_keywords);
|
||||
let config_keywords = config_keywords
|
||||
.iter()
|
||||
.map(String::as_str)
|
||||
.collect::<Vec<_>>();
|
||||
if debug_startup {
|
||||
println!(
|
||||
"{}: {}: {config_keywords:?}",
|
||||
"DEBUG".cyan(),
|
||||
"Config File Keywords".dimmed(),
|
||||
);
|
||||
}
|
||||
let mut unparsed_config = (config::ConfigLoader {
|
||||
paths: &config_paths
|
||||
.iter()
|
||||
.map(PathBuf::as_path)
|
||||
.collect::<Vec<_>>(),
|
||||
match_wildcards: &config_keywords,
|
||||
include_file_name: ".kminclude",
|
||||
merge_nested: env.komodo_cli_merge_nested_config,
|
||||
extend_array: env.komodo_cli_extend_config_arrays,
|
||||
debug_print: debug_startup,
|
||||
})
|
||||
.load::<serde_json::Map<String, serde_json::Value>>()
|
||||
.expect("failed at parsing config from paths");
|
||||
let init_parsed_config = serde_json::from_value::<CliConfig>(
|
||||
serde_json::Value::Object(unparsed_config.clone()),
|
||||
)
|
||||
.context("Failed to parse config")
|
||||
.unwrap();
|
||||
|
||||
let (host, key, secret) = match &args.command {
|
||||
Command::Execute(Execute {
|
||||
host, key, secret, ..
|
||||
}) => (host.clone(), key.clone(), secret.clone()),
|
||||
_ => (None, None, None),
|
||||
};
|
||||
|
||||
let backups_folder = match &args.command {
|
||||
Command::Database {
|
||||
command: DatabaseCommand::Backup { backups_folder, .. },
|
||||
} => backups_folder.clone(),
|
||||
Command::Database {
|
||||
command: DatabaseCommand::Restore { backups_folder, .. },
|
||||
} => backups_folder.clone(),
|
||||
_ => None,
|
||||
};
|
||||
let (uri, address, username, password, db_name) =
|
||||
match &args.command {
|
||||
Command::Database {
|
||||
command:
|
||||
DatabaseCommand::Copy {
|
||||
uri,
|
||||
address,
|
||||
username,
|
||||
password,
|
||||
db_name,
|
||||
..
|
||||
},
|
||||
} => (
|
||||
uri.clone(),
|
||||
address.clone(),
|
||||
username.clone(),
|
||||
password.clone(),
|
||||
db_name.clone(),
|
||||
),
|
||||
_ => (None, None, None, None, None),
|
||||
};
|
||||
|
||||
let profile = args
|
||||
.profile
|
||||
.as_ref()
|
||||
.or(init_parsed_config.default_profile.as_ref());
|
||||
|
||||
let unparsed_config = if let Some(profile) = profile
|
||||
&& !profile.is_empty()
|
||||
{
|
||||
// Find the profile config,
|
||||
// then merge it with the Default config.
|
||||
let serde_json::Value::Array(profiles) = unparsed_config
|
||||
.remove("profile")
|
||||
.context("Config has no profiles, but a profile is required")
|
||||
.unwrap()
|
||||
else {
|
||||
panic!("`config.profile` is not array");
|
||||
};
|
||||
let Some(profile_config) = profiles.into_iter().find(|p| {
|
||||
let Ok(parsed) =
|
||||
serde_json::from_value::<CliConfig>(p.clone())
|
||||
else {
|
||||
return false;
|
||||
};
|
||||
&parsed.config_profile == profile
|
||||
|| parsed
|
||||
.config_aliases
|
||||
.iter()
|
||||
.any(|alias| alias == profile)
|
||||
}) else {
|
||||
panic!("No profile matching '{profile}' was found.");
|
||||
};
|
||||
let serde_json::Value::Object(profile_config) = profile_config
|
||||
else {
|
||||
panic!("Profile config is not Object type.");
|
||||
};
|
||||
config::merge_config(
|
||||
unparsed_config,
|
||||
profile_config.clone(),
|
||||
env.komodo_cli_merge_nested_config,
|
||||
env.komodo_cli_extend_config_arrays,
|
||||
)
|
||||
.unwrap_or(profile_config)
|
||||
} else {
|
||||
unparsed_config
|
||||
};
|
||||
let config = serde_json::from_value::<CliConfig>(
|
||||
serde_json::Value::Object(unparsed_config),
|
||||
)
|
||||
.context("Failed to parse final config")
|
||||
.unwrap();
|
||||
let config_profile = if config.config_profile.is_empty() {
|
||||
String::from("None")
|
||||
} else {
|
||||
config.config_profile
|
||||
};
|
||||
|
||||
CliConfig {
|
||||
config_profile,
|
||||
config_aliases: config.config_aliases,
|
||||
default_profile: config.default_profile,
|
||||
table_borders: env
|
||||
.komodo_cli_table_borders
|
||||
.or(config.table_borders),
|
||||
host: host
|
||||
.or(env.komodo_cli_host)
|
||||
.or(env.komodo_host)
|
||||
.unwrap_or(config.host),
|
||||
cli_key: key.or(env.komodo_cli_key).or(config.cli_key),
|
||||
cli_secret: secret
|
||||
.or(env.komodo_cli_secret)
|
||||
.or(config.cli_secret),
|
||||
backups_folder: backups_folder
|
||||
.or(env.komodo_cli_backups_folder)
|
||||
.unwrap_or(config.backups_folder),
|
||||
max_backups: env
|
||||
.komodo_cli_max_backups
|
||||
.unwrap_or(config.max_backups),
|
||||
database_target: DatabaseConfig {
|
||||
uri: uri
|
||||
.or(env.komodo_cli_database_target_uri)
|
||||
.unwrap_or(config.database_target.uri),
|
||||
address: address
|
||||
.or(env.komodo_cli_database_target_address)
|
||||
.unwrap_or(config.database_target.address),
|
||||
username: username
|
||||
.or(env.komodo_cli_database_target_username)
|
||||
.unwrap_or(config.database_target.username),
|
||||
password: password
|
||||
.or(env.komodo_cli_database_target_password)
|
||||
.unwrap_or(config.database_target.password),
|
||||
db_name: db_name
|
||||
.or(env.komodo_cli_database_target_db_name)
|
||||
.unwrap_or(config.database_target.db_name),
|
||||
app_name: config.database_target.app_name,
|
||||
},
|
||||
database: DatabaseConfig {
|
||||
uri: maybe_read_item_from_file(
|
||||
env.komodo_database_uri_file,
|
||||
env.komodo_database_uri,
|
||||
)
|
||||
.unwrap_or(config.database.uri),
|
||||
address: env
|
||||
.komodo_database_address
|
||||
.unwrap_or(config.database.address),
|
||||
username: maybe_read_item_from_file(
|
||||
env.komodo_database_username_file,
|
||||
env.komodo_database_username,
|
||||
)
|
||||
.unwrap_or(config.database.username),
|
||||
password: maybe_read_item_from_file(
|
||||
env.komodo_database_password_file,
|
||||
env.komodo_database_password,
|
||||
)
|
||||
.unwrap_or(config.database.password),
|
||||
db_name: env
|
||||
.komodo_database_db_name
|
||||
.unwrap_or(config.database.db_name),
|
||||
app_name: config.database.app_name,
|
||||
},
|
||||
cli_logging: LogConfig {
|
||||
level: env
|
||||
.komodo_cli_logging_level
|
||||
.unwrap_or(config.cli_logging.level),
|
||||
stdio: env
|
||||
.komodo_cli_logging_stdio
|
||||
.unwrap_or(config.cli_logging.stdio),
|
||||
pretty: env
|
||||
.komodo_cli_logging_pretty
|
||||
.unwrap_or(config.cli_logging.pretty),
|
||||
location: false,
|
||||
ansi: env
|
||||
.komodo_cli_logging_ansi
|
||||
.unwrap_or(config.cli_logging.ansi),
|
||||
otlp_endpoint: env
|
||||
.komodo_cli_logging_otlp_endpoint
|
||||
.unwrap_or(config.cli_logging.otlp_endpoint),
|
||||
opentelemetry_service_name: env
|
||||
.komodo_cli_logging_opentelemetry_service_name
|
||||
.unwrap_or(config.cli_logging.opentelemetry_service_name),
|
||||
opentelemetry_scope_name: env
|
||||
.komodo_cli_logging_opentelemetry_scope_name
|
||||
.unwrap_or(config.cli_logging.opentelemetry_scope_name),
|
||||
},
|
||||
profile: config.profile,
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1,485 +0,0 @@
|
||||
use std::time::Duration;
|
||||
|
||||
use colored::Colorize;
|
||||
use komodo_client::{
|
||||
api::execute::{BatchExecutionResponse, Execution},
|
||||
entities::update::Update,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
helpers::wait_for_enter,
|
||||
state::{cli_args, komodo_client},
|
||||
};
|
||||
|
||||
pub enum ExecutionResult {
|
||||
Single(Update),
|
||||
Batch(BatchExecutionResponse),
|
||||
}
|
||||
|
||||
pub async fn run(execution: Execution) -> anyhow::Result<()> {
|
||||
if matches!(execution, Execution::None(_)) {
|
||||
println!("Got 'none' execution. Doing nothing...");
|
||||
tokio::time::sleep(Duration::from_secs(3)).await;
|
||||
println!("Finished doing nothing. Exiting...");
|
||||
std::process::exit(0);
|
||||
}
|
||||
|
||||
println!("\n{}: Execution", "Mode".dimmed());
|
||||
match &execution {
|
||||
Execution::None(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunAction(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunAction(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunProcedure(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunProcedure(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchRunBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CancelBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Deploy(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeploy(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDestroyDeployment(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CloneRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchCloneRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchPullRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BuildRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchBuildRepo(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CancelRepoBuild(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyContainer(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopAllContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneContainers(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteNetwork(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneNetworks(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteImage(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneImages(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeleteVolume(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneVolumes(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneDockerBuilders(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneBuildx(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PruneSystem(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RunSync(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::CommitSync(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeployStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeployStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DeployStackIfChanged(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDeployStackIfChanged(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PullStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StartStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::RestartStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::PauseStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::UnpauseStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::StopStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::DestroyStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::BatchDestroyStack(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::TestAlerter(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
Execution::Sleep(data) => {
|
||||
println!("{}: {data:?}", "Data".dimmed())
|
||||
}
|
||||
}
|
||||
|
||||
if !cli_args().yes {
|
||||
wait_for_enter("run execution")?;
|
||||
}
|
||||
|
||||
info!("Running Execution...");
|
||||
|
||||
let res = match execution {
|
||||
Execution::RunAction(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchRunAction(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::RunProcedure(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchRunProcedure(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::RunBuild(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchRunBuild(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::CancelBuild(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::Deploy(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchDeploy(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::PullDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StartDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::RestartDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PauseDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::UnpauseDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StopDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DestroyDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchDestroyDeployment(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::CloneRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchCloneRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::PullRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchPullRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::BuildRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchBuildRepo(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::CancelRepoBuild(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StartContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::RestartContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PauseContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::UnpauseContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StopContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DestroyContainer(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StartAllContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::RestartAllContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PauseAllContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::UnpauseAllContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StopAllContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneContainers(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DeleteNetwork(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneNetworks(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DeleteImage(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneImages(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DeleteVolume(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneVolumes(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneDockerBuilders(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneBuildx(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PruneSystem(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::RunSync(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::CommitSync(request) => komodo_client()
|
||||
.write(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DeployStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchDeployStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::DeployStackIfChanged(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchDeployStackIfChanged(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::PullStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StartStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::RestartStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::PauseStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::UnpauseStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::StopStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::DestroyStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::BatchDestroyStack(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Batch),
|
||||
Execution::TestAlerter(request) => komodo_client()
|
||||
.execute(request)
|
||||
.await
|
||||
.map(ExecutionResult::Single),
|
||||
Execution::Sleep(request) => {
|
||||
let duration =
|
||||
Duration::from_millis(request.duration_ms as u64);
|
||||
tokio::time::sleep(duration).await;
|
||||
println!("Finished sleeping!");
|
||||
std::process::exit(0)
|
||||
}
|
||||
Execution::None(_) => unreachable!(),
|
||||
};
|
||||
|
||||
match res {
|
||||
Ok(ExecutionResult::Single(update)) => {
|
||||
println!("\n{}: {update:#?}", "SUCCESS".green())
|
||||
}
|
||||
Ok(ExecutionResult::Batch(update)) => {
|
||||
println!("\n{}: {update:#?}", "SUCCESS".green())
|
||||
}
|
||||
Err(e) => println!("{}\n\n{e:#?}", "ERROR".red()),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
use std::io::Read;
|
||||
|
||||
use anyhow::Context;
|
||||
use colored::Colorize;
|
||||
|
||||
pub fn wait_for_enter(press_enter_to: &str) -> anyhow::Result<()> {
|
||||
println!(
|
||||
"\nPress {} to {}\n",
|
||||
"ENTER".green(),
|
||||
press_enter_to.bold()
|
||||
);
|
||||
let buffer = &mut [0u8];
|
||||
std::io::stdin()
|
||||
.read_exact(buffer)
|
||||
.context("failed to read ENTER")?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,32 +1,75 @@
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
|
||||
use colored::Colorize;
|
||||
use komodo_client::api::read::GetVersion;
|
||||
use anyhow::Context;
|
||||
use komodo_client::entities::config::cli::args;
|
||||
|
||||
mod args;
|
||||
mod exec;
|
||||
mod helpers;
|
||||
mod state;
|
||||
use crate::config::cli_config;
|
||||
|
||||
mod command;
|
||||
mod config;
|
||||
|
||||
async fn app() -> anyhow::Result<()> {
|
||||
dotenvy::dotenv().ok();
|
||||
logger::init(&config::cli_config().cli_logging)?;
|
||||
let args = config::cli_args();
|
||||
let env = config::cli_env();
|
||||
let debug_load =
|
||||
args.debug_startup.unwrap_or(env.komodo_cli_debug_startup);
|
||||
|
||||
match &args.command {
|
||||
args::Command::Config {
|
||||
all_profiles,
|
||||
unsanitized,
|
||||
} => {
|
||||
let mut config = if *unsanitized {
|
||||
cli_config().clone()
|
||||
} else {
|
||||
cli_config().sanitized()
|
||||
};
|
||||
if !*all_profiles {
|
||||
config.profile = Default::default();
|
||||
}
|
||||
if debug_load {
|
||||
println!("\n{config:#?}");
|
||||
} else {
|
||||
println!(
|
||||
"\nCLI Config {}",
|
||||
serde_json::to_string_pretty(&config)
|
||||
.context("Failed to serialize config for pretty print")?
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
args::Command::Key { command } => {
|
||||
noise::key::command::handle(command).await
|
||||
}
|
||||
args::Command::Database { command } => {
|
||||
command::database::handle(command).await
|
||||
}
|
||||
args::Command::Container(container) => {
|
||||
command::container::handle(container).await
|
||||
}
|
||||
args::Command::Inspect(inspect) => {
|
||||
command::container::inspect_container(inspect).await
|
||||
}
|
||||
args::Command::List(list) => command::list::handle(list).await,
|
||||
args::Command::Execute(args) => {
|
||||
command::execute::handle(&args.execution, args.yes).await
|
||||
}
|
||||
args::Command::Update { command } => {
|
||||
command::update::handle(command).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
tracing_subscriber::fmt().with_target(false).init();
|
||||
|
||||
info!(
|
||||
"Komodo CLI version: {}",
|
||||
env!("CARGO_PKG_VERSION").blue().bold()
|
||||
);
|
||||
|
||||
let version =
|
||||
state::komodo_client().read(GetVersion {}).await?.version;
|
||||
info!("Komodo Core version: {}", version.blue().bold());
|
||||
|
||||
match &state::cli_args().command {
|
||||
args::Command::Execute { execution } => {
|
||||
exec::run(execution.to_owned()).await?
|
||||
}
|
||||
let mut term_signal = tokio::signal::unix::signal(
|
||||
tokio::signal::unix::SignalKind::terminate(),
|
||||
)?;
|
||||
tokio::select! {
|
||||
res = tokio::spawn(app()) => res?,
|
||||
_ = term_signal.recv() => Ok(()),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,48 +0,0 @@
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use clap::Parser;
|
||||
use komodo_client::KomodoClient;
|
||||
use merge_config_files::parse_config_file;
|
||||
|
||||
pub fn cli_args() -> &'static crate::args::CliArgs {
|
||||
static CLI_ARGS: OnceLock<crate::args::CliArgs> = OnceLock::new();
|
||||
CLI_ARGS.get_or_init(crate::args::CliArgs::parse)
|
||||
}
|
||||
|
||||
pub fn komodo_client() -> &'static KomodoClient {
|
||||
static KOMODO_CLIENT: OnceLock<KomodoClient> = OnceLock::new();
|
||||
KOMODO_CLIENT.get_or_init(|| {
|
||||
let args = cli_args();
|
||||
let crate::args::CredsFile { url, key, secret } =
|
||||
match (&args.url, &args.key, &args.secret) {
|
||||
(Some(url), Some(key), Some(secret)) => {
|
||||
crate::args::CredsFile {
|
||||
url: url.clone(),
|
||||
key: key.clone(),
|
||||
secret: secret.clone(),
|
||||
}
|
||||
}
|
||||
(url, key, secret) => {
|
||||
let mut creds: crate::args::CredsFile =
|
||||
parse_config_file(cli_args().creds.as_str())
|
||||
.expect("failed to parse Komodo credentials");
|
||||
|
||||
if let Some(url) = url {
|
||||
creds.url.clone_from(url);
|
||||
}
|
||||
if let Some(key) = key {
|
||||
creds.key.clone_from(key);
|
||||
}
|
||||
if let Some(secret) = secret {
|
||||
creds.secret.clone_from(secret);
|
||||
}
|
||||
|
||||
creds
|
||||
}
|
||||
};
|
||||
futures::executor::block_on(
|
||||
KomodoClient::new(url, key, secret).with_healthcheck(),
|
||||
)
|
||||
.expect("failed to initialize Komodo client")
|
||||
})
|
||||
}
|
||||
@@ -18,27 +18,30 @@ path = "src/main.rs"
|
||||
komodo_client = { workspace = true, features = ["mongo"] }
|
||||
periphery_client.workspace = true
|
||||
environment_file.workspace = true
|
||||
interpolate.workspace = true
|
||||
secret_file.workspace = true
|
||||
formatting.workspace = true
|
||||
transport.workspace = true
|
||||
database.workspace = true
|
||||
encoding.workspace = true
|
||||
response.workspace = true
|
||||
command.workspace = true
|
||||
config.workspace = true
|
||||
logger.workspace = true
|
||||
cache.workspace = true
|
||||
noise.workspace = true
|
||||
git.workspace = true
|
||||
# mogh
|
||||
serror = { workspace = true, features = ["axum"] }
|
||||
merge_config_files.workspace = true
|
||||
async_timing_util.workspace = true
|
||||
partial_derive2.workspace = true
|
||||
derive_variants.workspace = true
|
||||
mongo_indexed.workspace = true
|
||||
resolver_api.workspace = true
|
||||
toml_pretty.workspace = true
|
||||
mungos.workspace = true
|
||||
slack.workspace = true
|
||||
svi.workspace = true
|
||||
# external
|
||||
aws-credential-types.workspace = true
|
||||
ordered_hash_map.workspace = true
|
||||
english-to-cron.workspace = true
|
||||
openidconnect.workspace = true
|
||||
jsonwebtoken.workspace = true
|
||||
@@ -50,17 +53,17 @@ tokio-util.workspace = true
|
||||
axum-extra.workspace = true
|
||||
tower-http.workspace = true
|
||||
serde_json.workspace = true
|
||||
serde_yaml.workspace = true
|
||||
serde_yaml_ng.workspace = true
|
||||
typeshare.workspace = true
|
||||
chrono-tz.workspace = true
|
||||
octorust.workspace = true
|
||||
indexmap.workspace = true
|
||||
wildcard.workspace = true
|
||||
arc-swap.workspace = true
|
||||
colored.workspace = true
|
||||
dashmap.workspace = true
|
||||
tracing.workspace = true
|
||||
reqwest.workspace = true
|
||||
futures.workspace = true
|
||||
nom_pem.workspace = true
|
||||
dotenvy.workspace = true
|
||||
anyhow.workspace = true
|
||||
croner.workspace = true
|
||||
@@ -68,14 +71,16 @@ chrono.workspace = true
|
||||
bcrypt.workspace = true
|
||||
base64.workspace = true
|
||||
rustls.workspace = true
|
||||
bytes.workspace = true
|
||||
tokio.workspace = true
|
||||
serde.workspace = true
|
||||
strum.workspace = true
|
||||
regex.workspace = true
|
||||
axum.workspace = true
|
||||
toml.workspace = true
|
||||
uuid.workspace = true
|
||||
envy.workspace = true
|
||||
rand.workspace = true
|
||||
hmac.workspace = true
|
||||
sha2.workspace = true
|
||||
hex.workspace = true
|
||||
url.workspace = true
|
||||
@@ -1,7 +1,8 @@
|
||||
## All in one, multi stage compile + runtime Docker build for your architecture.
|
||||
|
||||
# Build Core
|
||||
FROM rust:1.86.0-bullseye AS core-builder
|
||||
FROM rust:1.90.0-bullseye AS core-builder
|
||||
RUN cargo install cargo-strip
|
||||
|
||||
WORKDIR /builder
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
@@ -9,9 +10,12 @@ COPY ./lib ./lib
|
||||
COPY ./client/core/rs ./client/core/rs
|
||||
COPY ./client/periphery ./client/periphery
|
||||
COPY ./bin/core ./bin/core
|
||||
COPY ./bin/cli ./bin/cli
|
||||
|
||||
# Compile app
|
||||
RUN cargo build -p komodo_core --release
|
||||
RUN cargo build -p komodo_core --release && \
|
||||
cargo build -p komodo_cli --release && \
|
||||
cargo strip
|
||||
|
||||
# Build Frontend
|
||||
FROM node:20.12-alpine AS frontend-builder
|
||||
@@ -24,18 +28,18 @@ RUN cd frontend && yarn link komodo_client && yarn && yarn build
|
||||
# Final Image
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
# Install Deps
|
||||
RUN apt update && \
|
||||
apt install -y git ca-certificates && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
|
||||
|
||||
# Setup an application directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy
|
||||
COPY ./config/core.config.toml /config/config.toml
|
||||
COPY ./config/core.config.toml /config/.default.config.toml
|
||||
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
|
||||
COPY --from=core-builder /builder/target/release/core /usr/local/bin/core
|
||||
COPY --from=core-builder /builder/target/release/km /usr/local/bin/km
|
||||
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
|
||||
|
||||
# Set $DENO_DIR and preload external Deno deps
|
||||
@@ -47,9 +51,15 @@ RUN mkdir /action-cache && \
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
ENTRYPOINT [ "core" ]
|
||||
CMD [ "/bin/bash", "-c", "update-ca-certificates && core" ]
|
||||
|
||||
# Label to prevent Komodo from stopping with StopAllContainers
|
||||
LABEL komodo.skip="true"
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
|
||||
14
bin/core/debian-deps.sh
Normal file
14
bin/core/debian-deps.sh
Normal file
@@ -0,0 +1,14 @@
|
||||
#!/bin/bash
|
||||
|
||||
## Core deps installer
|
||||
|
||||
apt-get update
|
||||
apt-get install -y git curl ca-certificates iproute2
|
||||
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Starship prompt
|
||||
curl -sS https://starship.rs/install.sh | sh -s -- --yes --bin-dir /usr/local/bin
|
||||
echo 'export STARSHIP_CONFIG=/starship.toml' >> /root/.bashrc
|
||||
echo 'eval "$(starship init bash)"' >> /root/.bashrc
|
||||
|
||||
@@ -15,21 +15,26 @@ FROM ${FRONTEND_IMAGE} AS frontend
|
||||
# Final Image
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
# Install Deps
|
||||
RUN apt update && \
|
||||
apt install -y git ca-certificates && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy both binaries initially, but only keep appropriate one for the TARGETPLATFORM.
|
||||
COPY --from=x86_64 /core /app/arch/linux/amd64
|
||||
COPY --from=aarch64 /core /app/arch/linux/arm64
|
||||
ARG TARGETPLATFORM
|
||||
RUN mv /app/arch/${TARGETPLATFORM} /usr/local/bin/core && rm -r /app/arch
|
||||
|
||||
# Copy both binaries initially, but only keep appropriate one for the TARGETPLATFORM.
|
||||
COPY --from=x86_64 /core /app/core/linux/amd64
|
||||
COPY --from=aarch64 /core /app/core/linux/arm64
|
||||
RUN mv /app/core/${TARGETPLATFORM} /usr/local/bin/core && rm -r /app/core
|
||||
|
||||
# Same for util
|
||||
COPY --from=x86_64 /km /app/km/linux/amd64
|
||||
COPY --from=aarch64 /km /app/km/linux/arm64
|
||||
RUN mv /app/km/${TARGETPLATFORM} /usr/local/bin/km && rm -r /app/km
|
||||
|
||||
# Copy default config / static frontend / deno binary
|
||||
COPY ./config/core.config.toml /config/config.toml
|
||||
COPY ./config/core.config.toml /config/.default.config.toml
|
||||
COPY --from=frontend /frontend /app/frontend
|
||||
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
|
||||
|
||||
@@ -42,9 +47,15 @@ RUN mkdir /action-cache && \
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
CMD [ "core" ]
|
||||
CMD [ "/bin/bash", "-c", "update-ca-certificates && core" ]
|
||||
|
||||
# Label to prevent Komodo from stopping with StopAllContainers
|
||||
LABEL komodo.skip="true"
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
|
||||
@@ -16,15 +16,15 @@ RUN cd frontend && yarn link komodo_client && yarn && yarn build
|
||||
|
||||
FROM debian:bullseye-slim
|
||||
|
||||
# Install Deps
|
||||
RUN apt update && \
|
||||
apt install -y git ca-certificates && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
COPY ./bin/core/starship.toml /starship.toml
|
||||
COPY ./bin/core/debian-deps.sh .
|
||||
RUN sh ./debian-deps.sh && rm ./debian-deps.sh
|
||||
|
||||
# Copy
|
||||
COPY ./config/core.config.toml /config/config.toml
|
||||
COPY ./config/core.config.toml /config/.default.config.toml
|
||||
COPY --from=frontend-builder /builder/frontend/dist /app/frontend
|
||||
COPY --from=binaries /core /usr/local/bin/core
|
||||
COPY --from=binaries /km /usr/local/bin/km
|
||||
COPY --from=denoland/deno:bin /deno /usr/local/bin/deno
|
||||
|
||||
# Set $DENO_DIR and preload external Deno deps
|
||||
@@ -36,9 +36,15 @@ RUN mkdir /action-cache && \
|
||||
# Hint at the port
|
||||
EXPOSE 9120
|
||||
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source=https://github.com/moghtech/komodo
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses=GPL-3.0
|
||||
ENV KOMODO_CLI_CONFIG_PATHS="/config"
|
||||
# This ensures any `komodo.cli.*` takes precedence over the Core `/config/*config.*`
|
||||
ENV KOMODO_CLI_CONFIG_KEYWORDS="*config.*,*komodo.cli*.*"
|
||||
|
||||
CMD [ "core" ]
|
||||
CMD [ "/bin/bash", "-c", "update-ca-certificates && core" ]
|
||||
|
||||
# Label to prevent Komodo from stopping with StopAllContainers
|
||||
LABEL komodo.skip="true"
|
||||
# Label for Ghcr
|
||||
LABEL org.opencontainers.image.source="https://github.com/moghtech/komodo"
|
||||
LABEL org.opencontainers.image.description="Komodo Core"
|
||||
LABEL org.opencontainers.image.licenses="GPL-3.0"
|
||||
|
||||
@@ -4,7 +4,6 @@ use serde::Serialize;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
@@ -17,6 +16,28 @@ pub async fn send_alert(
|
||||
"{level} | If you see this message, then Alerter **{name}** is **working**\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | **{name}**{region} | Periphery version now matches Core version ✅\n{link}"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | **{name}**{region} | Version mismatch detected ⚠️\nPeriphery: **{server_version}** | Core: **{core_version}**\n{link}"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
@@ -28,7 +49,7 @@ pub async fn send_alert(
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | **{name}**{region} is now **reachable**\n{link}"
|
||||
"{level} | **{name}**{region} is now **connected**\n{link}"
|
||||
)
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
@@ -207,36 +228,45 @@ pub async fn send_alert(
|
||||
"{level} | **{name}** ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
format!(
|
||||
"{level} | {message}{}",
|
||||
if details.is_empty() {
|
||||
format_args!("")
|
||||
} else {
|
||||
format_args!("\n{details}")
|
||||
}
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if !content.is_empty() {
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
// interpolate variables and secrets into the url
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut url_interpolated,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
send_message(&url_interpolated, &content)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers =
|
||||
secret_replacers.into_iter().collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {}",
|
||||
sanitized_error
|
||||
))
|
||||
})?;
|
||||
if content.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
Ok(())
|
||||
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
send_message(&url_interpolated, &content)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
async fn send_message(
|
||||
|
||||
@@ -1,20 +1,21 @@
|
||||
use ::slack::types::Block;
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use derive_variants::ExtractVariant;
|
||||
use futures::future::join_all;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::entities::{
|
||||
ResourceTargetVariant,
|
||||
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
|
||||
alerter::*,
|
||||
deployment::DeploymentState,
|
||||
komodo_timestamp,
|
||||
stack::StackState,
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use std::collections::HashSet;
|
||||
use tracing::Instrument;
|
||||
|
||||
use crate::helpers::interpolate::interpolate_variables_secrets_into_string;
|
||||
use crate::helpers::query::get_variables_and_secrets;
|
||||
use crate::helpers::{
|
||||
maintenance::is_in_maintenance, query::VariablesAndSecrets,
|
||||
};
|
||||
use crate::{config::core_config, state::db_client};
|
||||
|
||||
mod discord;
|
||||
@@ -22,40 +23,33 @@ mod ntfy;
|
||||
mod pushover;
|
||||
mod slack;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alerts(alerts: &[Alert]) {
|
||||
if alerts.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
let span =
|
||||
info_span!("send_alerts", alerts = format!("{alerts:?}"));
|
||||
async {
|
||||
let Ok(alerters) = find_collect(
|
||||
&db_client().alerters,
|
||||
doc! { "config.enabled": true },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!(
|
||||
let Ok(alerters) = find_collect(
|
||||
&db_client().alerters,
|
||||
doc! { "config.enabled": true },
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!(
|
||||
"ERROR sending alerts | failed to get alerters from db | {e:#}"
|
||||
)
|
||||
}) else {
|
||||
return;
|
||||
};
|
||||
}) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let handles =
|
||||
alerts.iter().map(|alert| send_alert(&alerters, alert));
|
||||
let handles = alerts
|
||||
.iter()
|
||||
.map(|alert| send_alert_to_alerters(&alerters, alert));
|
||||
|
||||
join_all(handles).await;
|
||||
}
|
||||
.instrument(span)
|
||||
.await
|
||||
join_all(handles).await;
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_alert(alerters: &[Alerter], alert: &Alert) {
|
||||
async fn send_alert_to_alerters(alerters: &[Alerter], alert: &Alert) {
|
||||
if alerters.is_empty() {
|
||||
return;
|
||||
}
|
||||
@@ -80,6 +74,13 @@ pub async fn send_alert_to_alerter(
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if is_in_maintenance(
|
||||
&alerter.config.maintenance_windows,
|
||||
komodo_timestamp(),
|
||||
) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let alert_type = alert.data.extract_variant();
|
||||
|
||||
// In the test case, we don't want the filters inside this
|
||||
@@ -130,13 +131,15 @@ pub async fn send_alert_to_alerter(
|
||||
)
|
||||
})
|
||||
}
|
||||
AlerterEndpoint::Ntfy(NtfyAlerterEndpoint { url }) => {
|
||||
ntfy::send_alert(url, alert).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to send alert to ntfy Alerter {}",
|
||||
alerter.name
|
||||
)
|
||||
})
|
||||
AlerterEndpoint::Ntfy(NtfyAlerterEndpoint { url, email }) => {
|
||||
ntfy::send_alert(url, email.as_deref(), alert)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to send alert to ntfy Alerter {}",
|
||||
alerter.name
|
||||
)
|
||||
})
|
||||
}
|
||||
AlerterEndpoint::Pushover(PushoverAlerterEndpoint { url }) => {
|
||||
pushover::send_alert(url, alert).await.with_context(|| {
|
||||
@@ -149,23 +152,18 @@ pub async fn send_alert_to_alerter(
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
async fn send_custom_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
// interpolate variables and secrets into the url
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut url_interpolated,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
let res = reqwest::Client::new()
|
||||
.post(url_interpolated)
|
||||
@@ -173,13 +171,14 @@ async fn send_custom_alert(
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers =
|
||||
secret_replacers.into_iter().collect::<Vec<_>>();
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with request: {}",
|
||||
sanitized_error
|
||||
"Error with request: {sanitized_error}"
|
||||
))
|
||||
})
|
||||
.context("failed at post request to alerter")?;
|
||||
@@ -235,38 +234,244 @@ fn resource_link(
|
||||
resource_type: ResourceTargetVariant,
|
||||
id: &str,
|
||||
) -> String {
|
||||
let path = match resource_type {
|
||||
ResourceTargetVariant::System => unreachable!(),
|
||||
ResourceTargetVariant::Build => format!("/builds/{id}"),
|
||||
ResourceTargetVariant::Builder => {
|
||||
format!("/builders/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Deployment => {
|
||||
format!("/deployments/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Stack => {
|
||||
format!("/stacks/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Server => {
|
||||
format!("/servers/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Repo => format!("/repos/{id}"),
|
||||
ResourceTargetVariant::Alerter => {
|
||||
format!("/alerters/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Procedure => {
|
||||
format!("/procedures/{id}")
|
||||
}
|
||||
ResourceTargetVariant::Action => {
|
||||
format!("/actions/{id}")
|
||||
}
|
||||
ResourceTargetVariant::ServerTemplate => {
|
||||
format!("/server-templates/{id}")
|
||||
}
|
||||
ResourceTargetVariant::ResourceSync => {
|
||||
format!("/resource-syncs/{id}")
|
||||
}
|
||||
};
|
||||
|
||||
format!("{}{path}", core_config().host)
|
||||
komodo_client::entities::resource_link(
|
||||
&core_config().host,
|
||||
resource_type,
|
||||
id,
|
||||
)
|
||||
}
|
||||
|
||||
/// Standard message content format
|
||||
/// used by Ntfy, Pushover.
|
||||
fn standard_alert_content(alert: &Alert) -> String {
|
||||
let level = fmt_level(alert.level);
|
||||
match &alert.data {
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {name} is working\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | {name}{region} | Periphery version now matches Core version ✅\n{link}"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | {name}{region} | Version mismatch detected ⚠️\nPeriphery: {server_version} | Core: {core_version}\n{link}"
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
err,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!("{level} | {name}{region} is now connected\n{link}")
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {e:#?}"))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {name}{region} is unreachable ❌\n{link}{err}"
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {name}{region} cpu usage at {percentage:.1}%\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {name}{region} disk usage at {percentage:.1}%💿\nmount point: {path:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} has an update available\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {name} was updated automatically\nserver: {server_name}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {name} is now {to_state}\nserver: {server_name}\nprevious: {from}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
service,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {name} has an update available\nserver: {server_name}\nservice: {service}\nimage: {image}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
images,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let images_label =
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {name} was updated automatically ⏫\nserver: {server_name}\n{images_label}: {images_str}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {instance_id}\n{message}",
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {name}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build {name} failed\nversion: v{version}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {name} failed\n{link}",)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
format!("{level} | Procedure {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Action, id);
|
||||
format!("{level} | Action {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let link = resource_link(*resource_type, id);
|
||||
format!(
|
||||
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
format!(
|
||||
"{level} | {message}{}",
|
||||
if details.is_empty() {
|
||||
format_args!("")
|
||||
} else {
|
||||
format_args!("\n{details}")
|
||||
}
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,244 +2,56 @@ use std::sync::OnceLock;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
email: Option<&str>,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let level = fmt_level(alert.level);
|
||||
let content = match &alert.data {
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {} is working\n{link}",
|
||||
name,
|
||||
)
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
err,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | {}{} is now reachable\n{link}",
|
||||
name, region
|
||||
)
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {:#?}", e))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {}{} is unreachable ❌\n{link}{err}",
|
||||
name, region
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {}{} cpu usage at {percentage:.1}%\n{link}",
|
||||
name, region,
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {}{} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
name, region,
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {}{} disk usage at {percentage:.1}%💿\nmount point: {:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
name, region, path,
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {} is now {}\nserver: {}\nprevious: {}\n{link}",
|
||||
name, to_state, server_name, from,
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {} has an update available\nserver: {}\nimage: {}\n{link}",
|
||||
name, server_name, image,
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {} was updated automatically\nserver: {}\nimage: {}\n{link}",
|
||||
name, server_name, image,
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {} is now {}\nserver: {}\nprevious: {}\n{link}",
|
||||
name, to_state, server_name, from,
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
service,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {} has an update available\nserver: {}\nservice: {}\nimage: {}\n{link}",
|
||||
name, server_name, service, image,
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
images,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let images_label =
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {} was updated automatically ⏫\nserver: {}\n{}: {}\n{link}",
|
||||
name, server_name, images_label, images_str,
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {}\n{}",
|
||||
instance_id, message,
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {}\n{link}",
|
||||
name,
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build {} failed\nversion: v{}\n{link}",
|
||||
name, version,
|
||||
)
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {} failed\n{link}", name,)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
format!("{level} | Procedure {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Action, id);
|
||||
format!("{level} | Action {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let link = resource_link(*resource_type, id);
|
||||
format!(
|
||||
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
|
||||
if !content.is_empty() {
|
||||
send_message(url, content).await?;
|
||||
let content = standard_alert_content(alert);
|
||||
if content.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
Ok(())
|
||||
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
send_message(&url_interpolated, email, content)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
async fn send_message(
|
||||
url: &str,
|
||||
email: Option<&str>,
|
||||
content: String,
|
||||
) -> anyhow::Result<()> {
|
||||
let response = http_client()
|
||||
let mut request = http_client()
|
||||
.post(url)
|
||||
.header("Title", "ntfy Alert")
|
||||
.body(content)
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to send message")?;
|
||||
.header("Title", "Komodo Alert")
|
||||
.body(content);
|
||||
|
||||
if let Some(email) = email {
|
||||
request = request.header("X-Email", email);
|
||||
}
|
||||
|
||||
let response =
|
||||
request.send().await.context("Failed to send message")?;
|
||||
|
||||
let status = response.status();
|
||||
if status.is_success() {
|
||||
@@ -248,14 +60,11 @@ async fn send_message(
|
||||
} else {
|
||||
let text = response.text().await.with_context(|| {
|
||||
format!(
|
||||
"Failed to send message to ntfy | {} | failed to get response text",
|
||||
status
|
||||
"Failed to send message to ntfy | {status} | failed to get response text"
|
||||
)
|
||||
})?;
|
||||
Err(anyhow!(
|
||||
"Failed to send message to ntfy | {} | {}",
|
||||
status,
|
||||
text
|
||||
"Failed to send message to ntfy | {status} | {text}",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,230 +2,35 @@ use std::sync::OnceLock;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
) -> anyhow::Result<()> {
|
||||
let level = fmt_level(alert.level);
|
||||
let content = match &alert.data {
|
||||
AlertData::Test { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Alerter, id);
|
||||
format!(
|
||||
"{level} | If you see this message, then Alerter {} is working\n{link}",
|
||||
name,
|
||||
)
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
err,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | {}{} is now reachable\n{link}",
|
||||
name, region
|
||||
)
|
||||
}
|
||||
SeverityLevel::Critical => {
|
||||
let err = err
|
||||
.as_ref()
|
||||
.map(|e| format!("\nerror: {:#?}", e))
|
||||
.unwrap_or_default();
|
||||
format!(
|
||||
"{level} | {}{} is unreachable ❌\n{link}{err}",
|
||||
name, region
|
||||
)
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
AlertData::ServerCpu {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
percentage,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
format!(
|
||||
"{level} | {}{} cpu usage at {percentage:.1}%\n{link}",
|
||||
name, region,
|
||||
)
|
||||
}
|
||||
AlertData::ServerMem {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {}{} memory usage at {percentage:.1}%💾\n\nUsing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
name, region,
|
||||
)
|
||||
}
|
||||
AlertData::ServerDisk {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
path,
|
||||
used_gb,
|
||||
total_gb,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let link = resource_link(ResourceTargetVariant::Server, id);
|
||||
let percentage = 100.0 * used_gb / total_gb;
|
||||
format!(
|
||||
"{level} | {}{} disk usage at {percentage:.1}%💿\nmount point: {:?}\nusing {used_gb:.1} GiB / {total_gb:.1} GiB\n{link}",
|
||||
name, region, path,
|
||||
)
|
||||
}
|
||||
AlertData::ContainerStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
let to_state = fmt_docker_container_state(to);
|
||||
format!(
|
||||
"📦Deployment {} is now {}\nserver: {}\nprevious: {}\n{link}",
|
||||
name, to_state, server_name, from,
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {} has an update available\nserver: {}\nimage: {}\n{link}",
|
||||
name, server_name, image,
|
||||
)
|
||||
}
|
||||
AlertData::DeploymentAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Deployment, id);
|
||||
format!(
|
||||
"⬆ Deployment {} was updated automatically\nserver: {}\nimage: {}\n{link}",
|
||||
name, server_name, image,
|
||||
)
|
||||
}
|
||||
AlertData::StackStateChange {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
from,
|
||||
to,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let to_state = fmt_stack_state(to);
|
||||
format!(
|
||||
"🥞 Stack {} is now {}\nserver: {}\nprevious: {}\n{link}",
|
||||
name, to_state, server_name, from,
|
||||
)
|
||||
}
|
||||
AlertData::StackImageUpdateAvailable {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
service,
|
||||
image,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
format!(
|
||||
"⬆ Stack {} has an update available\nserver: {}\nservice: {}\nimage: {}\n{link}",
|
||||
name, server_name, service, image,
|
||||
)
|
||||
}
|
||||
AlertData::StackAutoUpdated {
|
||||
id,
|
||||
name,
|
||||
server_id: _server_id,
|
||||
server_name,
|
||||
images,
|
||||
} => {
|
||||
let link = resource_link(ResourceTargetVariant::Stack, id);
|
||||
let images_label =
|
||||
if images.len() > 1 { "images" } else { "image" };
|
||||
let images_str = images.join(", ");
|
||||
format!(
|
||||
"⬆ Stack {} was updated automatically ⏫\nserver: {}\n{}: {}\n{link}",
|
||||
name, server_name, images_label, images_str,
|
||||
)
|
||||
}
|
||||
AlertData::AwsBuilderTerminationFailed {
|
||||
instance_id,
|
||||
message,
|
||||
} => {
|
||||
format!(
|
||||
"{level} | Failed to terminate AWS builder instance\ninstance id: {}\n{}",
|
||||
instance_id, message,
|
||||
)
|
||||
}
|
||||
AlertData::ResourceSyncPendingUpdates { id, name } => {
|
||||
let link =
|
||||
resource_link(ResourceTargetVariant::ResourceSync, id);
|
||||
format!(
|
||||
"{level} | Pending resource sync updates on {}\n{link}",
|
||||
name,
|
||||
)
|
||||
}
|
||||
AlertData::BuildFailed { id, name, version } => {
|
||||
let link = resource_link(ResourceTargetVariant::Build, id);
|
||||
format!(
|
||||
"{level} | Build {name} failed\nversion: v{version}\n{link}",
|
||||
)
|
||||
}
|
||||
AlertData::RepoBuildFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Repo, id);
|
||||
format!("{level} | Repo build for {} failed\n{link}", name,)
|
||||
}
|
||||
AlertData::ProcedureFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Procedure, id);
|
||||
format!("{level} | Procedure {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ActionFailed { id, name } => {
|
||||
let link = resource_link(ResourceTargetVariant::Action, id);
|
||||
format!("{level} | Action {name} failed\n{link}")
|
||||
}
|
||||
AlertData::ScheduleRun {
|
||||
resource_type,
|
||||
id,
|
||||
name,
|
||||
} => {
|
||||
let link = resource_link(*resource_type, id);
|
||||
format!(
|
||||
"{level} | {name} ({resource_type}) | Scheduled run started 🕝\n{link}"
|
||||
)
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
|
||||
if !content.is_empty() {
|
||||
send_message(url, content).await?;
|
||||
let content = standard_alert_content(alert);
|
||||
if content.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
Ok(())
|
||||
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
send_message(&url_interpolated, content).await.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
async fn send_message(
|
||||
@@ -252,8 +57,7 @@ async fn send_message(
|
||||
} else {
|
||||
let text = response.text().await.with_context(|| {
|
||||
format!(
|
||||
"Failed to send message to pushover | {} | failed to get response text",
|
||||
status
|
||||
"Failed to send message to pushover | {status} | failed to get response text"
|
||||
)
|
||||
})?;
|
||||
Err(anyhow!(
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use ::slack::types::OwnedBlock as Block;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
pub async fn send_alert(
|
||||
url: &str,
|
||||
alert: &Alert,
|
||||
@@ -23,6 +24,35 @@ pub async fn send_alert(
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ServerVersionMismatch {
|
||||
id,
|
||||
name,
|
||||
region,
|
||||
server_version,
|
||||
core_version,
|
||||
} => {
|
||||
let region = fmt_region(region);
|
||||
let text = match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
format!(
|
||||
"{level} | *{name}*{region} | Periphery version now matches Core version ✅"
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
format!(
|
||||
"{level} | *{name}*{region} | Version mismatch detected ⚠️\nPeriphery: {server_version} | Core: {core_version}"
|
||||
)
|
||||
}
|
||||
};
|
||||
let blocks = vec![
|
||||
Block::header(text.clone()),
|
||||
Block::section(resource_link(
|
||||
ResourceTargetVariant::Server,
|
||||
id,
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::ServerUnreachable {
|
||||
id,
|
||||
name,
|
||||
@@ -33,11 +63,11 @@ pub async fn send_alert(
|
||||
match alert.level {
|
||||
SeverityLevel::Ok => {
|
||||
let text =
|
||||
format!("{level} | *{name}*{region} is now *reachable*");
|
||||
format!("{level} | *{name}*{region} is now *connected*");
|
||||
let blocks = vec![
|
||||
Block::header(level),
|
||||
Block::section(format!(
|
||||
"*{name}*{region} is now *reachable*"
|
||||
"*{name}*{region} is now *connnected*"
|
||||
)),
|
||||
];
|
||||
(text, blocks.into())
|
||||
@@ -429,33 +459,40 @@ pub async fn send_alert(
|
||||
];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::Custom { message, details } => {
|
||||
let text = format!("{level} | {message}");
|
||||
let blocks =
|
||||
vec![Block::header(text.clone()), Block::section(details)];
|
||||
(text, blocks.into())
|
||||
}
|
||||
AlertData::None {} => Default::default(),
|
||||
};
|
||||
if !text.is_empty() {
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
let mut url_interpolated = url.to_string();
|
||||
if text.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
let mut url_interpolated = url.to_string();
|
||||
|
||||
// interpolate variables and secrets into the url
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut url_interpolated,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
let slack = ::slack::Client::new(url_interpolated);
|
||||
slack.send_message(text, blocks).await.map_err(|e| {
|
||||
let replacers =
|
||||
secret_replacers.into_iter().collect::<Vec<_>>();
|
||||
interpolator.interpolate_string(&mut url_interpolated)?;
|
||||
|
||||
let slack = ::slack::Client::new(url_interpolated);
|
||||
slack
|
||||
.send_owned_message_single(&text, None, blocks.as_deref())
|
||||
.await
|
||||
.map_err(|e| {
|
||||
let replacers = interpolator
|
||||
.secret_replacers
|
||||
.into_iter()
|
||||
.collect::<Vec<_>>();
|
||||
let sanitized_error =
|
||||
svi::replace_in_string(&format!("{e:?}"), &replacers);
|
||||
anyhow::Error::msg(format!(
|
||||
"Error with slack request: {}",
|
||||
sanitized_error
|
||||
"Error with slack request: {sanitized_error}"
|
||||
))
|
||||
})?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
use std::{sync::OnceLock, time::Instant};
|
||||
|
||||
use axum::{Router, http::HeaderMap, routing::post};
|
||||
use axum::{Router, extract::Path, http::HeaderMap, routing::post};
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use komodo_client::{api::auth::*, entities::user::User};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serror::Json;
|
||||
use serde_json::json;
|
||||
use serror::{AddStatusCode, Json};
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -15,13 +17,16 @@ use crate::{
|
||||
get_user_id_from_headers,
|
||||
github::{self, client::github_oauth_client},
|
||||
google::{self, client::google_oauth_client},
|
||||
oidc,
|
||||
oidc::{self, client::oidc_client},
|
||||
},
|
||||
config::core_config,
|
||||
helpers::query::get_user,
|
||||
state::jwt_client,
|
||||
};
|
||||
|
||||
use super::Variant;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct AuthArgs {
|
||||
pub headers: HeaderMap,
|
||||
}
|
||||
@@ -38,14 +43,16 @@ pub struct AuthArgs {
|
||||
#[allow(clippy::enum_variant_names, clippy::large_enum_variant)]
|
||||
pub enum AuthRequest {
|
||||
GetLoginOptions(GetLoginOptions),
|
||||
CreateLocalUser(CreateLocalUser),
|
||||
SignUpLocalUser(SignUpLocalUser),
|
||||
LoginLocalUser(LoginLocalUser),
|
||||
ExchangeForJwt(ExchangeForJwt),
|
||||
GetUser(GetUser),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
let mut router = Router::new().route("/", post(handler));
|
||||
let mut router = Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler));
|
||||
|
||||
if core_config().local_auth {
|
||||
info!("🔑 Local Login Enabled");
|
||||
@@ -57,7 +64,7 @@ pub fn router() -> Router {
|
||||
}
|
||||
|
||||
if google_oauth_client().is_some() {
|
||||
info!("🔑 Github Login Enabled");
|
||||
info!("🔑 Google Login Enabled");
|
||||
router = router.nest("/google", google::router())
|
||||
}
|
||||
|
||||
@@ -69,7 +76,18 @@ pub fn router() -> Router {
|
||||
router
|
||||
}
|
||||
|
||||
#[instrument(name = "AuthHandler", level = "debug", skip(headers))]
|
||||
async fn variant_handler(
|
||||
headers: HeaderMap,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let req: AuthRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(headers, Json(req)).await
|
||||
}
|
||||
|
||||
async fn handler(
|
||||
headers: HeaderMap,
|
||||
Json(request): Json<AuthRequest>,
|
||||
@@ -97,22 +115,15 @@ fn login_options_reponse() -> &'static GetLoginOptionsResponse {
|
||||
let config = core_config();
|
||||
GetLoginOptionsResponse {
|
||||
local: config.local_auth,
|
||||
github: config.github_oauth.enabled
|
||||
&& !config.github_oauth.id.is_empty()
|
||||
&& !config.github_oauth.secret.is_empty(),
|
||||
google: config.google_oauth.enabled
|
||||
&& !config.google_oauth.id.is_empty()
|
||||
&& !config.google_oauth.secret.is_empty(),
|
||||
oidc: config.oidc_enabled
|
||||
&& !config.oidc_provider.is_empty()
|
||||
&& !config.oidc_client_id.is_empty(),
|
||||
github: github_oauth_client().is_some(),
|
||||
google: google_oauth_client().is_some(),
|
||||
oidc: oidc_client().load().is_some(),
|
||||
registration_disabled: config.disable_user_registration,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
impl Resolve<AuthArgs> for GetLoginOptions {
|
||||
#[instrument(name = "GetLoginOptions", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &AuthArgs,
|
||||
@@ -122,23 +133,27 @@ impl Resolve<AuthArgs> for GetLoginOptions {
|
||||
}
|
||||
|
||||
impl Resolve<AuthArgs> for ExchangeForJwt {
|
||||
#[instrument(name = "ExchangeForJwt", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &AuthArgs,
|
||||
) -> serror::Result<ExchangeForJwtResponse> {
|
||||
let jwt = jwt_client().redeem_exchange_token(&self.token).await?;
|
||||
Ok(ExchangeForJwtResponse { jwt })
|
||||
jwt_client()
|
||||
.redeem_exchange_token(&self.token)
|
||||
.await
|
||||
.map_err(Into::into)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<AuthArgs> for GetUser {
|
||||
#[instrument(name = "GetUser", level = "debug", skip(self))]
|
||||
async fn resolve(
|
||||
self,
|
||||
AuthArgs { headers }: &AuthArgs,
|
||||
) -> serror::Result<User> {
|
||||
let user_id = get_user_id_from_headers(headers).await?;
|
||||
Ok(get_user(&user_id).await?)
|
||||
let user_id = get_user_id_from_headers(headers)
|
||||
.await
|
||||
.status_code(StatusCode::UNAUTHORIZED)?;
|
||||
get_user(&user_id)
|
||||
.await
|
||||
.status_code(StatusCode::UNAUTHORIZED)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,28 +1,34 @@
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
path::{Path, PathBuf},
|
||||
str::FromStr,
|
||||
sync::OnceLock,
|
||||
};
|
||||
|
||||
use anyhow::Context;
|
||||
use command::run_komodo_command;
|
||||
use config::merge_objects;
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_document,
|
||||
};
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
api::{
|
||||
execute::{BatchExecutionResponse, BatchRunAction, RunAction},
|
||||
user::{CreateApiKey, CreateApiKeyResponse, DeleteApiKey},
|
||||
},
|
||||
entities::{
|
||||
FileFormat, JsonObject,
|
||||
action::Action,
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
config::core::CoreConfig,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
random_string,
|
||||
update::Update,
|
||||
user::action_user,
|
||||
},
|
||||
parsers::parse_key_value_list,
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::fs;
|
||||
|
||||
@@ -31,15 +37,11 @@ use crate::{
|
||||
api::{execute::ExecuteRequest, user::UserArgs},
|
||||
config::core_config,
|
||||
helpers::{
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_string,
|
||||
},
|
||||
query::get_variables_and_secrets,
|
||||
random_string,
|
||||
query::{VariablesAndSecrets, get_variables_and_secrets},
|
||||
update::update_update,
|
||||
},
|
||||
resource::{self, refresh_action_state_cache},
|
||||
permission::get_check_permissions,
|
||||
resource::refresh_action_state_cache,
|
||||
state::{action_states, db_client},
|
||||
};
|
||||
|
||||
@@ -48,15 +50,26 @@ use super::ExecuteArgs;
|
||||
impl super::BatchExecute for BatchRunAction {
|
||||
type Resource = Action;
|
||||
fn single_request(action: String) -> ExecuteRequest {
|
||||
ExecuteRequest::RunAction(RunAction { action })
|
||||
ExecuteRequest::RunAction(RunAction {
|
||||
action,
|
||||
args: Default::default(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchRunAction {
|
||||
#[instrument(name = "BatchRunAction", skip(self, user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchRunAction",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchRunAction>(&self.pattern, user)
|
||||
@@ -66,15 +79,24 @@ impl Resolve<ExecuteArgs> for BatchRunAction {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunAction {
|
||||
#[instrument(name = "RunAction", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RunAction",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
action = self.action,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut action = resource::get_check_permissions::<Action>(
|
||||
let mut action = get_check_permissions::<Action>(
|
||||
&self.action,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -86,13 +108,33 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
|
||||
// This will set action state back to default when dropped.
|
||||
// Will also check to ensure action not already busy before updating.
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.running = true)?;
|
||||
let _action_guard = action_state.update_custom(
|
||||
|state| state.running += 1,
|
||||
|state| state.running -= 1,
|
||||
false,
|
||||
)?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let default_args = parse_action_arguments(
|
||||
&action.config.arguments,
|
||||
action.config.arguments_format,
|
||||
)
|
||||
.context("Failed to parse default Action arguments")?;
|
||||
|
||||
let args = merge_objects(
|
||||
default_args,
|
||||
self.args.unwrap_or_default(),
|
||||
true,
|
||||
true,
|
||||
)
|
||||
.context("Failed to merge request args with default args")?;
|
||||
|
||||
let args = serde_json::to_string(&args)
|
||||
.context("Failed to serialize action run arguments")?;
|
||||
|
||||
let CreateApiKeyResponse { key, secret } = CreateApiKey {
|
||||
name: update.id.clone(),
|
||||
expires: 0,
|
||||
@@ -105,7 +147,7 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
let contents = &mut action.config.file_contents;
|
||||
|
||||
// Wrap the file contents in the execution context.
|
||||
*contents = full_contents(contents, &key, &secret);
|
||||
*contents = full_contents(contents, &args, &key, &secret);
|
||||
|
||||
let replacers =
|
||||
interpolate(contents, &mut update, key.clone(), secret.clone())
|
||||
@@ -116,15 +158,11 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
let file = format!("{}.ts", random_string(10));
|
||||
let path = core_config().action_directory.join(&file);
|
||||
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent)
|
||||
.await
|
||||
.with_context(|| format!("Failed to initialize Action file parent directory {parent:?}"))?;
|
||||
}
|
||||
|
||||
fs::write(&path, contents).await.with_context(|| {
|
||||
format!("Failed to write action file to {path:?}")
|
||||
})?;
|
||||
secret_file::write_async(&path, contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Failed to write action file to {path:?}")
|
||||
})?;
|
||||
|
||||
let CoreConfig { ssl_enabled, .. } = core_config();
|
||||
|
||||
@@ -134,12 +172,18 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
""
|
||||
};
|
||||
|
||||
let reload = if action.config.reload_deno_deps {
|
||||
" --reload"
|
||||
} else {
|
||||
""
|
||||
};
|
||||
|
||||
let mut res = run_komodo_command(
|
||||
// Keep this stage name as is, the UI will find the latest update log by matching the stage name
|
||||
"Execute Action",
|
||||
None,
|
||||
format!(
|
||||
"deno run --allow-all{https_cert_flag} {}",
|
||||
"deno run --allow-all{https_cert_flag}{reload} {}",
|
||||
path.display()
|
||||
),
|
||||
)
|
||||
@@ -175,7 +219,7 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -185,7 +229,6 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if !update.success && action.config.failure_alert {
|
||||
warn!("action unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
@@ -208,44 +251,45 @@ impl Resolve<ExecuteArgs> for RunAction {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("Interpolate", skip(contents, update, secret))]
|
||||
async fn interpolate(
|
||||
contents: &mut String,
|
||||
update: &mut Update,
|
||||
key: String,
|
||||
secret: String,
|
||||
) -> serror::Result<HashSet<(String, String)>> {
|
||||
let mut vars_and_secrets = get_variables_and_secrets().await?;
|
||||
let VariablesAndSecrets {
|
||||
variables,
|
||||
mut secrets,
|
||||
} = get_variables_and_secrets().await?;
|
||||
|
||||
vars_and_secrets
|
||||
.secrets
|
||||
.insert(String::from("ACTION_API_KEY"), key);
|
||||
vars_and_secrets
|
||||
.secrets
|
||||
.insert(String::from("ACTION_API_SECRET"), secret);
|
||||
secrets.insert(String::from("ACTION_API_KEY"), key);
|
||||
secrets.insert(String::from("ACTION_API_SECRET"), secret);
|
||||
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
contents,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
interpolator
|
||||
.interpolate_string(contents)?
|
||||
.push_logs(&mut update.logs);
|
||||
|
||||
add_interp_update_log(update, &global_replacers, &secret_replacers);
|
||||
|
||||
Ok(secret_replacers)
|
||||
Ok(interpolator.secret_replacers)
|
||||
}
|
||||
|
||||
fn full_contents(contents: &str, key: &str, secret: &str) -> String {
|
||||
fn full_contents(
|
||||
contents: &str,
|
||||
// Pre-serialized to JSON string.
|
||||
args: &str,
|
||||
key: &str,
|
||||
secret: &str,
|
||||
) -> String {
|
||||
let CoreConfig {
|
||||
port, ssl_enabled, ..
|
||||
} = core_config();
|
||||
let protocol = if *ssl_enabled { "https" } else { "http" };
|
||||
let base_url = format!("{protocol}://localhost:{port}");
|
||||
format!(
|
||||
"import {{ KomodoClient }} from '{base_url}/client/lib.js';
|
||||
"import {{ KomodoClient, Types }} from '{base_url}/client/lib.js';
|
||||
import * as __YAML__ from 'jsr:@std/yaml';
|
||||
import * as __TOML__ from 'jsr:@std/toml';
|
||||
|
||||
@@ -263,6 +307,8 @@ const TOML = {{
|
||||
parseCargoToml: __TOML__.parse,
|
||||
}}
|
||||
|
||||
const ARGS = {args};
|
||||
|
||||
const komodo = KomodoClient('{base_url}', {{
|
||||
type: 'api-key',
|
||||
params: {{ key: '{key}', secret: '{secret}' }}
|
||||
@@ -281,7 +327,7 @@ main()
|
||||
console.error('Status:', error.status);
|
||||
console.error(JSON.stringify(error.result, null, 2));
|
||||
}} else {{
|
||||
console.error(JSON.stringify(error, null, 2));
|
||||
console.error(error);
|
||||
}}
|
||||
Deno.exit(1)
|
||||
}});"
|
||||
@@ -291,6 +337,7 @@ main()
|
||||
/// Cleans up file at given path.
|
||||
/// ALSO if $DENO_DIR is set,
|
||||
/// will clean up the generated file matching "file"
|
||||
#[instrument("CleanupRun")]
|
||||
async fn cleanup_run(file: String, path: &Path) {
|
||||
if let Err(e) = fs::remove_file(path).await {
|
||||
warn!(
|
||||
@@ -310,7 +357,7 @@ fn deno_dir() -> Option<&'static Path> {
|
||||
DENO_DIR
|
||||
.get_or_init(|| {
|
||||
let deno_dir = std::env::var("DENO_DIR").ok()?;
|
||||
PathBuf::from_str(&deno_dir).ok()
|
||||
Some(PathBuf::from(&deno_dir))
|
||||
})
|
||||
.as_deref()
|
||||
}
|
||||
@@ -368,3 +415,25 @@ fn delete_file(
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn parse_action_arguments(
|
||||
args: &str,
|
||||
format: FileFormat,
|
||||
) -> anyhow::Result<JsonObject> {
|
||||
match format {
|
||||
FileFormat::KeyValue => {
|
||||
let args = parse_key_value_list(args)
|
||||
.context("Failed to parse args as key value list")?
|
||||
.into_iter()
|
||||
.map(|(k, v)| (k, serde_json::Value::String(v)))
|
||||
.collect();
|
||||
Ok(args)
|
||||
}
|
||||
FileFormat::Toml => toml::from_str(args)
|
||||
.context("Failed to parse Toml to Action args"),
|
||||
FileFormat::Yaml => serde_yaml_ng::from_str(args)
|
||||
.context("Failed to parse Yaml to action args"),
|
||||
FileFormat::Json => serde_json::from_str(args)
|
||||
.context("Failed to parse Json to action args"),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,32 +1,45 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use formatting::format_serror;
|
||||
use futures::{StreamExt, TryStreamExt, stream::FuturesUnordered};
|
||||
use komodo_client::{
|
||||
api::execute::TestAlerter,
|
||||
api::execute::{SendAlert, TestAlerter},
|
||||
entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
alert::{Alert, AlertData, AlertDataVariant, SeverityLevel},
|
||||
alerter::Alerter,
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
alert::send_alert_to_alerter, helpers::update::update_update,
|
||||
resource::get_check_permissions,
|
||||
permission::get_check_permissions, resource::list_full_for_user,
|
||||
};
|
||||
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for TestAlerter {
|
||||
#[instrument(name = "TestAlerter", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"TestAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
alerter = self.alerter,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let alerter = get_check_permissions::<Alerter>(
|
||||
&self.alerter,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -71,3 +84,105 @@ impl Resolve<ExecuteArgs> for TestAlerter {
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<ExecuteArgs> for SendAlert {
|
||||
#[instrument(
|
||||
"SendAlert",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
request = format!("{self:?}"),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let alerters =
|
||||
list_full_for_user::<Alerter>(Default::default(), user, &[])
|
||||
.await?
|
||||
.into_iter()
|
||||
.filter(|a| {
|
||||
a.config.enabled
|
||||
&& (self.alerters.is_empty()
|
||||
|| self.alerters.contains(&a.name)
|
||||
|| self.alerters.contains(&a.id))
|
||||
&& (a.config.alert_types.is_empty()
|
||||
|| a
|
||||
.config
|
||||
.alert_types
|
||||
.contains(&AlertDataVariant::Custom))
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let alerters = if user.admin {
|
||||
alerters
|
||||
} else {
|
||||
// Only keep alerters with execute permissions
|
||||
alerters
|
||||
.into_iter()
|
||||
.map(|alerter| async move {
|
||||
get_check_permissions::<Alerter>(
|
||||
&alerter.id,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.collect::<Vec<_>>()
|
||||
.await
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect()
|
||||
};
|
||||
|
||||
if alerters.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Could not find any valid alerters to send to, this required Execute permissions on the Alerter"
|
||||
).status_code(StatusCode::BAD_REQUEST));
|
||||
}
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
let ts = komodo_timestamp();
|
||||
|
||||
let alert = Alert {
|
||||
id: Default::default(),
|
||||
ts,
|
||||
resolved: true,
|
||||
level: self.level,
|
||||
target: update.target.clone(),
|
||||
data: AlertData::Custom {
|
||||
message: self.message,
|
||||
details: self.details,
|
||||
},
|
||||
resolved_ts: Some(ts),
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"Send alert",
|
||||
serde_json::to_string_pretty(&alert)
|
||||
.context("Failed to serialize alert to JSON")?,
|
||||
);
|
||||
|
||||
if let Err(e) = alerters
|
||||
.iter()
|
||||
.map(|alerter| send_alert_to_alerter(alerter, &alert))
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.try_collect::<Vec<_>>()
|
||||
.await
|
||||
{
|
||||
update.push_error_log("Send Error", format_serror(&e.into()));
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,21 @@
|
||||
use std::{collections::HashSet, future::IntoFuture, time::Duration};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
future::IntoFuture,
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
bson::{doc, to_bson, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use futures::future::join_all;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
BatchExecutionResponse, BatchRunBuild, CancelBuild, Deploy,
|
||||
@@ -11,43 +24,35 @@ use komodo_client::{
|
||||
entities::{
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
all_logs_success,
|
||||
build::{Build, BuildConfig, ImageRegistryConfig},
|
||||
build::{Build, BuildConfig},
|
||||
builder::{Builder, BuilderConfig},
|
||||
deployment::DeploymentState,
|
||||
komodo_timestamp,
|
||||
komodo_timestamp, optional_string,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
update::{Log, Update},
|
||||
user::auto_redeploy_user,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
bson::{doc, to_bson, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
alert::send_alerts,
|
||||
helpers::{
|
||||
builder::{cleanup_builder_instance, get_builder_periphery},
|
||||
build_git_token,
|
||||
builder::{cleanup_builder_instance, connect_builder_periphery},
|
||||
channel::build_cancel_channel,
|
||||
git_token,
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_extra_args,
|
||||
interpolate_variables_secrets_into_string,
|
||||
interpolate_variables_secrets_into_system_command,
|
||||
query::{
|
||||
VariablesAndSecrets, get_deployment_state,
|
||||
get_variables_and_secrets,
|
||||
},
|
||||
query::{get_deployment_state, get_variables_and_secrets},
|
||||
registry_token,
|
||||
update::{init_execution_update, update_update},
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource::{self, refresh_build_state_cache},
|
||||
state::{action_states, db_client},
|
||||
};
|
||||
@@ -62,10 +67,18 @@ impl super::BatchExecute for BatchRunBuild {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchRunBuild {
|
||||
#[instrument(name = "BatchRunBuild", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchRunBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchRunBuild>(&self.pattern, user)
|
||||
@@ -75,21 +88,44 @@ impl Resolve<ExecuteArgs> for BatchRunBuild {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunBuild {
|
||||
#[instrument(name = "RunBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RunBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
build = self.build,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut build = resource::get_check_permissions::<Build>(
|
||||
let mut build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut vars_and_secrets = get_variables_and_secrets().await?;
|
||||
let mut repo = if !build.config.files_on_host
|
||||
&& !build.config.linked_repo.is_empty()
|
||||
{
|
||||
crate::resource::get::<Repo>(&build.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let VariablesAndSecrets {
|
||||
mut variables,
|
||||
secrets,
|
||||
} = get_variables_and_secrets().await?;
|
||||
|
||||
// Add the $VERSION to variables. Use with [[$VERSION]]
|
||||
vars_and_secrets.variables.insert(
|
||||
variables.insert(
|
||||
String::from("$VERSION"),
|
||||
build.config.version.to_string(),
|
||||
);
|
||||
@@ -116,18 +152,11 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
update.version = build.config.version;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let git_token = git_token(
|
||||
&build.config.git_provider,
|
||||
&build.config.git_account,
|
||||
|https| build.config.git_https = https,
|
||||
)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. This is a database error, not a token exisitence error. Stopping run. | {} | {}", build.config.git_provider, build.config.git_account),
|
||||
)?;
|
||||
let git_token =
|
||||
build_git_token(&mut build, repo.as_mut()).await?;
|
||||
|
||||
let registry_token =
|
||||
validate_account_extract_registry_token(&build).await?;
|
||||
let registry_tokens =
|
||||
validate_account_extract_registry_tokens(&build).await?;
|
||||
|
||||
let cancel = CancellationToken::new();
|
||||
let cancel_clone = cancel.clone();
|
||||
@@ -175,7 +204,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
});
|
||||
|
||||
// GET BUILDER PERIPHERY
|
||||
let (periphery, cleanup_data) = match get_builder_periphery(
|
||||
let (periphery, cleanup_data) = match connect_builder_periphery(
|
||||
build.name.clone(),
|
||||
Some(build.config.version),
|
||||
builder,
|
||||
@@ -202,75 +231,45 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
|
||||
// INTERPOLATE VARIABLES
|
||||
let secret_replacers = if !build.config.skip_secret_interp {
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolate_variables_secrets_into_system_command(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.pre_build,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
interpolator.interpolate_build(&mut build)?;
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.build_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
if let Some(repo) = repo.as_mut() {
|
||||
interpolator.interpolate_repo(repo)?;
|
||||
}
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.secret_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
interpolator.push_logs(&mut update.logs);
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.dockerfile,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_extra_args(
|
||||
&vars_and_secrets,
|
||||
&mut build.config.extra_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
add_interp_update_log(
|
||||
&mut update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
|
||||
secret_replacers
|
||||
interpolator.secret_replacers
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
|
||||
let commit_message = if !build.config.files_on_host
|
||||
&& !build.config.repo.is_empty()
|
||||
&& (!build.config.repo.is_empty()
|
||||
|| !build.config.linked_repo.is_empty())
|
||||
{
|
||||
// CLONE REPO
|
||||
// PULL OR CLONE REPO
|
||||
let res = tokio::select! {
|
||||
res = periphery
|
||||
.request(api::git::CloneRepo {
|
||||
args: (&build).into(),
|
||||
.request(api::git::PullOrCloneRepo {
|
||||
args: repo.as_ref().map(Into::into).unwrap_or((&build).into()),
|
||||
git_token,
|
||||
environment: Default::default(),
|
||||
env_file_path: Default::default(),
|
||||
on_clone: None,
|
||||
on_pull: None,
|
||||
skip_secret_interp: Default::default(),
|
||||
replacers: Default::default(),
|
||||
}) => res,
|
||||
_ = cancel.cancelled() => {
|
||||
debug!("build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(cleanup_data, &mut update)
|
||||
debug!("Build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("Build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
info!("builder cleaned up");
|
||||
info!("Builder cleaned up");
|
||||
return handle_early_return(update, build.id, build.name, true).await
|
||||
},
|
||||
};
|
||||
@@ -278,16 +277,16 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
let commit_message = match res {
|
||||
Ok(res) => {
|
||||
debug!("finished repo clone");
|
||||
update.logs.extend(res.logs);
|
||||
update.logs.extend(res.res.logs);
|
||||
update.commit_hash =
|
||||
res.commit_hash.unwrap_or_default().to_string();
|
||||
res.commit_message.unwrap_or_default()
|
||||
res.res.commit_hash.unwrap_or_default().to_string();
|
||||
res.res.commit_message.unwrap_or_default()
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("failed build at clone repo | {e:#}");
|
||||
warn!("Failed build at clone repo | {e:#}");
|
||||
update.push_error_log(
|
||||
"clone repo",
|
||||
format_serror(&e.context("failed to clone repo").into()),
|
||||
"Clone Repo",
|
||||
format_serror(&e.context("Failed to clone repo").into()),
|
||||
);
|
||||
Default::default()
|
||||
}
|
||||
@@ -306,19 +305,18 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
res = periphery
|
||||
.request(api::build::Build {
|
||||
build: build.clone(),
|
||||
registry_token,
|
||||
repo,
|
||||
registry_tokens,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
// Push a commit hash tagged image
|
||||
additional_tags: if update.commit_hash.is_empty() {
|
||||
Default::default()
|
||||
} else {
|
||||
vec![update.commit_hash.clone()]
|
||||
},
|
||||
// To push a commit hash tagged image
|
||||
commit_hash: optional_string(&update.commit_hash),
|
||||
// Unused for now
|
||||
additional_tags: Default::default(),
|
||||
}) => res.context("failed at call to periphery to build"),
|
||||
_ = cancel.cancelled() => {
|
||||
info!("build cancelled during build, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during docker build"));
|
||||
cleanup_builder_instance(cleanup_data, &mut update)
|
||||
info!("Build cancelled during build, cleaning up builder");
|
||||
update.push_error_log("Build cancelled", String::from("user cancelled build during docker build"));
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
return handle_early_return(update, build.id, build.name, true).await
|
||||
},
|
||||
@@ -332,7 +330,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
Err(e) => {
|
||||
warn!("error in build | {e:#}");
|
||||
update.push_error_log(
|
||||
"build",
|
||||
"Build Error",
|
||||
format_serror(&e.context("failed to build").into()),
|
||||
)
|
||||
}
|
||||
@@ -364,7 +362,8 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
|
||||
// If building on temporary cloud server (AWS),
|
||||
// this will terminate the server.
|
||||
cleanup_builder_instance(cleanup_data, &mut update).await;
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
@@ -374,7 +373,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
let _ = update_one_by_id(
|
||||
&db.updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -389,7 +388,6 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
handle_post_build_redeploy(&build.id).await;
|
||||
});
|
||||
} else {
|
||||
warn!("build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
let version = update.version;
|
||||
tokio::spawn(async move {
|
||||
@@ -414,7 +412,7 @@ impl Resolve<ExecuteArgs> for RunBuild {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(update))]
|
||||
#[instrument("HandleEarlyReturn", skip(update))]
|
||||
async fn handle_early_return(
|
||||
mut update: Update,
|
||||
build_id: String,
|
||||
@@ -430,7 +428,7 @@ async fn handle_early_return(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -438,7 +436,6 @@ async fn handle_early_return(
|
||||
}
|
||||
update_update(update.clone()).await?;
|
||||
if !update.success && !is_cancel {
|
||||
warn!("build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
let version = update.version;
|
||||
tokio::spawn(async move {
|
||||
@@ -508,15 +505,24 @@ pub async fn validate_cancel_build(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for CancelBuild {
|
||||
#[instrument(name = "CancelBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"CancelBuild",
|
||||
skip(user, update),
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
build = self.build,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
let build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -568,7 +574,7 @@ impl Resolve<ExecuteArgs> for CancelBuild {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
#[instrument("PostBuildRedeploy")]
|
||||
async fn handle_post_build_redeploy(build_id: &str) {
|
||||
let Ok(redeploy_deployments) = find_collect(
|
||||
&db_client().deployments,
|
||||
@@ -587,8 +593,9 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
redeploy_deployments
|
||||
.into_iter()
|
||||
.map(|deployment| async move {
|
||||
let state =
|
||||
get_deployment_state(&deployment).await.unwrap_or_default();
|
||||
let state = get_deployment_state(&deployment.id)
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
if state == DeploymentState::Running {
|
||||
let req = super::ExecuteRequest::Deploy(Deploy {
|
||||
deployment: deployment.id.clone(),
|
||||
@@ -603,7 +610,11 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
stop_signal: None,
|
||||
stop_time: None,
|
||||
}
|
||||
.resolve(&ExecuteArgs { user, update })
|
||||
.resolve(&ExecuteArgs {
|
||||
user,
|
||||
update,
|
||||
id: Uuid::new_v4(),
|
||||
})
|
||||
.await
|
||||
}
|
||||
.await;
|
||||
@@ -629,34 +640,49 @@ async fn handle_post_build_redeploy(build_id: &str) {
|
||||
/// This will make sure that a build with non-none image registry has an account attached,
|
||||
/// and will check the core config for a token matching requirements.
|
||||
/// Otherwise it is left to periphery.
|
||||
async fn validate_account_extract_registry_token(
|
||||
#[instrument("ValidateRegistryTokens")]
|
||||
async fn validate_account_extract_registry_tokens(
|
||||
Build {
|
||||
config:
|
||||
BuildConfig {
|
||||
image_registry:
|
||||
ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
},
|
||||
..
|
||||
},
|
||||
config: BuildConfig { image_registry, .. },
|
||||
..
|
||||
}: &Build,
|
||||
) -> serror::Result<Option<String>> {
|
||||
if domain.is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
if account.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Must attach account to use registry provider {domain}"
|
||||
)
|
||||
.into(),
|
||||
// Maps (domain, account) -> token
|
||||
) -> serror::Result<Vec<(String, String, String)>> {
|
||||
let mut res = HashMap::with_capacity(image_registry.capacity());
|
||||
|
||||
for (domain, account) in image_registry
|
||||
.iter()
|
||||
.map(|r| (r.domain.as_str(), r.account.as_str()))
|
||||
// This ensures uniqueness / prevents redundant logins
|
||||
.collect::<HashSet<_>>()
|
||||
{
|
||||
if domain.is_empty() {
|
||||
continue;
|
||||
}
|
||||
if account.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Must attach account to use registry provider {domain}"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let Some(registry_token) = registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)? else {
|
||||
continue;
|
||||
};
|
||||
|
||||
res.insert(
|
||||
(domain.to_string(), account.to_string()),
|
||||
registry_token,
|
||||
);
|
||||
}
|
||||
|
||||
let registry_token = registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get registry token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)?;
|
||||
|
||||
Ok(registry_token)
|
||||
Ok(
|
||||
res
|
||||
.into_iter()
|
||||
.map(|((domain, account), token)| (domain, account, token))
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
use std::{collections::HashSet, sync::OnceLock};
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use cache::TimeoutCache;
|
||||
use formatting::format_serror;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
@@ -11,7 +12,7 @@ use komodo_client::{
|
||||
deployment::{
|
||||
Deployment, DeploymentImage, extract_registry_domain,
|
||||
},
|
||||
get_image_name, komodo_timestamp, optional_string,
|
||||
komodo_timestamp, optional_string,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
update::{Log, Update},
|
||||
@@ -23,17 +24,13 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_extra_args,
|
||||
interpolate_variables_secrets_into_string,
|
||||
},
|
||||
periphery_client,
|
||||
query::get_variables_and_secrets,
|
||||
query::{VariablesAndSecrets, get_variables_and_secrets},
|
||||
registry_token,
|
||||
update::update_update,
|
||||
},
|
||||
monitor::update_cache_for_server,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::action_states,
|
||||
};
|
||||
@@ -52,10 +49,18 @@ impl super::BatchExecute for BatchDeploy {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchDeploy {
|
||||
#[instrument(name = "BatchDeploy", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchDeploy",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchDeploy>(&self.pattern, user)
|
||||
@@ -64,14 +69,15 @@ impl Resolve<ExecuteArgs> for BatchDeploy {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("SetupDeploy", skip_all)]
|
||||
async fn setup_deployment_execution(
|
||||
deployment: &str,
|
||||
user: &User,
|
||||
) -> anyhow::Result<(Deployment, Server)> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
deployment,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -90,10 +96,21 @@ async fn setup_deployment_execution(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for Deploy {
|
||||
#[instrument(name = "Deploy", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"Deploy",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
stop_signal = format!("{:?}", self.stop_signal),
|
||||
stop_time = self.stop_time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (mut deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -118,8 +135,11 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
let (version, registry_token) = match &deployment.config.image {
|
||||
DeploymentImage::Build { build_id, version } => {
|
||||
let build = resource::get::<Build>(build_id).await?;
|
||||
let image_name = get_image_name(&build)
|
||||
.context("failed to create image name")?;
|
||||
let image_names = build.get_image_names();
|
||||
let image_name = image_names
|
||||
.first()
|
||||
.context("No image name could be created")
|
||||
.context("Failed to create image name")?;
|
||||
let version = if version.is_none() {
|
||||
build.config.version
|
||||
} else {
|
||||
@@ -136,21 +156,27 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
deployment.config.image = DeploymentImage::Image {
|
||||
image: format!("{image_name}:{version_str}"),
|
||||
};
|
||||
if build.config.image_registry.domain.is_empty() {
|
||||
let first_registry = build
|
||||
.config
|
||||
.image_registry
|
||||
.first()
|
||||
.unwrap_or(ImageRegistryConfig::static_default());
|
||||
if first_registry.domain.is_empty() {
|
||||
(version, None)
|
||||
} else {
|
||||
let ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
} = build.config.image_registry;
|
||||
} = first_registry;
|
||||
if deployment.config.image_registry_account.is_empty() {
|
||||
deployment.config.image_registry_account = account
|
||||
deployment.config.image_registry_account =
|
||||
account.to_string();
|
||||
}
|
||||
let token = if !deployment
|
||||
.config
|
||||
.image_registry_account
|
||||
.is_empty()
|
||||
{
|
||||
registry_token(&domain, &deployment.config.image_registry_account).await.with_context(
|
||||
registry_token(domain, &deployment.config.image_registry_account).await.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {}", deployment.config.image_registry_account),
|
||||
)?
|
||||
} else {
|
||||
@@ -179,53 +205,17 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
let secret_replacers = if !deployment.config.skip_secret_interp {
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.environment,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
interpolator
|
||||
.interpolate_deployment(&mut deployment)?
|
||||
.push_logs(&mut update.logs);
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.ports,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.volumes,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_extra_args(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.extra_args,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut deployment.config.command,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
add_interp_update_log(
|
||||
&mut update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
|
||||
secret_replacers
|
||||
interpolator.secret_replacers
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
@@ -233,7 +223,8 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
update.version = version;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
match periphery_client(&server)?
|
||||
match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::Deploy {
|
||||
deployment,
|
||||
stop_signal: self.stop_signal,
|
||||
@@ -252,7 +243,7 @@ impl Resolve<ExecuteArgs> for Deploy {
|
||||
}
|
||||
};
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -272,6 +263,14 @@ fn pull_cache() -> &'static PullCache {
|
||||
PULL_CACHE.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
"PullDeploymentInner",
|
||||
skip_all,
|
||||
fields(
|
||||
deployment = deployment.id,
|
||||
server = server.id
|
||||
)
|
||||
)]
|
||||
pub async fn pull_deployment_inner(
|
||||
deployment: Deployment,
|
||||
server: &Server,
|
||||
@@ -279,8 +278,11 @@ pub async fn pull_deployment_inner(
|
||||
let (image, account, token) = match deployment.config.image {
|
||||
DeploymentImage::Build { build_id, version } => {
|
||||
let build = resource::get::<Build>(&build_id).await?;
|
||||
let image_name = get_image_name(&build)
|
||||
.context("failed to create image name")?;
|
||||
let image_names = build.get_image_names();
|
||||
let image_name = image_names
|
||||
.first()
|
||||
.context("No image name could be created")
|
||||
.context("Failed to create image name")?;
|
||||
let version = if version.is_none() {
|
||||
build.config.version.to_string()
|
||||
} else {
|
||||
@@ -294,26 +296,31 @@ pub async fn pull_deployment_inner(
|
||||
};
|
||||
// replace image with corresponding build image.
|
||||
let image = format!("{image_name}:{version}");
|
||||
if build.config.image_registry.domain.is_empty() {
|
||||
let first_registry = build
|
||||
.config
|
||||
.image_registry
|
||||
.first()
|
||||
.unwrap_or(ImageRegistryConfig::static_default());
|
||||
if first_registry.domain.is_empty() {
|
||||
(image, None, None)
|
||||
} else {
|
||||
let ImageRegistryConfig {
|
||||
domain, account, ..
|
||||
} = build.config.image_registry;
|
||||
} = first_registry;
|
||||
let account =
|
||||
if deployment.config.image_registry_account.is_empty() {
|
||||
account
|
||||
} else {
|
||||
deployment.config.image_registry_account
|
||||
&deployment.config.image_registry_account
|
||||
};
|
||||
let token = if !account.is_empty() {
|
||||
registry_token(&domain, &account).await.with_context(
|
||||
registry_token(domain, account).await.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {domain} | {account}"),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
(image, optional_string(&account), token)
|
||||
(image, optional_string(account), token)
|
||||
}
|
||||
}
|
||||
DeploymentImage::Image { image } => {
|
||||
@@ -353,8 +360,9 @@ pub async fn pull_deployment_inner(
|
||||
}
|
||||
|
||||
let res = async {
|
||||
let log = match periphery_client(server)?
|
||||
.request(api::image::PullImage {
|
||||
let log = match periphery_client(server)
|
||||
.await?
|
||||
.request(api::docker::PullImage {
|
||||
name: image,
|
||||
account,
|
||||
token,
|
||||
@@ -365,7 +373,7 @@ pub async fn pull_deployment_inner(
|
||||
Err(e) => Log::error("Pull image", format_serror(&e.into())),
|
||||
};
|
||||
|
||||
update_cache_for_server(server).await;
|
||||
update_cache_for_server(server, true).await;
|
||||
anyhow::Ok(log)
|
||||
}
|
||||
.await;
|
||||
@@ -378,10 +386,19 @@ pub async fn pull_deployment_inner(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PullDeployment {
|
||||
#[instrument(name = "PullDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PullDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -412,10 +429,19 @@ impl Resolve<ExecuteArgs> for PullDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
#[instrument(name = "StartDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StartDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -436,7 +462,8 @@ impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StartContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -450,7 +477,7 @@ impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -459,10 +486,19 @@ impl Resolve<ExecuteArgs> for StartDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
#[instrument(name = "RestartDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RestartDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -483,7 +519,8 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RestartContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -499,7 +536,7 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -508,10 +545,19 @@ impl Resolve<ExecuteArgs> for RestartDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
#[instrument(name = "PauseDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PauseDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -532,7 +578,8 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::PauseContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -546,7 +593,7 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -555,10 +602,19 @@ impl Resolve<ExecuteArgs> for PauseDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
#[instrument(name = "UnpauseDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"UnpauseDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -579,7 +635,8 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::UnpauseContainer {
|
||||
name: deployment.name,
|
||||
})
|
||||
@@ -595,7 +652,7 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -604,10 +661,21 @@ impl Resolve<ExecuteArgs> for UnpauseDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StopDeployment {
|
||||
#[instrument(name = "StopDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StopDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -628,7 +696,8 @@ impl Resolve<ExecuteArgs> for StopDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StopContainer {
|
||||
name: deployment.name,
|
||||
signal: self
|
||||
@@ -650,7 +719,7 @@ impl Resolve<ExecuteArgs> for StopDeployment {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -670,10 +739,18 @@ impl super::BatchExecute for BatchDestroyDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchDestroyDeployment {
|
||||
#[instrument(name = "BatchDestroyDeployment", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchDestroyDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchDestroyDeployment>(
|
||||
@@ -686,10 +763,21 @@ impl Resolve<ExecuteArgs> for BatchDestroyDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DestroyDeployment {
|
||||
#[instrument(name = "DestroyDeployment", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DestroyDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
deployment = self.deployment,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let (deployment, server) =
|
||||
setup_deployment_execution(&self.deployment, user).await?;
|
||||
@@ -710,7 +798,8 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RemoveContainer {
|
||||
name: deployment.name,
|
||||
signal: self
|
||||
@@ -733,7 +822,7 @@ impl Resolve<ExecuteArgs> for DestroyDeployment {
|
||||
|
||||
update.logs.push(log);
|
||||
update.finalize();
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
|
||||
588
bin/core/src/api/execute/maintenance.rs
Normal file
588
bin/core/src/api/execute/maintenance.rs
Normal file
@@ -0,0 +1,588 @@
|
||||
use std::{fmt::Write as _, sync::OnceLock};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use command::run_komodo_command;
|
||||
use database::{
|
||||
bson::{Document, doc},
|
||||
mungos::find::find_collect,
|
||||
};
|
||||
use formatting::{bold, format_serror};
|
||||
use futures::{StreamExt, stream::FuturesOrdered};
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
BackupCoreDatabase, ClearRepoCache, GlobalAutoUpdate,
|
||||
RotateAllServerKeys, RotateCoreKeys,
|
||||
},
|
||||
entities::{
|
||||
deployment::DeploymentState, server::ServerState,
|
||||
stack::StackState,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
api::execute::{
|
||||
ExecuteArgs, pull_deployment_inner, pull_stack_inner,
|
||||
},
|
||||
config::{core_config, core_keys},
|
||||
helpers::{periphery_client, update::update_update},
|
||||
resource::rotate_server_keys,
|
||||
state::{
|
||||
db_client, deployment_status_cache, server_status_cache,
|
||||
stack_status_cache,
|
||||
},
|
||||
};
|
||||
|
||||
/// Makes sure the method can only be called once at a time
|
||||
fn clear_repo_cache_lock() -> &'static Mutex<()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for ClearRepoCache {
|
||||
#[instrument(
|
||||
"ClearRepoCache",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = clear_repo_cache_lock()
|
||||
.try_lock()
|
||||
.context("Clear already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
let mut contents =
|
||||
tokio::fs::read_dir(&core_config().repo_directory)
|
||||
.await
|
||||
.context("Failed to read repo cache directory")?;
|
||||
|
||||
loop {
|
||||
let path = match contents
|
||||
.next_entry()
|
||||
.await
|
||||
.context("Failed to read contents at path")
|
||||
{
|
||||
Ok(Some(contents)) => contents.path(),
|
||||
Ok(None) => break,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Read Directory",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if path.is_dir() {
|
||||
match tokio::fs::remove_dir_all(&path)
|
||||
.await
|
||||
.context("Failed to clear contents at path")
|
||||
{
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Clear Directory",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Makes sure the method can only be called once at a time
|
||||
fn backup_database_lock() -> &'static Mutex<()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BackupCoreDatabase {
|
||||
#[instrument(
|
||||
"BackupCoreDatabase",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = backup_database_lock()
|
||||
.try_lock()
|
||||
.context("Backup already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let res = run_komodo_command(
|
||||
"Backup Core Database",
|
||||
None,
|
||||
"km database backup --yes",
|
||||
)
|
||||
.await;
|
||||
|
||||
update.logs.push(res);
|
||||
update.finalize();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Makes sure the method can only be called once at a time
|
||||
fn global_update_lock() -> &'static Mutex<()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for GlobalAutoUpdate {
|
||||
#[instrument(
|
||||
"GlobalAutoUpdate",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = global_update_lock()
|
||||
.try_lock()
|
||||
.context("Global update already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
// This is all done in sequence because there is no rush,
|
||||
// the pulls / deploys happen spaced out to ease the load on system.
|
||||
let servers = find_collect(&db_client().servers, None, None)
|
||||
.await
|
||||
.context("Failed to query for servers from database")?;
|
||||
|
||||
let query = doc! {
|
||||
"$or": [
|
||||
{ "config.poll_for_updates": true },
|
||||
{ "config.auto_update": true }
|
||||
]
|
||||
};
|
||||
|
||||
let (stacks, repos) = tokio::try_join!(
|
||||
find_collect(&db_client().stacks, query.clone(), None),
|
||||
find_collect(&db_client().repos, None, None)
|
||||
)
|
||||
.context("Failed to query for resources from database")?;
|
||||
|
||||
let server_status_cache = server_status_cache();
|
||||
let stack_status_cache = stack_status_cache();
|
||||
|
||||
// Will be edited later at update.logs[0]
|
||||
update.push_simple_log("Auto Pull", String::new());
|
||||
|
||||
for stack in stacks {
|
||||
let Some(status) = stack_status_cache.get(&stack.id).await
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
// Only pull running stacks.
|
||||
if !matches!(status.curr.state, StackState::Running) {
|
||||
continue;
|
||||
}
|
||||
if let Some(server) =
|
||||
servers.iter().find(|s| s.id == stack.config.server_id)
|
||||
// This check is probably redundant along with running check
|
||||
// but shouldn't hurt
|
||||
&& server_status_cache
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| matches!(s.state, ServerState::Ok))
|
||||
.unwrap_or_default()
|
||||
{
|
||||
let name = stack.name.clone();
|
||||
let repo = if stack.config.linked_repo.is_empty() {
|
||||
None
|
||||
} else {
|
||||
let Some(repo) =
|
||||
repos.iter().find(|r| r.id == stack.config.linked_repo)
|
||||
else {
|
||||
update.push_error_log(
|
||||
&format!("Pull Stack {name}"),
|
||||
format!(
|
||||
"Did not find any Repo matching {}",
|
||||
stack.config.linked_repo
|
||||
),
|
||||
);
|
||||
continue;
|
||||
};
|
||||
Some(repo.clone())
|
||||
};
|
||||
if let Err(e) =
|
||||
pull_stack_inner(stack, Vec::new(), server, repo, None)
|
||||
.await
|
||||
{
|
||||
update.push_error_log(
|
||||
&format!("Pull Stack {name}"),
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
} else {
|
||||
if !update.logs[0].stdout.is_empty() {
|
||||
update.logs[0].stdout.push('\n');
|
||||
}
|
||||
update.logs[0]
|
||||
.stdout
|
||||
.push_str(&format!("Pulled Stack {} ✅", bold(name)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let deployment_status_cache = deployment_status_cache();
|
||||
let deployments =
|
||||
find_collect(&db_client().deployments, query, None)
|
||||
.await
|
||||
.context("Failed to query for deployments from database")?;
|
||||
for deployment in deployments {
|
||||
let Some(status) =
|
||||
deployment_status_cache.get(&deployment.id).await
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
// Only pull running deployments.
|
||||
if !matches!(status.curr.state, DeploymentState::Running) {
|
||||
continue;
|
||||
}
|
||||
if let Some(server) =
|
||||
servers.iter().find(|s| s.id == deployment.config.server_id)
|
||||
// This check is probably redundant along with running check
|
||||
// but shouldn't hurt
|
||||
&& server_status_cache
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| matches!(s.state, ServerState::Ok))
|
||||
.unwrap_or_default()
|
||||
{
|
||||
let name = deployment.name.clone();
|
||||
if let Err(e) =
|
||||
pull_deployment_inner(deployment, server).await
|
||||
{
|
||||
update.push_error_log(
|
||||
&format!("Pull Deployment {name}"),
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
} else {
|
||||
if !update.logs[0].stdout.is_empty() {
|
||||
update.logs[0].stdout.push('\n');
|
||||
}
|
||||
update.logs[0].stdout.push_str(&format!(
|
||||
"Pulled Deployment {} ✅",
|
||||
bold(name)
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Makes sure the method can only be called once at a time
|
||||
fn global_rotate_lock() -> &'static Mutex<()> {
|
||||
static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
LOCK.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RotateAllServerKeys {
|
||||
#[instrument(
|
||||
"RotateAllServerKeys",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = global_rotate_lock()
|
||||
.try_lock()
|
||||
.context("Key rotation already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let mut servers = db_client()
|
||||
.servers
|
||||
.find(Document::new())
|
||||
.await
|
||||
.context("Failed to query servers from database")?;
|
||||
|
||||
let server_status_cache = server_status_cache();
|
||||
|
||||
let mut log = String::new();
|
||||
|
||||
while let Some(server) = servers.next().await {
|
||||
let server = match server {
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
warn!("Failed to parse Server | {e:#}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
if !server.config.auto_rotate_keys {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Key Rotation Disabled ⚙️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
let Some(status) = server_status_cache.get(&server.id).await
|
||||
else {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: No Status ⚠️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
};
|
||||
match status.state {
|
||||
ServerState::Disabled => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Disabled ⚙️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
ServerState::NotOk => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Not Ok ⚠️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
match rotate_server_keys(&server).await {
|
||||
Ok(_) => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nRotated keys for {} ✅",
|
||||
bold(&server.name)
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Key Rotation Failure",
|
||||
format_serror(
|
||||
&e.context(format!(
|
||||
"Failed to rotate {} keys",
|
||||
bold(&server.name)
|
||||
))
|
||||
.into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
update.push_simple_log("Rotate Server Keys", log);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RotateCoreKeys {
|
||||
#[instrument(
|
||||
"RotateCoreKeys",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
force = self.force,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("This method is admin only.")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let _lock = global_rotate_lock()
|
||||
.try_lock()
|
||||
.context("Key rotation already in progress...")?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let core_keys = core_keys();
|
||||
|
||||
if !core_keys.rotatable() {
|
||||
return Err(anyhow!("Core `private_key` must be pointing to file, for example 'file:/config/keys/core.key'").into());
|
||||
};
|
||||
|
||||
let server_status_cache = server_status_cache();
|
||||
let servers =
|
||||
find_collect(&db_client().servers, Document::new(), None)
|
||||
.await
|
||||
.context("Failed to query servers from database")?
|
||||
.into_iter()
|
||||
.map(|server| async move {
|
||||
let state = server_status_cache
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| s.state)
|
||||
.unwrap_or(ServerState::NotOk);
|
||||
(server, state)
|
||||
})
|
||||
.collect::<FuturesOrdered<_>>()
|
||||
.collect::<Vec<_>>()
|
||||
.await;
|
||||
|
||||
if !self.force
|
||||
&& let Some((server, _)) = servers
|
||||
.iter()
|
||||
.find(|(_, state)| matches!(state, ServerState::NotOk))
|
||||
{
|
||||
return Err(
|
||||
anyhow!("Server {} is NotOk, stopping key rotation. Pass `force: true` to continue anyways.", server.name).into(),
|
||||
);
|
||||
}
|
||||
|
||||
let public_key = core_keys.rotate().await?.into_inner();
|
||||
|
||||
info!("New Public Key: {public_key}");
|
||||
|
||||
let mut log = format!("New Public Key: {public_key}\n");
|
||||
|
||||
for (server, state) in servers {
|
||||
match state {
|
||||
ServerState::Disabled => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Disabled ⚙️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
ServerState::NotOk => {
|
||||
// Shouldn't be reached unless 'force: true'
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nSkipping {}: Server Not Ok ⚠️",
|
||||
bold(&server.name)
|
||||
);
|
||||
continue;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
let periphery = periphery_client(&server).await?;
|
||||
let res = periphery
|
||||
.request(api::keys::RotateCorePublicKey {
|
||||
public_key: public_key.clone(),
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(_) => {
|
||||
let _ = write!(
|
||||
&mut log,
|
||||
"\nRotated key for {} ✅",
|
||||
bold(&server.name)
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Key Rotation Failure",
|
||||
format_serror(
|
||||
&e.context(format!(
|
||||
"Failed to rotate for {}. The new Core public key will have to be added manually.",
|
||||
bold(&server.name)
|
||||
))
|
||||
.into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
update.push_simple_log("Rotate Core Keys", log);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
@@ -1,30 +1,38 @@
|
||||
use std::{pin::Pin, time::Instant};
|
||||
use std::pin::Pin;
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::{Extension, Router, middleware, routing::post};
|
||||
use axum::{
|
||||
Extension, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use axum_extra::{TypedHeader, headers::ContentType};
|
||||
use database::mungos::by_id::find_one_by_id;
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use formatting::format_serror;
|
||||
use futures::future::join_all;
|
||||
use futures::{
|
||||
StreamExt as _, future::join_all, stream::FuturesUnordered,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::execute::*,
|
||||
entities::{
|
||||
Operation,
|
||||
permission::PermissionLevel,
|
||||
update::{Log, Update},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::by_id::find_one_by_id;
|
||||
use resolver_api::Resolve;
|
||||
use response::JsonString;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use strum::Display;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request,
|
||||
helpers::update::{init_execution_update, update_update},
|
||||
permission::get_check_permissions,
|
||||
resource::{KomodoResource, list_full_for_user_using_pattern},
|
||||
state::db_client,
|
||||
};
|
||||
@@ -33,18 +41,23 @@ mod action;
|
||||
mod alerter;
|
||||
mod build;
|
||||
mod deployment;
|
||||
mod maintenance;
|
||||
mod procedure;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod stack;
|
||||
mod sync;
|
||||
|
||||
use super::Variant;
|
||||
|
||||
pub use {
|
||||
deployment::pull_deployment_inner, stack::pull_stack_inner,
|
||||
};
|
||||
|
||||
pub struct ExecuteArgs {
|
||||
/// The execution id.
|
||||
/// Unique for every /execute call.
|
||||
pub id: Uuid,
|
||||
pub user: User,
|
||||
pub update: Update,
|
||||
}
|
||||
@@ -53,7 +66,7 @@ pub struct ExecuteArgs {
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
|
||||
)]
|
||||
#[variant_derive(Debug)]
|
||||
#[variant_derive(Debug, Display)]
|
||||
#[args(ExecuteArgs)]
|
||||
#[response(JsonString)]
|
||||
#[error(serror::Error)]
|
||||
@@ -82,6 +95,22 @@ pub enum ExecuteRequest {
|
||||
PruneBuildx(PruneBuildx),
|
||||
PruneSystem(PruneSystem),
|
||||
|
||||
// ==== STACK ====
|
||||
DeployStack(DeployStack),
|
||||
BatchDeployStack(BatchDeployStack),
|
||||
DeployStackIfChanged(DeployStackIfChanged),
|
||||
BatchDeployStackIfChanged(BatchDeployStackIfChanged),
|
||||
PullStack(PullStack),
|
||||
BatchPullStack(BatchPullStack),
|
||||
StartStack(StartStack),
|
||||
RestartStack(RestartStack),
|
||||
StopStack(StopStack),
|
||||
PauseStack(PauseStack),
|
||||
UnpauseStack(UnpauseStack),
|
||||
DestroyStack(DestroyStack),
|
||||
BatchDestroyStack(BatchDestroyStack),
|
||||
RunStackService(RunStackService),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
Deploy(Deploy),
|
||||
BatchDeploy(BatchDeploy),
|
||||
@@ -94,20 +123,6 @@ pub enum ExecuteRequest {
|
||||
DestroyDeployment(DestroyDeployment),
|
||||
BatchDestroyDeployment(BatchDestroyDeployment),
|
||||
|
||||
// ==== STACK ====
|
||||
DeployStack(DeployStack),
|
||||
BatchDeployStack(BatchDeployStack),
|
||||
DeployStackIfChanged(DeployStackIfChanged),
|
||||
BatchDeployStackIfChanged(BatchDeployStackIfChanged),
|
||||
PullStack(PullStack),
|
||||
StartStack(StartStack),
|
||||
RestartStack(RestartStack),
|
||||
StopStack(StopStack),
|
||||
PauseStack(PauseStack),
|
||||
UnpauseStack(UnpauseStack),
|
||||
DestroyStack(DestroyStack),
|
||||
BatchDestroyStack(BatchDestroyStack),
|
||||
|
||||
// ==== BUILD ====
|
||||
RunBuild(RunBuild),
|
||||
BatchRunBuild(BatchRunBuild),
|
||||
@@ -130,22 +145,40 @@ pub enum ExecuteRequest {
|
||||
RunAction(RunAction),
|
||||
BatchRunAction(BatchRunAction),
|
||||
|
||||
// ==== SERVER TEMPLATE ====
|
||||
LaunchServer(LaunchServer),
|
||||
|
||||
// ==== ALERTER ====
|
||||
TestAlerter(TestAlerter),
|
||||
SendAlert(SendAlert),
|
||||
|
||||
// ==== SYNC ====
|
||||
RunSync(RunSync),
|
||||
|
||||
// ==== MAINTENANCE ====
|
||||
ClearRepoCache(ClearRepoCache),
|
||||
BackupCoreDatabase(BackupCoreDatabase),
|
||||
GlobalAutoUpdate(GlobalAutoUpdate),
|
||||
RotateAllServerKeys(RotateAllServerKeys),
|
||||
RotateCoreKeys(RotateCoreKeys),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
async fn variant_handler(
|
||||
user: Extension<User>,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<(TypedHeader<ContentType>, String)> {
|
||||
let req: ExecuteRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<ExecuteRequest>,
|
||||
@@ -158,8 +191,11 @@ async fn handler(
|
||||
Ok((TypedHeader(ContentType::json()), res))
|
||||
}
|
||||
|
||||
#[typeshare(serialized_as = "Update")]
|
||||
type BoxUpdate = Box<Update>;
|
||||
|
||||
pub enum ExecutionResult {
|
||||
Single(Update),
|
||||
Single(BoxUpdate),
|
||||
/// The batch contents will be pre serialized here
|
||||
Batch(String),
|
||||
}
|
||||
@@ -174,10 +210,12 @@ pub fn inner_handler(
|
||||
>,
|
||||
> {
|
||||
Box::pin(async move {
|
||||
let req_id = Uuid::new_v4();
|
||||
let task_id = Uuid::new_v4();
|
||||
|
||||
// need to validate no cancel is active before any update is created.
|
||||
// Need to validate no cancel is active before any update is created.
|
||||
// This ensures no double update created if Cancel is called more than once for the same request.
|
||||
build::validate_cancel_build(&request).await?;
|
||||
repo::validate_cancel_repo_build(&request).await?;
|
||||
|
||||
let update = init_execution_update(&request, &user).await?;
|
||||
|
||||
@@ -188,28 +226,37 @@ pub fn inner_handler(
|
||||
// here either.
|
||||
if update.operation == Operation::None {
|
||||
return Ok(ExecutionResult::Batch(
|
||||
task(req_id, request, user, update).await?,
|
||||
task(task_id, request, user, update).await?,
|
||||
));
|
||||
}
|
||||
|
||||
// Spawn a task for the execution which continues
|
||||
// running after this method returns.
|
||||
let handle =
|
||||
tokio::spawn(task(req_id, request, user, update.clone()));
|
||||
tokio::spawn(task(task_id, request, user, update.clone()));
|
||||
|
||||
// Spawns another task to monitor the first for failures,
|
||||
// and add the log to Update about it (which primary task can't do because it errored out)
|
||||
tokio::spawn({
|
||||
let update_id = update.id.clone();
|
||||
async move {
|
||||
let log = match handle.await {
|
||||
Ok(Err(e)) => {
|
||||
warn!("/execute request {req_id} task error: {e:#}",);
|
||||
Log::error("task error", format_serror(&e.into()))
|
||||
warn!("/execute request {task_id} task error: {e:#}",);
|
||||
Log::error("Task Error", format_serror(&e.into()))
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("/execute request {req_id} spawn error: {e:?}",);
|
||||
Log::error("spawn error", format!("{e:#?}"))
|
||||
warn!("/execute request {task_id} spawn error: {e:?}",);
|
||||
Log::error("Spawn Error", format!("{e:#?}"))
|
||||
}
|
||||
_ => return,
|
||||
};
|
||||
let res = async {
|
||||
// Nothing to do if update was never actually created,
|
||||
// which is the case when the id is empty.
|
||||
if update_id.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
let mut update =
|
||||
find_one_by_id(&db_client().updates, &update_id)
|
||||
.await
|
||||
@@ -229,44 +276,37 @@ pub fn inner_handler(
|
||||
}
|
||||
});
|
||||
|
||||
Ok(ExecutionResult::Single(update))
|
||||
Ok(ExecutionResult::Single(update.into()))
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteRequest",
|
||||
skip(user, update),
|
||||
fields(
|
||||
user_id = user.id,
|
||||
update_id = update.id,
|
||||
request = format!("{:?}", request.extract_variant()))
|
||||
)
|
||||
]
|
||||
async fn task(
|
||||
req_id: Uuid,
|
||||
id: Uuid,
|
||||
request: ExecuteRequest,
|
||||
user: User,
|
||||
update: Update,
|
||||
) -> anyhow::Result<String> {
|
||||
info!("/execute request {req_id} | user: {}", user.username);
|
||||
let timer = Instant::now();
|
||||
let variant = request.extract_variant();
|
||||
|
||||
let res = match request.resolve(&ExecuteArgs { user, update }).await
|
||||
{
|
||||
Err(e) => Err(e.error),
|
||||
Ok(JsonString::Err(e)) => Err(
|
||||
anyhow::Error::from(e).context("failed to serialize response"),
|
||||
),
|
||||
Ok(JsonString::Ok(res)) => Ok(res),
|
||||
};
|
||||
info!(
|
||||
"/execute request {id} | {variant} | user: {}",
|
||||
user.username
|
||||
);
|
||||
|
||||
let res =
|
||||
match request.resolve(&ExecuteArgs { user, update, id }).await {
|
||||
Err(e) => Err(e.error),
|
||||
Ok(JsonString::Err(e)) => Err(
|
||||
anyhow::Error::from(e)
|
||||
.context("failed to serialize response"),
|
||||
),
|
||||
Ok(JsonString::Ok(res)) => Ok(res),
|
||||
};
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!("/execute request {req_id} error: {e:#}");
|
||||
warn!("/execute request {id} error: {e:#}");
|
||||
}
|
||||
|
||||
let elapsed = timer.elapsed();
|
||||
debug!("/execute request {req_id} | resolve time: {elapsed:?}");
|
||||
|
||||
res
|
||||
}
|
||||
|
||||
@@ -275,6 +315,7 @@ trait BatchExecute {
|
||||
fn single_request(name: String) -> ExecuteRequest;
|
||||
}
|
||||
|
||||
#[instrument("BatchExecute", skip(user))]
|
||||
async fn batch_execute<E: BatchExecute>(
|
||||
pattern: &str,
|
||||
user: &User,
|
||||
@@ -286,6 +327,29 @@ async fn batch_execute<E: BatchExecute>(
|
||||
&[],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let resources = if user.admin {
|
||||
resources
|
||||
} else {
|
||||
// Only keep resources with execute permissions
|
||||
resources
|
||||
.into_iter()
|
||||
.map(|resource| async move {
|
||||
get_check_permissions::<E::Resource>(
|
||||
&resource.id,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await
|
||||
})
|
||||
.collect::<FuturesUnordered<_>>()
|
||||
.collect::<Vec<_>>()
|
||||
.await
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect()
|
||||
};
|
||||
|
||||
let futures = resources.into_iter().map(|resource| {
|
||||
let user = user.clone();
|
||||
async move {
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
use std::pin::Pin;
|
||||
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_document,
|
||||
};
|
||||
use formatting::{Color, bold, colored, format_serror, muted};
|
||||
use komodo_client::{
|
||||
api::execute::{
|
||||
@@ -14,14 +17,14 @@ use komodo_client::{
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
alert::send_alerts,
|
||||
helpers::{procedure::execute_procedure, update::update_update},
|
||||
resource::{self, refresh_procedure_state_cache},
|
||||
permission::get_check_permissions,
|
||||
resource::refresh_procedure_state_cache,
|
||||
state::{action_states, db_client},
|
||||
};
|
||||
|
||||
@@ -35,7 +38,11 @@ impl super::BatchExecute for BatchRunProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchRunProcedure {
|
||||
#[instrument(name = "BatchRunProcedure", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchRunProcedure",
|
||||
skip_all,
|
||||
fields(operator = user.id)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
@@ -48,10 +55,19 @@ impl Resolve<ExecuteArgs> for BatchRunProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunProcedure {
|
||||
#[instrument(name = "RunProcedure", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RunProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
procedure = self.procedure,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
Ok(
|
||||
resolve_inner(self.procedure, user.clone(), update.clone())
|
||||
@@ -70,10 +86,10 @@ fn resolve_inner(
|
||||
>,
|
||||
> {
|
||||
Box::pin(async move {
|
||||
let procedure = resource::get_check_permissions::<Procedure>(
|
||||
let procedure = get_check_permissions::<Procedure>(
|
||||
&procedure,
|
||||
&user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -133,7 +149,7 @@ fn resolve_inner(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -143,7 +159,6 @@ fn resolve_inner(
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if !update.success && procedure.config.failure_alert {
|
||||
warn!("procedure unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
|
||||
@@ -1,7 +1,15 @@
|
||||
use std::{collections::HashSet, future::IntoFuture, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::{
|
||||
bson::{doc, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use interpolate::Interpolator;
|
||||
use komodo_client::{
|
||||
api::{execute::*, write::RefreshRepoCache},
|
||||
entities::{
|
||||
@@ -14,13 +22,6 @@ use komodo_client::{
|
||||
update::{Log, Update},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::{
|
||||
bson::{doc, to_document},
|
||||
options::FindOneOptions,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
@@ -29,18 +30,13 @@ use crate::{
|
||||
alert::send_alerts,
|
||||
api::write::WriteArgs,
|
||||
helpers::{
|
||||
builder::{cleanup_builder_instance, get_builder_periphery},
|
||||
builder::{cleanup_builder_instance, connect_builder_periphery},
|
||||
channel::repo_cancel_channel,
|
||||
git_token,
|
||||
interpolate::{
|
||||
add_interp_update_log,
|
||||
interpolate_variables_secrets_into_string,
|
||||
interpolate_variables_secrets_into_system_command,
|
||||
},
|
||||
periphery_client,
|
||||
query::get_variables_and_secrets,
|
||||
git_token, periphery_client,
|
||||
query::{VariablesAndSecrets, get_variables_and_secrets},
|
||||
update::update_update,
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource::{self, refresh_repo_state_cache},
|
||||
state::{action_states, db_client},
|
||||
};
|
||||
@@ -55,10 +51,18 @@ impl super::BatchExecute for BatchCloneRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchCloneRepo {
|
||||
#[instrument(name = "BatchCloneRepo", skip( user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchCloneRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchCloneRepo>(&self.pattern, user)
|
||||
@@ -68,15 +72,24 @@ impl Resolve<ExecuteArgs> for BatchCloneRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for CloneRepo {
|
||||
#[instrument(name = "CloneRepo", skip( user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"CloneRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -109,7 +122,7 @@ impl Resolve<ExecuteArgs> for CloneRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
@@ -122,16 +135,18 @@ impl Resolve<ExecuteArgs> for CloneRepo {
|
||||
git_token,
|
||||
environment: repo.config.env_vars()?,
|
||||
env_file_path: repo.config.env_file_path,
|
||||
on_clone: repo.config.on_clone.into(),
|
||||
on_pull: repo.config.on_pull.into(),
|
||||
skip_secret_interp: repo.config.skip_secret_interp,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(res) => res.logs,
|
||||
Ok(res) => res.res.logs,
|
||||
Err(e) => {
|
||||
vec![Log::error(
|
||||
"clone repo",
|
||||
format_serror(&e.context("failed to clone repo").into()),
|
||||
"Clone Repo",
|
||||
format_serror(&e.context("Failed to clone repo").into()),
|
||||
)]
|
||||
}
|
||||
};
|
||||
@@ -155,22 +170,30 @@ impl Resolve<ExecuteArgs> for CloneRepo {
|
||||
);
|
||||
};
|
||||
|
||||
handle_server_update_return(update).await
|
||||
handle_repo_update_return(update).await
|
||||
}
|
||||
}
|
||||
|
||||
impl super::BatchExecute for BatchPullRepo {
|
||||
type Resource = Repo;
|
||||
fn single_request(repo: String) -> ExecuteRequest {
|
||||
ExecuteRequest::CloneRepo(CloneRepo { repo })
|
||||
ExecuteRequest::PullRepo(PullRepo { repo })
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchPullRepo {
|
||||
#[instrument(name = "BatchPullRepo", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchPullRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchPullRepo>(&self.pattern, user)
|
||||
@@ -180,15 +203,24 @@ impl Resolve<ExecuteArgs> for BatchPullRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PullRepo {
|
||||
#[instrument(name = "PullRepo", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PullRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -222,7 +254,7 @@ impl Resolve<ExecuteArgs> for PullRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
// interpolate variables / secrets, returning the sanitizing replacers to send to
|
||||
// periphery so it may sanitize the final command for safe logging (avoids exposing secret values)
|
||||
@@ -235,14 +267,15 @@ impl Resolve<ExecuteArgs> for PullRepo {
|
||||
git_token,
|
||||
environment: repo.config.env_vars()?,
|
||||
env_file_path: repo.config.env_file_path,
|
||||
on_pull: repo.config.on_pull.into(),
|
||||
skip_secret_interp: repo.config.skip_secret_interp,
|
||||
replacers: secret_replacers.into_iter().collect(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(res) => {
|
||||
update.commit_hash = res.commit_hash.unwrap_or_default();
|
||||
res.logs
|
||||
update.commit_hash = res.res.commit_hash.unwrap_or_default();
|
||||
res.res.logs
|
||||
}
|
||||
Err(e) => {
|
||||
vec![Log::error(
|
||||
@@ -272,12 +305,16 @@ impl Resolve<ExecuteArgs> for PullRepo {
|
||||
);
|
||||
};
|
||||
|
||||
handle_server_update_return(update).await
|
||||
handle_repo_update_return(update).await
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip_all, fields(update_id = update.id))]
|
||||
async fn handle_server_update_return(
|
||||
#[instrument(
|
||||
"HandleRepoEarlyReturn",
|
||||
skip_all,
|
||||
fields(update_id = update.id)
|
||||
)]
|
||||
async fn handle_repo_update_return(
|
||||
update: Update,
|
||||
) -> serror::Result<Update> {
|
||||
// Need to manually update the update before cache refresh,
|
||||
@@ -288,7 +325,7 @@ async fn handle_server_update_return(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -298,7 +335,7 @@ async fn handle_server_update_return(
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
#[instrument]
|
||||
#[instrument("UpdateLastPulledTime")]
|
||||
async fn update_last_pulled_time(repo_name: &str) {
|
||||
let res = db_client()
|
||||
.repos
|
||||
@@ -322,10 +359,18 @@ impl super::BatchExecute for BatchBuildRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BatchBuildRepo {
|
||||
#[instrument(name = "BatchBuildRepo", skip(user), fields(user_id = user.id))]
|
||||
#[instrument(
|
||||
"BatchBuildRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
pattern = self.pattern,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, .. }: &ExecuteArgs,
|
||||
ExecuteArgs { user, id, .. }: &ExecuteArgs,
|
||||
) -> serror::Result<BatchExecutionResponse> {
|
||||
Ok(
|
||||
super::batch_execute::<BatchBuildRepo>(&self.pattern, user)
|
||||
@@ -335,15 +380,24 @@ impl Resolve<ExecuteArgs> for BatchBuildRepo {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
#[instrument(name = "BuildRepo", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"BuildRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -420,7 +474,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
|
||||
// GET BUILDER PERIPHERY
|
||||
|
||||
let (periphery, cleanup_data) = match get_builder_periphery(
|
||||
let (periphery, cleanup_data) = match connect_builder_periphery(
|
||||
repo.name.clone(),
|
||||
None,
|
||||
builder,
|
||||
@@ -456,13 +510,15 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
git_token,
|
||||
environment: repo.config.env_vars()?,
|
||||
env_file_path: repo.config.env_file_path,
|
||||
on_clone: repo.config.on_clone.into(),
|
||||
on_pull: repo.config.on_pull.into(),
|
||||
skip_secret_interp: repo.config.skip_secret_interp,
|
||||
replacers: secret_replacers.into_iter().collect()
|
||||
}) => res,
|
||||
_ = cancel.cancelled() => {
|
||||
debug!("build cancelled during clone, cleaning up builder");
|
||||
update.push_error_log("build cancelled", String::from("user cancelled build during repo clone"));
|
||||
cleanup_builder_instance(cleanup_data, &mut update)
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
info!("builder cleaned up");
|
||||
return handle_builder_early_return(update, repo.id, repo.name, true).await
|
||||
@@ -472,14 +528,15 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
let commit_message = match res {
|
||||
Ok(res) => {
|
||||
debug!("finished repo clone");
|
||||
update.logs.extend(res.logs);
|
||||
update.commit_hash = res.commit_hash.unwrap_or_default();
|
||||
res.commit_message.unwrap_or_default()
|
||||
update.logs.extend(res.res.logs);
|
||||
update.commit_hash = res.res.commit_hash.unwrap_or_default();
|
||||
|
||||
res.res.commit_message.unwrap_or_default()
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"clone repo",
|
||||
format_serror(&e.context("failed to clone repo").into()),
|
||||
"Clone Repo",
|
||||
format_serror(&e.context("Failed to clone repo").into()),
|
||||
);
|
||||
Default::default()
|
||||
}
|
||||
@@ -508,7 +565,8 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
|
||||
// If building on temporary cloud server (AWS),
|
||||
// this will terminate the server.
|
||||
cleanup_builder_instance(cleanup_data, &mut update).await;
|
||||
cleanup_builder_instance(periphery, cleanup_data, &mut update)
|
||||
.await;
|
||||
|
||||
// Need to manually update the update before cache refresh,
|
||||
// and before broadcast with add_update.
|
||||
@@ -518,7 +576,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
let _ = update_one_by_id(
|
||||
&db.updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -528,7 +586,6 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
if !update.success {
|
||||
warn!("repo build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
@@ -551,7 +608,7 @@ impl Resolve<ExecuteArgs> for BuildRepo {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(update))]
|
||||
#[instrument("HandleRepoBuildEarlyReturn", skip(update))]
|
||||
async fn handle_builder_early_return(
|
||||
mut update: Update,
|
||||
repo_id: String,
|
||||
@@ -567,7 +624,7 @@ async fn handle_builder_early_return(
|
||||
let _ = update_one_by_id(
|
||||
&db_client().updates,
|
||||
&update.id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
@@ -575,7 +632,6 @@ async fn handle_builder_early_return(
|
||||
}
|
||||
update_update(update.clone()).await?;
|
||||
if !update.success && !is_cancel {
|
||||
warn!("repo build unsuccessful, alerting...");
|
||||
let target = update.target.clone();
|
||||
tokio::spawn(async move {
|
||||
let alert = Alert {
|
||||
@@ -596,7 +652,6 @@ async fn handle_builder_early_return(
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn validate_cancel_repo_build(
|
||||
request: &ExecuteRequest,
|
||||
) -> anyhow::Result<()> {
|
||||
@@ -646,15 +701,24 @@ pub async fn validate_cancel_repo_build(
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for CancelRepoBuild {
|
||||
#[instrument(name = "CancelRepoBuild", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"CancelRepoBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
repo = self.repo,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -706,44 +770,29 @@ impl Resolve<ExecuteArgs> for CancelRepoBuild {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
"Interpolate",
|
||||
skip_all,
|
||||
fields(
|
||||
skip_secret_interp = repo.config.skip_secret_interp
|
||||
)
|
||||
)]
|
||||
async fn interpolate(
|
||||
repo: &mut Repo,
|
||||
update: &mut Update,
|
||||
) -> anyhow::Result<HashSet<(String, String)>> {
|
||||
if !repo.config.skip_secret_interp {
|
||||
let vars_and_secrets = get_variables_and_secrets().await?;
|
||||
let VariablesAndSecrets { variables, secrets } =
|
||||
get_variables_and_secrets().await?;
|
||||
|
||||
let mut global_replacers = HashSet::new();
|
||||
let mut secret_replacers = HashSet::new();
|
||||
let mut interpolator =
|
||||
Interpolator::new(Some(&variables), &secrets);
|
||||
|
||||
interpolate_variables_secrets_into_string(
|
||||
&vars_and_secrets,
|
||||
&mut repo.config.environment,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
interpolator
|
||||
.interpolate_repo(repo)?
|
||||
.push_logs(&mut update.logs);
|
||||
|
||||
interpolate_variables_secrets_into_system_command(
|
||||
&vars_and_secrets,
|
||||
&mut repo.config.on_clone,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
interpolate_variables_secrets_into_system_command(
|
||||
&vars_and_secrets,
|
||||
&mut repo.config.on_pull,
|
||||
&mut global_replacers,
|
||||
&mut secret_replacers,
|
||||
)?;
|
||||
|
||||
add_interp_update_log(
|
||||
update,
|
||||
&global_replacers,
|
||||
&secret_replacers,
|
||||
);
|
||||
|
||||
Ok(secret_replacers)
|
||||
Ok(interpolator.secret_replacers)
|
||||
} else {
|
||||
Ok(Default::default())
|
||||
}
|
||||
|
||||
@@ -15,22 +15,32 @@ use resolver_api::Resolve;
|
||||
use crate::{
|
||||
helpers::{periphery_client, update::update_update},
|
||||
monitor::update_cache_for_server,
|
||||
resource,
|
||||
permission::get_check_permissions,
|
||||
state::action_states,
|
||||
};
|
||||
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for StartContainer {
|
||||
#[instrument(name = "StartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StartContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -50,7 +60,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StartContainer {
|
||||
@@ -66,7 +76,7 @@ impl Resolve<ExecuteArgs> for StartContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -76,15 +86,25 @@ impl Resolve<ExecuteArgs> for StartContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
#[instrument(name = "RestartContainer", skip(self, user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RestartContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -104,7 +124,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RestartContainer {
|
||||
@@ -122,7 +142,7 @@ impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -132,15 +152,25 @@ impl Resolve<ExecuteArgs> for RestartContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
#[instrument(name = "PauseContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PauseContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -160,7 +190,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::PauseContainer {
|
||||
@@ -176,7 +206,7 @@ impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -186,15 +216,25 @@ impl Resolve<ExecuteArgs> for PauseContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
#[instrument(name = "UnpauseContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"UnpauseContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -214,7 +254,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::UnpauseContainer {
|
||||
@@ -232,7 +272,7 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -242,15 +282,27 @@ impl Resolve<ExecuteArgs> for UnpauseContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StopContainer {
|
||||
#[instrument(name = "StopContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StopContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -270,7 +322,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::StopContainer {
|
||||
@@ -288,7 +340,7 @@ impl Resolve<ExecuteArgs> for StopContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -298,10 +350,22 @@ impl Resolve<ExecuteArgs> for StopContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
#[instrument(name = "DestroyContainer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DestroyContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
container = self.container,
|
||||
signal = format!("{:?}", self.signal),
|
||||
time = self.time,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let DestroyContainer {
|
||||
server,
|
||||
@@ -309,10 +373,10 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
signal,
|
||||
time,
|
||||
} = self;
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -332,7 +396,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
// Send update after setting action state, this way frontend gets correct state.
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::RemoveContainer {
|
||||
@@ -350,7 +414,7 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -360,15 +424,24 @@ impl Resolve<ExecuteArgs> for DestroyContainer {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
#[instrument(name = "StartAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StartAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -387,7 +460,8 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StartAllContainers {})
|
||||
.await
|
||||
.context("failed to start all containers on host")?;
|
||||
@@ -401,7 +475,7 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -410,15 +484,24 @@ impl Resolve<ExecuteArgs> for StartAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
#[instrument(name = "RestartAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RestartAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -437,7 +520,8 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RestartAllContainers {})
|
||||
.await
|
||||
.context("failed to restart all containers on host")?;
|
||||
@@ -453,7 +537,7 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -462,15 +546,24 @@ impl Resolve<ExecuteArgs> for RestartAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
#[instrument(name = "PauseAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PauseAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -489,7 +582,8 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::PauseAllContainers {})
|
||||
.await
|
||||
.context("failed to pause all containers on host")?;
|
||||
@@ -503,7 +597,7 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -512,15 +606,24 @@ impl Resolve<ExecuteArgs> for PauseAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
#[instrument(name = "UnpauseAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"UnpauseAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -539,7 +642,8 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::UnpauseAllContainers {})
|
||||
.await
|
||||
.context("failed to unpause all containers on host")?;
|
||||
@@ -555,7 +659,7 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -564,15 +668,24 @@ impl Resolve<ExecuteArgs> for UnpauseAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
#[instrument(name = "StopAllContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"StopAllContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -591,7 +704,8 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let logs = periphery_client(&server)?
|
||||
let logs = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::StopAllContainers {})
|
||||
.await
|
||||
.context("failed to stop all containers on host")?;
|
||||
@@ -605,7 +719,7 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
);
|
||||
}
|
||||
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -614,15 +728,24 @@ impl Resolve<ExecuteArgs> for StopAllContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
#[instrument(name = "PruneContainers", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneContainers",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -641,7 +764,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::container::PruneContainers {})
|
||||
@@ -660,7 +783,7 @@ impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -670,15 +793,25 @@ impl Resolve<ExecuteArgs> for PruneContainers {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
#[instrument(name = "DeleteNetwork", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DeleteNetwork",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
network = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -686,10 +819,10 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::network::DeleteNetwork {
|
||||
.request(api::docker::DeleteNetwork {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -711,7 +844,7 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -721,15 +854,24 @@ impl Resolve<ExecuteArgs> for DeleteNetwork {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
#[instrument(name = "PruneNetworks", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneNetworks",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -748,10 +890,10 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::network::PruneNetworks {})
|
||||
.request(api::docker::PruneNetworks {})
|
||||
.await
|
||||
.context(format!(
|
||||
"failed to prune networks on server {}",
|
||||
@@ -765,7 +907,7 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -775,15 +917,25 @@ impl Resolve<ExecuteArgs> for PruneNetworks {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
#[instrument(name = "DeleteImage", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DeleteImage",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
image = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -791,10 +943,10 @@ impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::image::DeleteImage {
|
||||
.request(api::docker::DeleteImage {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -813,7 +965,7 @@ impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -823,15 +975,24 @@ impl Resolve<ExecuteArgs> for DeleteImage {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneImages {
|
||||
#[instrument(name = "PruneImages", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneImages",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -850,10 +1011,10 @@ impl Resolve<ExecuteArgs> for PruneImages {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::image::PruneImages {}).await {
|
||||
match periphery.request(api::docker::PruneImages {}).await {
|
||||
Ok(log) => log,
|
||||
Err(e) => Log::error(
|
||||
"prune images",
|
||||
@@ -865,7 +1026,7 @@ impl Resolve<ExecuteArgs> for PruneImages {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -875,15 +1036,25 @@ impl Resolve<ExecuteArgs> for PruneImages {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
#[instrument(name = "DeleteVolume", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"DeleteVolume",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
volume = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -891,10 +1062,10 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery
|
||||
.request(api::volume::DeleteVolume {
|
||||
.request(api::docker::DeleteVolume {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -916,7 +1087,7 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -926,15 +1097,24 @@ impl Resolve<ExecuteArgs> for DeleteVolume {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
#[instrument(name = "PruneVolumes", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneVolumes",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -953,10 +1133,10 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::volume::PruneVolumes {}).await {
|
||||
match periphery.request(api::docker::PruneVolumes {}).await {
|
||||
Ok(log) => log,
|
||||
Err(e) => Log::error(
|
||||
"prune volumes",
|
||||
@@ -968,7 +1148,7 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -978,15 +1158,24 @@ impl Resolve<ExecuteArgs> for PruneVolumes {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
#[instrument(name = "PruneDockerBuilders", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneDockerBuilders",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -1005,7 +1194,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::build::PruneBuilders {}).await {
|
||||
@@ -1020,7 +1209,7 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1030,15 +1219,24 @@ impl Resolve<ExecuteArgs> for PruneDockerBuilders {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
#[instrument(name = "PruneBuildx", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneBuildx",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -1057,7 +1255,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log =
|
||||
match periphery.request(api::build::PruneBuildx {}).await {
|
||||
@@ -1072,7 +1270,7 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
@@ -1082,15 +1280,24 @@ impl Resolve<ExecuteArgs> for PruneBuildx {
|
||||
}
|
||||
|
||||
impl Resolve<ExecuteArgs> for PruneSystem {
|
||||
#[instrument(name = "PruneSystem", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"PruneSystem",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -1109,7 +1316,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
|
||||
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let log = match periphery.request(api::PruneSystem {}).await {
|
||||
Ok(log) => log,
|
||||
@@ -1123,7 +1330,7 @@ impl Resolve<ExecuteArgs> for PruneSystem {
|
||||
};
|
||||
|
||||
update.logs.push(log);
|
||||
update_cache_for_server(&server).await;
|
||||
update_cache_for_server(&server, true).await;
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
@@ -1,156 +0,0 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::{execute::LaunchServer, write::CreateServer},
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
server::PartialServerConfig,
|
||||
server_template::{ServerTemplate, ServerTemplateConfig},
|
||||
update::Update,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::write::WriteArgs,
|
||||
cloud::{
|
||||
aws::ec2::launch_ec2_instance, hetzner::launch_hetzner_server,
|
||||
},
|
||||
helpers::update::update_update,
|
||||
resource,
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for LaunchServer {
|
||||
#[instrument(name = "LaunchServer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
// validate name isn't already taken by another server
|
||||
if db_client()
|
||||
.servers
|
||||
.find_one(doc! {
|
||||
"name": &self.name
|
||||
})
|
||||
.await
|
||||
.context("failed to query db for servers")?
|
||||
.is_some()
|
||||
{
|
||||
return Err(anyhow!("name is already taken").into());
|
||||
}
|
||||
|
||||
let template = resource::get_check_permissions::<ServerTemplate>(
|
||||
&self.server_template,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update.push_simple_log(
|
||||
"launching server",
|
||||
format!("{:#?}", template.config),
|
||||
);
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let config = match template.config {
|
||||
ServerTemplateConfig::Aws(config) => {
|
||||
let region = config.region.clone();
|
||||
let use_https = config.use_https;
|
||||
let port = config.port;
|
||||
let instance =
|
||||
match launch_ec2_instance(&self.name, config).await {
|
||||
Ok(instance) => instance,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"launch server",
|
||||
format!("failed to launch aws instance\n\n{e:#?}"),
|
||||
);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
update.push_simple_log(
|
||||
"launch server",
|
||||
format!(
|
||||
"successfully launched server {} on ip {}",
|
||||
self.name, instance.ip
|
||||
),
|
||||
);
|
||||
let protocol = if use_https { "https" } else { "http" };
|
||||
PartialServerConfig {
|
||||
address: format!("{protocol}://{}:{port}", instance.ip)
|
||||
.into(),
|
||||
region: region.into(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
ServerTemplateConfig::Hetzner(config) => {
|
||||
let datacenter = config.datacenter;
|
||||
let use_https = config.use_https;
|
||||
let port = config.port;
|
||||
let server =
|
||||
match launch_hetzner_server(&self.name, config).await {
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"launch server",
|
||||
format!("failed to launch hetzner server\n\n{e:#?}"),
|
||||
);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
update.push_simple_log(
|
||||
"launch server",
|
||||
format!(
|
||||
"successfully launched server {} on ip {}",
|
||||
self.name, server.ip
|
||||
),
|
||||
);
|
||||
let protocol = if use_https { "https" } else { "http" };
|
||||
PartialServerConfig {
|
||||
address: format!("{protocol}://{}:{port}", server.ip)
|
||||
.into(),
|
||||
region: datacenter.as_ref().to_string().into(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match (CreateServer {
|
||||
name: self.name,
|
||||
config,
|
||||
})
|
||||
.resolve(&WriteArgs { user: user.clone() })
|
||||
.await
|
||||
{
|
||||
Ok(server) => {
|
||||
update.push_simple_log(
|
||||
"create server",
|
||||
format!("created server {} ({})", server.name, server.id),
|
||||
);
|
||||
update.other_data = server.id;
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"create server",
|
||||
format_serror(
|
||||
&e.error.context("failed to create server").into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,6 +1,10 @@
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use formatting::{Color, colored, format_serror};
|
||||
use komodo_client::{
|
||||
api::{execute::RunSync, write::RefreshResourceSyncPending},
|
||||
@@ -16,24 +20,24 @@ use komodo_client::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
update::{Log, Update},
|
||||
user::sync_user,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::oid::ObjectId};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::write::WriteArgs,
|
||||
helpers::{query::get_id_to_tags, update::update_update},
|
||||
resource,
|
||||
helpers::{
|
||||
all_resources::AllResourcesById, query::get_id_to_tags,
|
||||
update::update_update,
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
state::{action_states, db_client},
|
||||
sync::{
|
||||
AllResourcesById, ResourceSyncTrait,
|
||||
ResourceSyncTrait,
|
||||
deploy::{
|
||||
SyncDeployParams, build_deploy_cache, deploy_from_cache,
|
||||
},
|
||||
@@ -45,26 +49,47 @@ use crate::{
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for RunSync {
|
||||
#[instrument(name = "RunSync", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
#[instrument(
|
||||
"RunSync",
|
||||
skip_all,
|
||||
fields(
|
||||
id = id.to_string(),
|
||||
operator = user.id,
|
||||
update_id = update.id,
|
||||
sync = self.sync,
|
||||
resource_type = format!("{:?}", self.resource_type),
|
||||
resources = format!("{:?}", self.resources),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
ExecuteArgs { user, update, id }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let RunSync {
|
||||
sync,
|
||||
resource_type: match_resource_type,
|
||||
resources: match_resources,
|
||||
} = self;
|
||||
let sync = resource::get_check_permissions::<
|
||||
entities::sync::ResourceSync,
|
||||
>(&sync, user, PermissionLevel::Execute)
|
||||
let sync = get_check_permissions::<entities::sync::ResourceSync>(
|
||||
&sync,
|
||||
user,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let repo = if !sync.config.files_on_host
|
||||
&& !sync.config.linked_repo.is_empty()
|
||||
{
|
||||
crate::resource::get::<Repo>(&sync.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// get the action state for the sync (or insert default).
|
||||
let action_state = action_states()
|
||||
.resource_sync
|
||||
.get_or_insert_default(&sync.id)
|
||||
.await;
|
||||
let action_state =
|
||||
action_states().sync.get_or_insert_default(&sync.id).await;
|
||||
|
||||
// This will set action state back to default when dropped.
|
||||
// Will also check to ensure sync not already busy before updating.
|
||||
@@ -83,9 +108,10 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
message,
|
||||
file_errors,
|
||||
..
|
||||
} = crate::sync::remote::get_remote_resources(&sync)
|
||||
.await
|
||||
.context("failed to get remote resources")?;
|
||||
} =
|
||||
crate::sync::remote::get_remote_resources(&sync, repo.as_ref())
|
||||
.await
|
||||
.context("failed to get remote resources")?;
|
||||
|
||||
update.logs.extend(logs);
|
||||
update_update(update.clone()).await?;
|
||||
@@ -142,10 +168,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
.servers
|
||||
.get(&name_or_id)
|
||||
.map(|s| s.name.clone()),
|
||||
ResourceTargetVariant::ServerTemplate => all_resources
|
||||
.templates
|
||||
.get(&name_or_id)
|
||||
.map(|t| t.name.clone()),
|
||||
ResourceTargetVariant::Stack => all_resources
|
||||
.stacks
|
||||
.get(&name_or_id)
|
||||
@@ -200,7 +222,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
deployment_map: &deployments_by_name,
|
||||
stacks: &resources.stacks,
|
||||
stack_map: &stacks_by_name,
|
||||
all_resources: &all_resources,
|
||||
})
|
||||
.await?;
|
||||
|
||||
@@ -210,7 +231,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
get_updates_for_execution::<Server>(
|
||||
resources.servers,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
@@ -224,7 +244,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
get_updates_for_execution::<Stack>(
|
||||
resources.stacks,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
@@ -238,7 +257,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
get_updates_for_execution::<Deployment>(
|
||||
resources.deployments,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
@@ -252,7 +270,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
get_updates_for_execution::<Build>(
|
||||
resources.builds,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
@@ -266,7 +283,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
get_updates_for_execution::<Repo>(
|
||||
resources.repos,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
@@ -280,7 +296,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
get_updates_for_execution::<Procedure>(
|
||||
resources.procedures,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
@@ -294,7 +309,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
get_updates_for_execution::<Action>(
|
||||
resources.actions,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
@@ -308,7 +322,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
get_updates_for_execution::<Builder>(
|
||||
resources.builders,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
@@ -322,21 +335,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
get_updates_for_execution::<Alerter>(
|
||||
resources.alerters,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let server_template_deltas = if sync.config.include_resources {
|
||||
get_updates_for_execution::<ServerTemplate>(
|
||||
resources.server_templates,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
@@ -350,7 +348,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
get_updates_for_execution::<entities::sync::ResourceSync>(
|
||||
resources.resource_syncs,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
@@ -388,7 +385,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
crate::sync::user_groups::get_updates_for_execution(
|
||||
resources.user_groups,
|
||||
delete,
|
||||
&all_resources,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
@@ -397,7 +393,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
|
||||
if deploy_cache.is_empty()
|
||||
&& resource_sync_deltas.no_changes()
|
||||
&& server_template_deltas.no_changes()
|
||||
&& server_deltas.no_changes()
|
||||
&& deployment_deltas.no_changes()
|
||||
&& stack_deltas.no_changes()
|
||||
@@ -451,11 +446,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
&mut update.logs,
|
||||
ResourceSync::execute_sync_updates(resource_sync_deltas).await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
ServerTemplate::execute_sync_updates(server_template_deltas)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Server::execute_sync_updates(server_deltas).await,
|
||||
|
||||
@@ -1,5 +1,11 @@
|
||||
pub mod auth;
|
||||
pub mod execute;
|
||||
pub mod read;
|
||||
pub mod terminal;
|
||||
pub mod user;
|
||||
pub mod write;
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
struct Variant {
|
||||
variant: String,
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_state_cache, action_states},
|
||||
};
|
||||
@@ -24,10 +25,10 @@ impl Resolve<ReadArgs> for GetAction {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Action> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<Action>(
|
||||
get_check_permissions::<Action>(
|
||||
&self.action,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -75,10 +76,10 @@ impl Resolve<ReadArgs> for GetActionActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ActionActionState> {
|
||||
let action = resource::get_check_permissions::<Action>(
|
||||
let action = get_check_permissions::<Action>(
|
||||
&self.action,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -121,8 +122,8 @@ impl Resolve<ReadArgs> for GetActionsSummary {
|
||||
.unwrap_or_default()
|
||||
.get()?,
|
||||
) {
|
||||
(_, action_states) if action_states.running => {
|
||||
res.running += 1;
|
||||
(_, action_states) if action_states.running > 0 => {
|
||||
res.running += action_states.running;
|
||||
}
|
||||
(ActionState::Ok, _) => res.ok += 1,
|
||||
(ActionState::Failed, _) => res.failed += 1,
|
||||
|
||||
@@ -1,4 +1,9 @@
|
||||
use anyhow::Context;
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
GetAlert, GetAlertResponse, ListAlerts, ListAlertsResponse,
|
||||
@@ -8,15 +13,10 @@ use komodo_client::{
|
||||
sync::ResourceSync,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config, resource::get_resource_ids_for_user,
|
||||
config::core_config, permission::get_resource_ids_for_user,
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::Document;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -6,12 +8,11 @@ use komodo_client::{
|
||||
permission::PermissionLevel,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags, resource, state::db_client,
|
||||
helpers::query::get_all_tags, permission::get_check_permissions,
|
||||
resource, state::db_client,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -22,10 +23,10 @@ impl Resolve<ReadArgs> for GetAlerter {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<Alerter>(
|
||||
get_check_permissions::<Alerter>(
|
||||
&self.alerter,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
|
||||
@@ -2,30 +2,27 @@ use std::collections::{HashMap, HashSet};
|
||||
|
||||
use anyhow::Context;
|
||||
use async_timing_util::unix_timestamp_ms;
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use futures::TryStreamExt;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
Operation,
|
||||
build::{Build, BuildActionState, BuildListItem, BuildState},
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
update::UpdateStatus,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_all_tags,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{
|
||||
action_states, build_state_cache, db_client, github_client,
|
||||
},
|
||||
state::{action_states, build_state_cache, db_client},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -36,10 +33,10 @@ impl Resolve<ReadArgs> for GetBuild {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Build> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<Build>(
|
||||
get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -87,10 +84,10 @@ impl Resolve<ReadArgs> for GetBuildActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<BuildActionState> {
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
let build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -218,10 +215,10 @@ impl Resolve<ReadArgs> for ListBuildVersions {
|
||||
patch,
|
||||
limit,
|
||||
} = self;
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
let build = get_check_permissions::<Build>(
|
||||
&build,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -293,81 +290,3 @@ impl Resolve<ReadArgs> for ListCommonBuildExtraArgs {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetBuildWebhookEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetBuildWebhookEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: false,
|
||||
enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.git_provider != "github.com"
|
||||
|| build.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: false,
|
||||
enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = build.config.repo.split('/');
|
||||
let owner = split.next().context("Build repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: false,
|
||||
enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Build repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: true,
|
||||
enabled: true,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetBuildWebhookEnabledResponse {
|
||||
managed: true,
|
||||
enabled: false,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::Document;
|
||||
use database::mungos::mongodb::bson::doc;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -6,12 +8,11 @@ use komodo_client::{
|
||||
permission::PermissionLevel,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags, resource, state::db_client,
|
||||
helpers::query::get_all_tags, permission::get_check_permissions,
|
||||
resource, state::db_client,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -22,10 +23,10 @@ impl Resolve<ReadArgs> for GetBuilder {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<Builder>(
|
||||
get_check_permissions::<Builder>(
|
||||
&self.builder,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
|
||||
@@ -8,19 +8,22 @@ use komodo_client::{
|
||||
Deployment, DeploymentActionState, DeploymentConfig,
|
||||
DeploymentListItem, DeploymentState,
|
||||
},
|
||||
docker::container::ContainerStats,
|
||||
docker::container::{Container, ContainerStats},
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
server::{Server, ServerState},
|
||||
update::Log,
|
||||
},
|
||||
};
|
||||
use periphery_client::api;
|
||||
use periphery_client::api::{self, container::InspectContainer};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::{periphery_client, query::get_all_tags},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, deployment_status_cache},
|
||||
state::{
|
||||
action_states, deployment_status_cache, server_status_cache,
|
||||
},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -31,10 +34,10 @@ impl Resolve<ReadArgs> for GetDeployment {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
get_check_permissions::<Deployment>(
|
||||
&self.deployment,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -92,10 +95,10 @@ impl Resolve<ReadArgs> for GetDeploymentContainer {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetDeploymentContainerResponse> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
&self.deployment,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let status = deployment_status_cache()
|
||||
@@ -126,17 +129,18 @@ impl Resolve<ReadArgs> for GetDeploymentLog {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
..
|
||||
} = resource::get_check_permissions::<Deployment>(
|
||||
} = get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.logs(),
|
||||
)
|
||||
.await?;
|
||||
if server_id.is_empty() {
|
||||
return Ok(Log::default());
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::GetContainerLog {
|
||||
name,
|
||||
tail: cmp::min(tail, MAX_LOG_LENGTH),
|
||||
@@ -164,17 +168,18 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
..
|
||||
} = resource::get_check_permissions::<Deployment>(
|
||||
} = get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.logs(),
|
||||
)
|
||||
.await?;
|
||||
if server_id.is_empty() {
|
||||
return Ok(Log::default());
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::GetContainerLogSearch {
|
||||
name,
|
||||
terms,
|
||||
@@ -188,6 +193,51 @@ impl Resolve<ReadArgs> for SearchDeploymentLog {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for InspectDeploymentContainer {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Container> {
|
||||
let InspectDeploymentContainer { deployment } = self;
|
||||
let Deployment {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
..
|
||||
} = get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
user,
|
||||
PermissionLevel::Read.inspect(),
|
||||
)
|
||||
.await?;
|
||||
if server_id.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot inspect deployment, not attached to any server"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if cache.state != ServerState::Ok {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot inspect container: server is {:?}",
|
||||
cache.state
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer { name })
|
||||
.await?;
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetDeploymentStats {
|
||||
async fn resolve(
|
||||
self,
|
||||
@@ -197,10 +247,10 @@ impl Resolve<ReadArgs> for GetDeploymentStats {
|
||||
name,
|
||||
config: DeploymentConfig { server_id, .. },
|
||||
..
|
||||
} = resource::get_check_permissions::<Deployment>(
|
||||
} = get_check_permissions::<Deployment>(
|
||||
&self.deployment,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
if server_id.is_empty() {
|
||||
@@ -209,7 +259,8 @@ impl Resolve<ReadArgs> for GetDeploymentStats {
|
||||
);
|
||||
}
|
||||
let server = resource::get::<Server>(&server_id).await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::GetContainerStats { name })
|
||||
.await
|
||||
.context("failed to get stats from periphery")?;
|
||||
@@ -222,10 +273,10 @@ impl Resolve<ReadArgs> for GetDeploymentActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<DeploymentActionState> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
&self.deployment,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -267,7 +318,9 @@ impl Resolve<ReadArgs> for GetDeploymentsSummary {
|
||||
res.not_deployed += 1;
|
||||
}
|
||||
DeploymentState::Unknown => {
|
||||
res.unknown += 1;
|
||||
if !deployment.template {
|
||||
res.unknown += 1;
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
res.unhealthy += 1;
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
use std::{collections::HashSet, sync::OnceLock, time::Instant};
|
||||
use std::{collections::HashSet, time::Instant};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::{Extension, Router, middleware, routing::post};
|
||||
use axum::{
|
||||
Extension, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -18,27 +20,33 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request, config::core_config, helpers::periphery_client,
|
||||
auth::auth_request,
|
||||
config::{core_config, core_keys},
|
||||
helpers::periphery_client,
|
||||
resource,
|
||||
};
|
||||
|
||||
use super::Variant;
|
||||
|
||||
mod action;
|
||||
mod alert;
|
||||
mod alerter;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod onboarding_key;
|
||||
mod permission;
|
||||
mod procedure;
|
||||
mod provider;
|
||||
mod repo;
|
||||
mod schedule;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod stack;
|
||||
mod sync;
|
||||
mod tag;
|
||||
@@ -67,7 +75,7 @@ enum ReadRequest {
|
||||
|
||||
// ==== USER ====
|
||||
GetUsername(GetUsername),
|
||||
GetPermissionLevel(GetPermissionLevel),
|
||||
GetPermission(GetPermission),
|
||||
FindUser(FindUser),
|
||||
ListUsers(ListUsers),
|
||||
ListApiKeys(ListApiKeys),
|
||||
@@ -93,36 +101,54 @@ enum ReadRequest {
|
||||
ListActions(ListActions),
|
||||
ListFullActions(ListFullActions),
|
||||
|
||||
// ==== SERVER TEMPLATE ====
|
||||
GetServerTemplate(GetServerTemplate),
|
||||
GetServerTemplatesSummary(GetServerTemplatesSummary),
|
||||
ListServerTemplates(ListServerTemplates),
|
||||
ListFullServerTemplates(ListFullServerTemplates),
|
||||
// ==== SCHEDULE ====
|
||||
ListSchedules(ListSchedules),
|
||||
|
||||
// ==== SERVER ====
|
||||
GetServersSummary(GetServersSummary),
|
||||
GetServer(GetServer),
|
||||
GetServerState(GetServerState),
|
||||
GetPeripheryVersion(GetPeripheryVersion),
|
||||
GetPeripheryInformation(GetPeripheryInformation),
|
||||
GetServerActionState(GetServerActionState),
|
||||
GetHistoricalServerStats(GetHistoricalServerStats),
|
||||
ListServers(ListServers),
|
||||
ListFullServers(ListFullServers),
|
||||
ListTerminals(ListTerminals),
|
||||
|
||||
// ==== DOCKER ====
|
||||
GetDockerContainersSummary(GetDockerContainersSummary),
|
||||
ListAllDockerContainers(ListAllDockerContainers),
|
||||
ListDockerContainers(ListDockerContainers),
|
||||
InspectDockerContainer(InspectDockerContainer),
|
||||
GetResourceMatchingContainer(GetResourceMatchingContainer),
|
||||
GetContainerLog(GetContainerLog),
|
||||
SearchContainerLog(SearchContainerLog),
|
||||
ListComposeProjects(ListComposeProjects),
|
||||
ListDockerNetworks(ListDockerNetworks),
|
||||
InspectDockerNetwork(InspectDockerNetwork),
|
||||
ListDockerImages(ListDockerImages),
|
||||
InspectDockerImage(InspectDockerImage),
|
||||
ListDockerImageHistory(ListDockerImageHistory),
|
||||
InspectDockerVolume(InspectDockerVolume),
|
||||
GetDockerContainersSummary(GetDockerContainersSummary),
|
||||
ListAllDockerContainers(ListAllDockerContainers),
|
||||
ListDockerContainers(ListDockerContainers),
|
||||
ListDockerNetworks(ListDockerNetworks),
|
||||
ListDockerImages(ListDockerImages),
|
||||
ListDockerVolumes(ListDockerVolumes),
|
||||
ListComposeProjects(ListComposeProjects),
|
||||
InspectDockerVolume(InspectDockerVolume),
|
||||
|
||||
// ==== SERVER STATS ====
|
||||
GetSystemInformation(GetSystemInformation),
|
||||
GetSystemStats(GetSystemStats),
|
||||
ListSystemProcesses(ListSystemProcesses),
|
||||
|
||||
// ==== STACK ====
|
||||
GetStacksSummary(GetStacksSummary),
|
||||
GetStack(GetStack),
|
||||
GetStackActionState(GetStackActionState),
|
||||
GetStackLog(GetStackLog),
|
||||
SearchStackLog(SearchStackLog),
|
||||
InspectStackContainer(InspectStackContainer),
|
||||
ListStacks(ListStacks),
|
||||
ListFullStacks(ListFullStacks),
|
||||
ListStackServices(ListStackServices),
|
||||
ListCommonStackExtraArgs(ListCommonStackExtraArgs),
|
||||
ListCommonStackBuildExtraArgs(ListCommonStackBuildExtraArgs),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
GetDeploymentsSummary(GetDeploymentsSummary),
|
||||
@@ -132,6 +158,7 @@ enum ReadRequest {
|
||||
GetDeploymentStats(GetDeploymentStats),
|
||||
GetDeploymentLog(GetDeploymentLog),
|
||||
SearchDeploymentLog(SearchDeploymentLog),
|
||||
InspectDeploymentContainer(InspectDeploymentContainer),
|
||||
ListDeployments(ListDeployments),
|
||||
ListFullDeployments(ListFullDeployments),
|
||||
ListCommonDeploymentExtraArgs(ListCommonDeploymentExtraArgs),
|
||||
@@ -142,7 +169,6 @@ enum ReadRequest {
|
||||
GetBuildActionState(GetBuildActionState),
|
||||
GetBuildMonthlyStats(GetBuildMonthlyStats),
|
||||
ListBuildVersions(ListBuildVersions),
|
||||
GetBuildWebhookEnabled(GetBuildWebhookEnabled),
|
||||
ListBuilds(ListBuilds),
|
||||
ListFullBuilds(ListFullBuilds),
|
||||
ListCommonBuildExtraArgs(ListCommonBuildExtraArgs),
|
||||
@@ -151,7 +177,6 @@ enum ReadRequest {
|
||||
GetReposSummary(GetReposSummary),
|
||||
GetRepo(GetRepo),
|
||||
GetRepoActionState(GetRepoActionState),
|
||||
GetRepoWebhooksEnabled(GetRepoWebhooksEnabled),
|
||||
ListRepos(ListRepos),
|
||||
ListFullRepos(ListFullRepos),
|
||||
|
||||
@@ -159,23 +184,9 @@ enum ReadRequest {
|
||||
GetResourceSyncsSummary(GetResourceSyncsSummary),
|
||||
GetResourceSync(GetResourceSync),
|
||||
GetResourceSyncActionState(GetResourceSyncActionState),
|
||||
GetSyncWebhooksEnabled(GetSyncWebhooksEnabled),
|
||||
ListResourceSyncs(ListResourceSyncs),
|
||||
ListFullResourceSyncs(ListFullResourceSyncs),
|
||||
|
||||
// ==== STACK ====
|
||||
GetStacksSummary(GetStacksSummary),
|
||||
GetStack(GetStack),
|
||||
GetStackActionState(GetStackActionState),
|
||||
GetStackWebhooksEnabled(GetStackWebhooksEnabled),
|
||||
GetStackLog(GetStackLog),
|
||||
SearchStackLog(SearchStackLog),
|
||||
ListStacks(ListStacks),
|
||||
ListFullStacks(ListFullStacks),
|
||||
ListStackServices(ListStackServices),
|
||||
ListCommonStackExtraArgs(ListCommonStackExtraArgs),
|
||||
ListCommonStackBuildExtraArgs(ListCommonStackBuildExtraArgs),
|
||||
|
||||
// ==== BUILDER ====
|
||||
GetBuildersSummary(GetBuildersSummary),
|
||||
GetBuilder(GetBuilder),
|
||||
@@ -204,11 +215,6 @@ enum ReadRequest {
|
||||
ListAlerts(ListAlerts),
|
||||
GetAlert(GetAlert),
|
||||
|
||||
// ==== SERVER STATS ====
|
||||
GetSystemInformation(GetSystemInformation),
|
||||
GetSystemStats(GetSystemStats),
|
||||
ListSystemProcesses(ListSystemProcesses),
|
||||
|
||||
// ==== VARIABLE ====
|
||||
GetVariable(GetVariable),
|
||||
ListVariables(ListVariables),
|
||||
@@ -218,15 +224,30 @@ enum ReadRequest {
|
||||
ListGitProviderAccounts(ListGitProviderAccounts),
|
||||
GetDockerRegistryAccount(GetDockerRegistryAccount),
|
||||
ListDockerRegistryAccounts(ListDockerRegistryAccounts),
|
||||
|
||||
// ==== ONBOARDING KEY ====
|
||||
ListOnboardingKeys(ListOnboardingKeys),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
#[instrument(name = "ReadHandler", level = "debug", skip(user), fields(user_id = user.id))]
|
||||
async fn variant_handler(
|
||||
user: Extension<User>,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let req: ReadRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<ReadRequest>,
|
||||
@@ -254,11 +275,13 @@ impl Resolve<ReadArgs> for GetVersion {
|
||||
}
|
||||
}
|
||||
|
||||
fn core_info() -> &'static GetCoreInfoResponse {
|
||||
static CORE_INFO: OnceLock<GetCoreInfoResponse> = OnceLock::new();
|
||||
CORE_INFO.get_or_init(|| {
|
||||
impl Resolve<ReadArgs> for GetCoreInfo {
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &ReadArgs,
|
||||
) -> serror::Result<GetCoreInfoResponse> {
|
||||
let config = core_config();
|
||||
GetCoreInfoResponse {
|
||||
let info = GetCoreInfoResponse {
|
||||
title: config.title.clone(),
|
||||
monitoring_interval: config.monitoring_interval,
|
||||
webhook_base_url: if config.webhook_base_url.is_empty() {
|
||||
@@ -270,22 +293,12 @@ fn core_info() -> &'static GetCoreInfoResponse {
|
||||
ui_write_disabled: config.ui_write_disabled,
|
||||
disable_confirm_dialog: config.disable_confirm_dialog,
|
||||
disable_non_admin_create: config.disable_non_admin_create,
|
||||
github_webhook_owners: config
|
||||
.github_webhook_app
|
||||
.installations
|
||||
.iter()
|
||||
.map(|i| i.namespace.to_string())
|
||||
.collect(),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetCoreInfo {
|
||||
async fn resolve(
|
||||
self,
|
||||
_: &ReadArgs,
|
||||
) -> serror::Result<GetCoreInfoResponse> {
|
||||
Ok(core_info().clone())
|
||||
disable_websocket_reconnect: config.disable_websocket_reconnect,
|
||||
enable_fancy_toml: config.enable_fancy_toml,
|
||||
timezone: config.timezone.clone(),
|
||||
public_key: core_keys().load().public.to_string(),
|
||||
};
|
||||
Ok(info)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -321,7 +334,8 @@ impl Resolve<ReadArgs> for ListSecrets {
|
||||
};
|
||||
if let Some(id) = server_id {
|
||||
let server = resource::get::<Server>(&id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery_client::api::ListSecrets {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
@@ -490,7 +504,8 @@ async fn merge_git_providers_for_server(
|
||||
server_id: &str,
|
||||
) -> serror::Result<()> {
|
||||
let server = resource::get::<Server>(server_id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery_client::api::ListGitProviders {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
@@ -528,7 +543,8 @@ async fn merge_docker_registries_for_server(
|
||||
server_id: &str,
|
||||
) -> serror::Result<()> {
|
||||
let server = resource::get::<Server>(server_id).await?;
|
||||
let more = periphery_client(&server)?
|
||||
let more = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery_client::api::ListDockerRegistries {})
|
||||
.await
|
||||
.with_context(|| {
|
||||
|
||||
51
bin/core/src/api/read/onboarding_key.rs
Normal file
51
bin/core/src/api/read/onboarding_key.rs
Normal file
@@ -0,0 +1,51 @@
|
||||
use std::cmp::Ordering;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::find::find_collect;
|
||||
use komodo_client::api::read::{
|
||||
ListOnboardingKeys, ListOnboardingKeysResponse,
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{api::read::ReadArgs, state::db_client};
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<ReadArgs> for ListOnboardingKeys {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user: admin }: &ReadArgs,
|
||||
) -> serror::Result<ListOnboardingKeysResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let mut keys =
|
||||
find_collect(&db_client().onboarding_keys, None, None)
|
||||
.await
|
||||
.context(
|
||||
"Failed to query database for Server onboarding keys",
|
||||
)?;
|
||||
|
||||
// No expiry keys first, followed
|
||||
keys.sort_by(|a, b| {
|
||||
if a.expires == b.expires {
|
||||
Ordering::Equal
|
||||
} else if a.expires == 0 {
|
||||
Ordering::Less
|
||||
} else if b.expires == 0 {
|
||||
Ordering::Greater
|
||||
} else {
|
||||
// Descending
|
||||
b.expires.cmp(&a.expires)
|
||||
}
|
||||
});
|
||||
|
||||
Ok(keys)
|
||||
}
|
||||
}
|
||||
@@ -1,13 +1,13 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
GetPermissionLevel, GetPermissionLevelResponse, ListPermissions,
|
||||
GetPermission, GetPermissionResponse, ListPermissions,
|
||||
ListPermissionsResponse, ListUserTargetPermissions,
|
||||
ListUserTargetPermissionsResponse,
|
||||
},
|
||||
entities::permission::PermissionLevel,
|
||||
};
|
||||
use mungos::{find::find_collect, mongodb::bson::doc};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
@@ -35,13 +35,13 @@ impl Resolve<ReadArgs> for ListPermissions {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetPermissionLevel {
|
||||
impl Resolve<ReadArgs> for GetPermission {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetPermissionLevelResponse> {
|
||||
) -> serror::Result<GetPermissionResponse> {
|
||||
if user.admin {
|
||||
return Ok(PermissionLevel::Write);
|
||||
return Ok(PermissionLevel::Write.all());
|
||||
}
|
||||
Ok(get_user_permission_on_target(user, &self.target).await?)
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, procedure_state_cache},
|
||||
};
|
||||
@@ -22,10 +23,10 @@ impl Resolve<ReadArgs> for GetProcedure {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetProcedureResponse> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<Procedure>(
|
||||
get_check_permissions::<Procedure>(
|
||||
&self.procedure,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -120,10 +121,10 @@ impl Resolve<ReadArgs> for GetProcedureActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetProcedureActionStateResponse> {
|
||||
let procedure = resource::get_check_permissions::<Procedure>(
|
||||
let procedure = get_check_permissions::<Procedure>(
|
||||
&self.procedure,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use komodo_client::api::read::*;
|
||||
use mongo_indexed::{Document, doc};
|
||||
use mungos::{
|
||||
use database::mongo_indexed::{Document, doc};
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id, find::find_collect,
|
||||
mongodb::options::FindOptions,
|
||||
};
|
||||
use komodo_client::api::read::*;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
@@ -2,7 +2,6 @@ use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
repo::{Repo, RepoActionState, RepoListItem, RepoState},
|
||||
},
|
||||
@@ -10,10 +9,10 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_all_tags,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, github_client, repo_state_cache},
|
||||
state::{action_states, repo_state_cache},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -24,10 +23,10 @@ impl Resolve<ReadArgs> for GetRepo {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<Repo>(
|
||||
get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -75,10 +74,10 @@ impl Resolve<ReadArgs> for GetRepoActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<RepoActionState> {
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -132,7 +131,11 @@ impl Resolve<ReadArgs> for GetReposSummary {
|
||||
}
|
||||
(RepoState::Ok, _) => res.ok += 1,
|
||||
(RepoState::Failed, _) => res.failed += 1,
|
||||
(RepoState::Unknown, _) => res.unknown += 1,
|
||||
(RepoState::Unknown, _) => {
|
||||
if !repo.template {
|
||||
res.unknown += 1
|
||||
}
|
||||
}
|
||||
// will never come off the cache in the building state, since that comes from action states
|
||||
(RepoState::Cloning, _)
|
||||
| (RepoState::Pulling, _)
|
||||
@@ -145,104 +148,3 @@ impl Resolve<ReadArgs> for GetReposSummary {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetRepoWebhooksEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetRepoWebhooksEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.git_provider != "github.com"
|
||||
|| repo.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = repo.config.repo.split('/');
|
||||
let owner = split.next().context("Repo repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
clone_enabled: false,
|
||||
pull_enabled: false,
|
||||
build_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let clone_url =
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id);
|
||||
let pull_url =
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id);
|
||||
let build_url =
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id);
|
||||
|
||||
let mut clone_enabled = false;
|
||||
let mut pull_enabled = false;
|
||||
let mut build_enabled = false;
|
||||
|
||||
for webhook in webhooks {
|
||||
if !webhook.active {
|
||||
continue;
|
||||
}
|
||||
if webhook.config.url == clone_url {
|
||||
clone_enabled = true
|
||||
}
|
||||
if webhook.config.url == pull_url {
|
||||
pull_enabled = true
|
||||
}
|
||||
if webhook.config.url == build_url {
|
||||
build_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetRepoWebhooksEnabledResponse {
|
||||
managed: true,
|
||||
clone_enabled,
|
||||
pull_enabled,
|
||||
build_enabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
104
bin/core/src/api/read/schedule.rs
Normal file
104
bin/core/src/api/read/schedule.rs
Normal file
@@ -0,0 +1,104 @@
|
||||
use futures::future::join_all;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
ResourceTarget,
|
||||
action::Action,
|
||||
procedure::Procedure,
|
||||
resource::{ResourceQuery, TemplatesQueryBehavior},
|
||||
schedule::Schedule,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::{get_all_tags, get_last_run_at},
|
||||
resource::list_full_for_user,
|
||||
schedule::get_schedule_item_info,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
|
||||
impl Resolve<ReadArgs> for ListSchedules {
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &ReadArgs,
|
||||
) -> serror::Result<Vec<Schedule>> {
|
||||
let all_tags = get_all_tags(None).await?;
|
||||
let (actions, procedures) = tokio::try_join!(
|
||||
list_full_for_user::<Action>(
|
||||
ResourceQuery {
|
||||
names: Default::default(),
|
||||
templates: TemplatesQueryBehavior::Include,
|
||||
tag_behavior: self.tag_behavior,
|
||||
tags: self.tags.clone(),
|
||||
specific: Default::default(),
|
||||
},
|
||||
&args.user,
|
||||
&all_tags,
|
||||
),
|
||||
list_full_for_user::<Procedure>(
|
||||
ResourceQuery {
|
||||
names: Default::default(),
|
||||
templates: TemplatesQueryBehavior::Include,
|
||||
tag_behavior: self.tag_behavior,
|
||||
tags: self.tags.clone(),
|
||||
specific: Default::default(),
|
||||
},
|
||||
&args.user,
|
||||
&all_tags,
|
||||
)
|
||||
)?;
|
||||
let actions = actions.into_iter().map(async |action| {
|
||||
let (next_scheduled_run, schedule_error) =
|
||||
get_schedule_item_info(&ResourceTarget::Action(
|
||||
action.id.clone(),
|
||||
));
|
||||
let last_run_at =
|
||||
get_last_run_at::<Action>(&action.id).await.unwrap_or(None);
|
||||
Schedule {
|
||||
target: ResourceTarget::Action(action.id),
|
||||
name: action.name,
|
||||
enabled: action.config.schedule_enabled,
|
||||
schedule_format: action.config.schedule_format,
|
||||
schedule: action.config.schedule,
|
||||
schedule_timezone: action.config.schedule_timezone,
|
||||
tags: action.tags,
|
||||
last_run_at,
|
||||
next_scheduled_run,
|
||||
schedule_error,
|
||||
}
|
||||
});
|
||||
let procedures = procedures.into_iter().map(async |procedure| {
|
||||
let (next_scheduled_run, schedule_error) =
|
||||
get_schedule_item_info(&ResourceTarget::Procedure(
|
||||
procedure.id.clone(),
|
||||
));
|
||||
let last_run_at = get_last_run_at::<Procedure>(&procedure.id)
|
||||
.await
|
||||
.unwrap_or(None);
|
||||
Schedule {
|
||||
target: ResourceTarget::Procedure(procedure.id),
|
||||
name: procedure.name,
|
||||
enabled: procedure.config.schedule_enabled,
|
||||
schedule_format: procedure.config.schedule_format,
|
||||
schedule: procedure.config.schedule,
|
||||
schedule_timezone: procedure.config.schedule_timezone,
|
||||
tags: procedure.tags,
|
||||
last_run_at,
|
||||
next_scheduled_run,
|
||||
schedule_error,
|
||||
}
|
||||
});
|
||||
let (actions, procedures) =
|
||||
tokio::join!(join_all(actions), join_all(procedures));
|
||||
|
||||
Ok(
|
||||
actions
|
||||
.into_iter()
|
||||
.chain(procedures)
|
||||
.filter(|s| !s.schedule.is_empty())
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -8,6 +8,10 @@ use anyhow::{Context, anyhow};
|
||||
use async_timing_util::{
|
||||
FIFTEEN_SECONDS_MS, get_timelength_in_ms, unix_timestamp_ms,
|
||||
};
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -21,31 +25,32 @@ use komodo_client::{
|
||||
network::Network,
|
||||
volume::Volume,
|
||||
},
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::{
|
||||
Server, ServerActionState, ServerListItem, ServerState,
|
||||
TerminalInfo,
|
||||
},
|
||||
stack::{Stack, StackServiceNames},
|
||||
stats::{SystemInformation, SystemProcess},
|
||||
update::Log,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use periphery_client::api::{
|
||||
self as periphery,
|
||||
container::InspectContainer,
|
||||
image::{ImageHistory, InspectImage},
|
||||
network::InspectNetwork,
|
||||
volume::InspectVolume,
|
||||
docker::{
|
||||
ImageHistory, InspectImage, InspectNetwork, InspectVolume,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCode;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::{
|
||||
helpers::{periphery_client, query::get_all_tags},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
stack::compose_container_match_regex,
|
||||
state::{action_states, db_client, server_status_cache},
|
||||
@@ -64,18 +69,29 @@ impl Resolve<ReadArgs> for GetServersSummary {
|
||||
&[],
|
||||
)
|
||||
.await?;
|
||||
|
||||
let core_version = env!("CARGO_PKG_VERSION");
|
||||
let mut res = GetServersSummaryResponse::default();
|
||||
|
||||
for server in servers {
|
||||
res.total += 1;
|
||||
match server.info.state {
|
||||
ServerState::Ok => {
|
||||
res.healthy += 1;
|
||||
// Check for version mismatch
|
||||
if matches!(&server.info.version, Some(version) if version != core_version)
|
||||
{
|
||||
res.warning += 1;
|
||||
} else {
|
||||
res.healthy += 1;
|
||||
}
|
||||
}
|
||||
ServerState::NotOk => {
|
||||
res.unhealthy += 1;
|
||||
}
|
||||
ServerState::Disabled => {
|
||||
res.disabled += 1;
|
||||
if !server.template {
|
||||
res.disabled += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -83,36 +99,16 @@ impl Resolve<ReadArgs> for GetServersSummary {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetPeripheryVersion {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetPeripheryVersionResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
let version = server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.map(|s| s.version.clone())
|
||||
.unwrap_or(String::from("unknown"));
|
||||
Ok(GetPeripheryVersionResponse { version })
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetServer {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Server> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<Server>(
|
||||
get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -160,10 +156,10 @@ impl Resolve<ReadArgs> for GetServerState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetServerStateResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let status = server_status_cache()
|
||||
@@ -182,10 +178,10 @@ impl Resolve<ReadArgs> for GetServerActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ServerActionState> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -198,14 +194,27 @@ impl Resolve<ReadArgs> for GetServerActionState {
|
||||
}
|
||||
}
|
||||
|
||||
// This protects the peripheries from spam requests
|
||||
const SYSTEM_INFO_EXPIRY: u128 = FIFTEEN_SECONDS_MS;
|
||||
type SystemInfoCache =
|
||||
Mutex<HashMap<String, Arc<(SystemInformation, u128)>>>;
|
||||
fn system_info_cache() -> &'static SystemInfoCache {
|
||||
static SYSTEM_INFO_CACHE: OnceLock<SystemInfoCache> =
|
||||
OnceLock::new();
|
||||
SYSTEM_INFO_CACHE.get_or_init(Default::default)
|
||||
impl Resolve<ReadArgs> for GetPeripheryInformation {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetPeripheryInformationResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.periphery_info
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.context("Server status missing Periphery Info. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetSystemInformation {
|
||||
@@ -213,31 +222,22 @@ impl Resolve<ReadArgs> for GetSystemInformation {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<SystemInformation> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut lock = system_info_cache().lock().await;
|
||||
let res = match lock.get(&server.id) {
|
||||
Some(cached) if cached.1 > unix_timestamp_ms() => {
|
||||
cached.0.clone()
|
||||
}
|
||||
_ => {
|
||||
let stats = periphery_client(&server)?
|
||||
.request(periphery::stats::GetSystemInformation {})
|
||||
.await?;
|
||||
lock.insert(
|
||||
server.id,
|
||||
(stats.clone(), unix_timestamp_ms() + SYSTEM_INFO_EXPIRY)
|
||||
.into(),
|
||||
);
|
||||
stats
|
||||
}
|
||||
};
|
||||
Ok(res)
|
||||
.await
|
||||
.status_code(StatusCode::BAD_REQUEST)?;
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.system_info
|
||||
.as_ref()
|
||||
.cloned()
|
||||
.context("Server status missing system Info. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -246,21 +246,21 @@ impl Resolve<ReadArgs> for GetSystemStats {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetSystemStatsResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let status =
|
||||
server_status_cache().get(&server.id).await.with_context(
|
||||
|| format!("did not find status for server at {}", server.id),
|
||||
)?;
|
||||
let stats = status
|
||||
.stats
|
||||
server_status_cache()
|
||||
.get(&server.id)
|
||||
.await
|
||||
.context("Missing server status")?
|
||||
.system_stats
|
||||
.as_ref()
|
||||
.context("server stats not available")?;
|
||||
Ok(stats.clone())
|
||||
.cloned()
|
||||
.context("Server status missing system stats. The Server may be disconnected.")
|
||||
.status_code(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -278,10 +278,10 @@ impl Resolve<ReadArgs> for ListSystemProcesses {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListSystemProcessesResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.processes(),
|
||||
)
|
||||
.await?;
|
||||
let mut lock = processes_cache().lock().await;
|
||||
@@ -290,7 +290,8 @@ impl Resolve<ReadArgs> for ListSystemProcesses {
|
||||
cached.0.clone()
|
||||
}
|
||||
_ => {
|
||||
let stats = periphery_client(&server)?
|
||||
let stats = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery::stats::GetSystemProcesses {})
|
||||
.await?;
|
||||
lock.insert(
|
||||
@@ -317,10 +318,10 @@ impl Resolve<ReadArgs> for GetHistoricalServerStats {
|
||||
granularity,
|
||||
page,
|
||||
} = self;
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let granularity =
|
||||
@@ -365,10 +366,10 @@ impl Resolve<ReadArgs> for ListDockerContainers {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListDockerContainersResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -459,10 +460,10 @@ impl Resolve<ReadArgs> for InspectDockerContainer {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Container> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.inspect(),
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -477,7 +478,8 @@ impl Resolve<ReadArgs> for InspectDockerContainer {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer {
|
||||
name: self.container,
|
||||
})
|
||||
@@ -499,13 +501,14 @@ impl Resolve<ReadArgs> for GetContainerLog {
|
||||
tail,
|
||||
timestamps,
|
||||
} = self;
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.logs(),
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery::container::GetContainerLog {
|
||||
name: container,
|
||||
tail: cmp::min(tail, MAX_LOG_LENGTH),
|
||||
@@ -530,13 +533,14 @@ impl Resolve<ReadArgs> for SearchContainerLog {
|
||||
invert,
|
||||
timestamps,
|
||||
} = self;
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.logs(),
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery::container::GetContainerLogSearch {
|
||||
name: container,
|
||||
terms,
|
||||
@@ -555,10 +559,10 @@ impl Resolve<ReadArgs> for GetResourceMatchingContainer {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetResourceMatchingContainerResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
// first check deployments
|
||||
@@ -616,10 +620,10 @@ impl Resolve<ReadArgs> for ListDockerNetworks {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListDockerNetworksResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -638,10 +642,10 @@ impl Resolve<ReadArgs> for InspectDockerNetwork {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Network> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -656,7 +660,8 @@ impl Resolve<ReadArgs> for InspectDockerNetwork {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectNetwork { name: self.network })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -668,10 +673,10 @@ impl Resolve<ReadArgs> for ListDockerImages {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListDockerImagesResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -690,10 +695,10 @@ impl Resolve<ReadArgs> for InspectDockerImage {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Image> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -705,7 +710,8 @@ impl Resolve<ReadArgs> for InspectDockerImage {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectImage { name: self.image })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -717,10 +723,10 @@ impl Resolve<ReadArgs> for ListDockerImageHistory {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Vec<ImageHistoryResponseItem>> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -735,7 +741,8 @@ impl Resolve<ReadArgs> for ListDockerImageHistory {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(ImageHistory { name: self.image })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -747,10 +754,10 @@ impl Resolve<ReadArgs> for ListDockerVolumes {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListDockerVolumesResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -769,10 +776,10 @@ impl Resolve<ReadArgs> for InspectDockerVolume {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Volume> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -784,7 +791,8 @@ impl Resolve<ReadArgs> for InspectDockerVolume {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let res = periphery_client(&server)?
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectVolume { name: self.volume })
|
||||
.await?;
|
||||
Ok(res)
|
||||
@@ -796,10 +804,10 @@ impl Resolve<ReadArgs> for ListComposeProjects {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListComposeProjectsResponse> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -812,3 +820,69 @@ impl Resolve<ReadArgs> for ListComposeProjects {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct TerminalCacheItem {
|
||||
list: Vec<TerminalInfo>,
|
||||
ttl: i64,
|
||||
}
|
||||
|
||||
const TERMINAL_CACHE_TIMEOUT: i64 = 30_000;
|
||||
|
||||
#[derive(Default)]
|
||||
struct TerminalCache(
|
||||
std::sync::Mutex<
|
||||
HashMap<String, Arc<tokio::sync::Mutex<TerminalCacheItem>>>,
|
||||
>,
|
||||
);
|
||||
|
||||
impl TerminalCache {
|
||||
fn get_or_insert(
|
||||
&self,
|
||||
server_id: String,
|
||||
) -> Arc<tokio::sync::Mutex<TerminalCacheItem>> {
|
||||
if let Some(cached) =
|
||||
self.0.lock().unwrap().get(&server_id).cloned()
|
||||
{
|
||||
return cached;
|
||||
}
|
||||
let to_cache =
|
||||
Arc::new(tokio::sync::Mutex::new(TerminalCacheItem::default()));
|
||||
self.0.lock().unwrap().insert(server_id, to_cache.clone());
|
||||
to_cache
|
||||
}
|
||||
}
|
||||
|
||||
fn terminals_cache() -> &'static TerminalCache {
|
||||
static TERMINALS: OnceLock<TerminalCache> = OnceLock::new();
|
||||
TERMINALS.get_or_init(Default::default)
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListTerminals {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListTerminalsResponse> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
let cache = terminals_cache().get_or_insert(server.id.clone());
|
||||
let mut cache = cache.lock().await;
|
||||
if self.fresh || komodo_timestamp() > cache.ttl {
|
||||
cache.list = periphery_client(&server)
|
||||
.await?
|
||||
.request(periphery_client::api::terminal::ListTerminals {
|
||||
container: None,
|
||||
})
|
||||
.await
|
||||
.context("Failed to get fresh terminal list")?;
|
||||
cache.ttl = komodo_timestamp() + TERMINAL_CACHE_TIMEOUT;
|
||||
Ok(cache.list.clone())
|
||||
} else {
|
||||
Ok(cache.list.clone())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,97 +0,0 @@
|
||||
use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
permission::PermissionLevel, server_template::ServerTemplate,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags, resource, state::db_client,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
|
||||
impl Resolve<ReadArgs> for GetServerTemplate {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetServerTemplateResponse> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
&self.server_template,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListServerTemplates {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListServerTemplatesResponse> {
|
||||
let all_tags = if self.query.tags.is_empty() {
|
||||
vec![]
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<ServerTemplate>(
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListFullServerTemplates {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListFullServerTemplatesResponse> {
|
||||
let all_tags = if self.query.tags.is_empty() {
|
||||
vec![]
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<ServerTemplate>(
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetServerTemplatesSummary {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetServerTemplatesSummaryResponse> {
|
||||
let query = match resource::get_resource_object_ids_for_user::<
|
||||
ServerTemplate,
|
||||
>(user)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
"_id": { "$in": ids }
|
||||
},
|
||||
None => Document::new(),
|
||||
};
|
||||
let total = db_client()
|
||||
.server_templates
|
||||
.count_documents(query)
|
||||
.await
|
||||
.context("failed to count all server template documents")?;
|
||||
let res = GetServerTemplatesSummaryResponse {
|
||||
total: total as u32,
|
||||
};
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
@@ -1,25 +1,27 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use anyhow::Context;
|
||||
use anyhow::{Context, anyhow};
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
docker::container::Container,
|
||||
permission::PermissionLevel,
|
||||
server::{Server, ServerState},
|
||||
stack::{Stack, StackActionState, StackListItem, StackState},
|
||||
},
|
||||
};
|
||||
use periphery_client::api::compose::{
|
||||
GetComposeLog, GetComposeLogSearch,
|
||||
use periphery_client::api::{
|
||||
compose::{GetComposeLog, GetComposeLogSearch},
|
||||
container::InspectContainer,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::{periphery_client, query::get_all_tags},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
stack::get_stack_and_server,
|
||||
state::{action_states, github_client, stack_status_cache},
|
||||
state::{action_states, server_status_cache, stack_status_cache},
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -30,10 +32,10 @@ impl Resolve<ReadArgs> for GetStack {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Stack> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<Stack>(
|
||||
get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -45,10 +47,10 @@ impl Resolve<ReadArgs> for ListStackServices {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListStackServicesResponse> {
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -75,10 +77,15 @@ impl Resolve<ReadArgs> for GetStackLog {
|
||||
tail,
|
||||
timestamps,
|
||||
} = self;
|
||||
let (stack, server) =
|
||||
get_stack_and_server(&stack, user, PermissionLevel::Read, true)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let (stack, server) = get_stack_and_server(
|
||||
&stack,
|
||||
user,
|
||||
PermissionLevel::Read.logs(),
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(GetComposeLog {
|
||||
project: stack.project_name(false),
|
||||
services,
|
||||
@@ -104,10 +111,15 @@ impl Resolve<ReadArgs> for SearchStackLog {
|
||||
invert,
|
||||
timestamps,
|
||||
} = self;
|
||||
let (stack, server) =
|
||||
get_stack_and_server(&stack, user, PermissionLevel::Read, true)
|
||||
.await?;
|
||||
let res = periphery_client(&server)?
|
||||
let (stack, server) = get_stack_and_server(
|
||||
&stack,
|
||||
user,
|
||||
PermissionLevel::Read.logs(),
|
||||
true,
|
||||
)
|
||||
.await?;
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(GetComposeLogSearch {
|
||||
project: stack.project_name(false),
|
||||
services,
|
||||
@@ -122,6 +134,61 @@ impl Resolve<ReadArgs> for SearchStackLog {
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for InspectStackContainer {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<Container> {
|
||||
let InspectStackContainer { stack, service } = self;
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
user,
|
||||
PermissionLevel::Read.inspect(),
|
||||
)
|
||||
.await?;
|
||||
if stack.config.server_id.is_empty() {
|
||||
return Err(
|
||||
anyhow!("Cannot inspect stack, not attached to any server")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let server =
|
||||
resource::get::<Server>(&stack.config.server_id).await?;
|
||||
let cache = server_status_cache()
|
||||
.get_or_insert_default(&server.id)
|
||||
.await;
|
||||
if cache.state != ServerState::Ok {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot inspect container: server is {:?}",
|
||||
cache.state
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let services = &stack_status_cache()
|
||||
.get(&stack.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
.curr
|
||||
.services;
|
||||
let Some(name) = services
|
||||
.iter()
|
||||
.find(|s| s.service == service)
|
||||
.and_then(|s| s.container.as_ref().map(|c| c.name.clone()))
|
||||
else {
|
||||
return Err(anyhow!(
|
||||
"No service found matching '{service}'. Was the stack last deployed manually?"
|
||||
).into());
|
||||
};
|
||||
let res = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer { name })
|
||||
.await?;
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListCommonStackExtraArgs {
|
||||
async fn resolve(
|
||||
self,
|
||||
@@ -240,10 +307,10 @@ impl Resolve<ReadArgs> for GetStackActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<StackActionState> {
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
@@ -280,7 +347,11 @@ impl Resolve<ReadArgs> for GetStacksSummary {
|
||||
StackState::Running => res.running += 1,
|
||||
StackState::Stopped | StackState::Paused => res.stopped += 1,
|
||||
StackState::Down => res.down += 1,
|
||||
StackState::Unknown => res.unknown += 1,
|
||||
StackState::Unknown => {
|
||||
if !stack.template {
|
||||
res.unknown += 1
|
||||
}
|
||||
}
|
||||
_ => res.unhealthy += 1,
|
||||
}
|
||||
}
|
||||
@@ -288,91 +359,3 @@ impl Resolve<ReadArgs> for GetStacksSummary {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetStackWebhooksEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetStackWebhooksEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if stack.config.git_provider != "github.com"
|
||||
|| stack.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = stack.config.repo.split('/');
|
||||
let owner = split.next().context("Sync repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
deploy_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let refresh_url =
|
||||
format!("{host}/listener/github/stack/{}/refresh", stack.id);
|
||||
let deploy_url =
|
||||
format!("{host}/listener/github/stack/{}/deploy", stack.id);
|
||||
|
||||
let mut refresh_enabled = false;
|
||||
let mut deploy_enabled = false;
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == refresh_url {
|
||||
refresh_enabled = true
|
||||
}
|
||||
if webhook.active && webhook.config.url == deploy_url {
|
||||
deploy_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetStackWebhooksEnabledResponse {
|
||||
managed: true,
|
||||
refresh_enabled,
|
||||
deploy_enabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
sync::{
|
||||
ResourceSync, ResourceSyncActionState, ResourceSyncListItem,
|
||||
@@ -12,10 +11,8 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
helpers::query::get_all_tags,
|
||||
resource,
|
||||
state::{action_states, github_client},
|
||||
helpers::query::get_all_tags, permission::get_check_permissions,
|
||||
resource, state::action_states,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
@@ -26,10 +23,10 @@ impl Resolve<ReadArgs> for GetResourceSync {
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ResourceSync> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<ResourceSync>(
|
||||
get_check_permissions::<ResourceSync>(
|
||||
&self.sync,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
@@ -79,14 +76,14 @@ impl Resolve<ReadArgs> for GetResourceSyncActionState {
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ResourceSyncActionState> {
|
||||
let sync = resource::get_check_permissions::<ResourceSync>(
|
||||
let sync = get_check_permissions::<ResourceSync>(
|
||||
&self.sync,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
let action_state = action_states()
|
||||
.resource_sync
|
||||
.sync
|
||||
.get(&sync.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
@@ -130,7 +127,7 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
|
||||
continue;
|
||||
}
|
||||
if action_states
|
||||
.resource_sync
|
||||
.sync
|
||||
.get(&resource_sync.id)
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
@@ -146,91 +143,3 @@ impl Resolve<ReadArgs> for GetResourceSyncsSummary {
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetSyncWebhooksEnabled {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetSyncWebhooksEnabledResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
sync_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let sync = resource::get_check_permissions::<ResourceSync>(
|
||||
&self.sync,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if sync.config.git_provider != "github.com"
|
||||
|| sync.config.repo.is_empty()
|
||||
{
|
||||
return Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
sync_enabled: false,
|
||||
});
|
||||
}
|
||||
|
||||
let mut split = sync.config.repo.split('/');
|
||||
let owner = split.next().context("Sync repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: false,
|
||||
refresh_enabled: false,
|
||||
sync_enabled: false,
|
||||
});
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let refresh_url =
|
||||
format!("{host}/listener/github/sync/{}/refresh", sync.id);
|
||||
let sync_url =
|
||||
format!("{host}/listener/github/sync/{}/sync", sync.id);
|
||||
|
||||
let mut refresh_enabled = false;
|
||||
let mut sync_enabled = false;
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == refresh_url {
|
||||
refresh_enabled = true
|
||||
}
|
||||
if webhook.active && webhook.config.url == sync_url {
|
||||
sync_enabled = true
|
||||
}
|
||||
}
|
||||
|
||||
Ok(GetSyncWebhooksEnabledResponse {
|
||||
managed: true,
|
||||
refresh_enabled,
|
||||
sync_enabled,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
find::find_collect, mongodb::options::FindOptions,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{GetTag, ListTags},
|
||||
entities::tag::Tag,
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{find::find_collect, mongodb::options::FindOptions};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{helpers::query::get_tag, state::db_client};
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use anyhow::Context;
|
||||
use database::mungos::find::find_collect;
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
ExportAllResourcesToToml, ExportAllResourcesToTomlResponse,
|
||||
@@ -9,24 +10,23 @@ use komodo_client::{
|
||||
ResourceTarget, action::Action, alerter::Alerter, build::Build,
|
||||
builder::Builder, deployment::Deployment,
|
||||
permission::PermissionLevel, procedure::Procedure, repo::Repo,
|
||||
resource::ResourceQuery, server::Server,
|
||||
server_template::ServerTemplate, stack::Stack,
|
||||
resource::ResourceQuery, server::Server, stack::Stack,
|
||||
sync::ResourceSync, toml::ResourcesToml, user::User,
|
||||
},
|
||||
};
|
||||
use mungos::find::find_collect;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::{
|
||||
get_all_tags, get_id_to_tags, get_user_user_group_ids,
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::db_client,
|
||||
sync::{
|
||||
AllResourcesById,
|
||||
toml::{TOML_PRETTY_OPTIONS, ToToml, convert_resource},
|
||||
user_groups::convert_user_groups,
|
||||
toml::{ToToml, convert_resource},
|
||||
user_groups::{convert_user_groups, user_group_to_toml},
|
||||
variables::variable_to_toml,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -43,7 +43,7 @@ async fn get_all_targets(
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
targets.extend(
|
||||
resource::list_for_user::<Alerter>(
|
||||
resource::list_full_for_user::<Alerter>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
&all_tags,
|
||||
@@ -53,7 +53,7 @@ async fn get_all_targets(
|
||||
.map(|resource| ResourceTarget::Alerter(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Builder>(
|
||||
resource::list_full_for_user::<Builder>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
&all_tags,
|
||||
@@ -63,7 +63,7 @@ async fn get_all_targets(
|
||||
.map(|resource| ResourceTarget::Builder(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Server>(
|
||||
resource::list_full_for_user::<Server>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
&all_tags,
|
||||
@@ -73,7 +73,7 @@ async fn get_all_targets(
|
||||
.map(|resource| ResourceTarget::Server(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Stack>(
|
||||
resource::list_full_for_user::<Stack>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
&all_tags,
|
||||
@@ -83,7 +83,7 @@ async fn get_all_targets(
|
||||
.map(|resource| ResourceTarget::Stack(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Deployment>(
|
||||
resource::list_full_for_user::<Deployment>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
&all_tags,
|
||||
@@ -93,7 +93,7 @@ async fn get_all_targets(
|
||||
.map(|resource| ResourceTarget::Deployment(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Build>(
|
||||
resource::list_full_for_user::<Build>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
&all_tags,
|
||||
@@ -103,7 +103,7 @@ async fn get_all_targets(
|
||||
.map(|resource| ResourceTarget::Build(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Repo>(
|
||||
resource::list_full_for_user::<Repo>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
&all_tags,
|
||||
@@ -113,7 +113,7 @@ async fn get_all_targets(
|
||||
.map(|resource| ResourceTarget::Repo(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Procedure>(
|
||||
resource::list_full_for_user::<Procedure>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
&all_tags,
|
||||
@@ -123,7 +123,7 @@ async fn get_all_targets(
|
||||
.map(|resource| ResourceTarget::Procedure(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<Action>(
|
||||
resource::list_full_for_user::<Action>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
&all_tags,
|
||||
@@ -132,16 +132,6 @@ async fn get_all_targets(
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Action(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<ServerTemplate>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<ResourceSync>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
@@ -203,18 +193,18 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
include_variables,
|
||||
} = self;
|
||||
let mut res = ResourcesToml::default();
|
||||
let all = AllResourcesById::load().await?;
|
||||
let id_to_tags = get_id_to_tags(None).await?;
|
||||
let ReadArgs { user } = args;
|
||||
for target in targets {
|
||||
match target {
|
||||
ResourceTarget::Alerter(id) => {
|
||||
let alerter = resource::get_check_permissions::<Alerter>(
|
||||
let mut alerter = get_check_permissions::<Alerter>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Alerter::replace_ids(&mut alerter);
|
||||
res.alerters.push(convert_resource::<Alerter>(
|
||||
alerter,
|
||||
false,
|
||||
@@ -223,16 +213,18 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
let sync = resource::get_check_permissions::<ResourceSync>(
|
||||
let mut sync = get_check_permissions::<ResourceSync>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
if sync.config.file_contents.is_empty()
|
||||
&& (sync.config.files_on_host
|
||||
|| !sync.config.repo.is_empty())
|
||||
|| !sync.config.repo.is_empty()
|
||||
|| !sync.config.linked_repo.is_empty())
|
||||
{
|
||||
ResourceSync::replace_ids(&mut sync);
|
||||
res.resource_syncs.push(convert_resource::<ResourceSync>(
|
||||
sync,
|
||||
false,
|
||||
@@ -241,27 +233,14 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
let template = resource::get_check_permissions::<
|
||||
ServerTemplate,
|
||||
>(&id, user, PermissionLevel::Read)
|
||||
.await?;
|
||||
res.server_templates.push(
|
||||
convert_resource::<ServerTemplate>(
|
||||
template,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
),
|
||||
)
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let mut server = get_check_permissions::<Server>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Server::replace_ids(&mut server);
|
||||
res.servers.push(convert_resource::<Server>(
|
||||
server,
|
||||
false,
|
||||
@@ -270,14 +249,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
let mut builder =
|
||||
resource::get_check_permissions::<Builder>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
Builder::replace_ids(&mut builder, &all);
|
||||
let mut builder = get_check_permissions::<Builder>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Builder::replace_ids(&mut builder);
|
||||
res.builders.push(convert_resource::<Builder>(
|
||||
builder,
|
||||
false,
|
||||
@@ -286,13 +264,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
let mut build = resource::get_check_permissions::<Build>(
|
||||
let mut build = get_check_permissions::<Build>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Build::replace_ids(&mut build, &all);
|
||||
Build::replace_ids(&mut build);
|
||||
res.builds.push(convert_resource::<Build>(
|
||||
build,
|
||||
false,
|
||||
@@ -301,13 +279,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
let mut deployment = resource::get_check_permissions::<
|
||||
Deployment,
|
||||
>(
|
||||
&id, user, PermissionLevel::Read
|
||||
let mut deployment = get_check_permissions::<Deployment>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Deployment::replace_ids(&mut deployment, &all);
|
||||
Deployment::replace_ids(&mut deployment);
|
||||
res.deployments.push(convert_resource::<Deployment>(
|
||||
deployment,
|
||||
false,
|
||||
@@ -316,13 +294,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
let mut repo = resource::get_check_permissions::<Repo>(
|
||||
let mut repo = get_check_permissions::<Repo>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Repo::replace_ids(&mut repo, &all);
|
||||
Repo::replace_ids(&mut repo);
|
||||
res.repos.push(convert_resource::<Repo>(
|
||||
repo,
|
||||
false,
|
||||
@@ -331,13 +309,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
let mut stack = resource::get_check_permissions::<Stack>(
|
||||
let mut stack = get_check_permissions::<Stack>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Stack::replace_ids(&mut stack, &all);
|
||||
Stack::replace_ids(&mut stack);
|
||||
res.stacks.push(convert_resource::<Stack>(
|
||||
stack,
|
||||
false,
|
||||
@@ -346,13 +324,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
let mut procedure = resource::get_check_permissions::<
|
||||
Procedure,
|
||||
>(
|
||||
&id, user, PermissionLevel::Read
|
||||
let mut procedure = get_check_permissions::<Procedure>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Procedure::replace_ids(&mut procedure, &all);
|
||||
Procedure::replace_ids(&mut procedure);
|
||||
res.procedures.push(convert_resource::<Procedure>(
|
||||
procedure,
|
||||
false,
|
||||
@@ -361,13 +339,13 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
));
|
||||
}
|
||||
ResourceTarget::Action(id) => {
|
||||
let mut action = resource::get_check_permissions::<Action>(
|
||||
let mut action = get_check_permissions::<Action>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Action::replace_ids(&mut action, &all);
|
||||
Action::replace_ids(&mut action);
|
||||
res.actions.push(convert_resource::<Action>(
|
||||
action,
|
||||
false,
|
||||
@@ -379,7 +357,7 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
};
|
||||
}
|
||||
|
||||
add_user_groups(user_groups, &mut res, &all, args)
|
||||
add_user_groups(user_groups, &mut res, args)
|
||||
.await
|
||||
.context("failed to add user groups")?;
|
||||
|
||||
@@ -408,7 +386,6 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
async fn add_user_groups(
|
||||
user_groups: Vec<String>,
|
||||
res: &mut ResourcesToml,
|
||||
all: &AllResourcesById,
|
||||
args: &ReadArgs,
|
||||
) -> anyhow::Result<()> {
|
||||
let user_groups = ListUserGroups {}
|
||||
@@ -420,7 +397,7 @@ async fn add_user_groups(
|
||||
user_groups.contains(&ug.name) || user_groups.contains(&ug.id)
|
||||
});
|
||||
let mut ug = Vec::with_capacity(user_groups.size_hint().0);
|
||||
convert_user_groups(user_groups, all, &mut ug).await?;
|
||||
convert_user_groups(user_groups, &mut ug).await?;
|
||||
res.user_groups = ug.into_iter().map(|ug| ug.1).collect();
|
||||
|
||||
Ok(())
|
||||
@@ -503,14 +480,6 @@ fn serialize_resources_toml(
|
||||
Builder::push_to_toml_string(builder, &mut toml)?;
|
||||
}
|
||||
|
||||
for server_template in resources.server_templates {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
toml.push_str("[[server_template]]\n");
|
||||
ServerTemplate::push_to_toml_string(server_template, &mut toml)?;
|
||||
}
|
||||
|
||||
for resource_sync in resources.resource_syncs {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
@@ -523,22 +492,14 @@ fn serialize_resources_toml(
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
toml.push_str("[[variable]]\n");
|
||||
toml.push_str(
|
||||
&toml_pretty::to_string(variable, TOML_PRETTY_OPTIONS)
|
||||
.context("failed to serialize variables to toml")?,
|
||||
);
|
||||
toml.push_str(&variable_to_toml(variable)?);
|
||||
}
|
||||
|
||||
for user_group in &resources.user_groups {
|
||||
for user_group in resources.user_groups {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
toml.push_str("[[user_group]]\n");
|
||||
toml.push_str(
|
||||
&toml_pretty::to_string(user_group, TOML_PRETTY_OPTIONS)
|
||||
.context("failed to serialize user_groups to toml")?,
|
||||
);
|
||||
toml.push_str(&user_group_to_toml(user_group)?);
|
||||
}
|
||||
|
||||
Ok(toml)
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{GetUpdate, ListUpdates, ListUpdatesResponse},
|
||||
entities::{
|
||||
@@ -14,21 +19,19 @@ use komodo_client::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
update::{Update, UpdateListItem},
|
||||
user::User,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{config::core_config, resource, state::db_client};
|
||||
use crate::{
|
||||
config::core_config,
|
||||
permission::{get_check_permissions, get_resource_ids_for_user},
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
|
||||
@@ -42,18 +45,17 @@ impl Resolve<ReadArgs> for ListUpdates {
|
||||
let query = if user.admin || core_config().transparent_mode {
|
||||
self.query
|
||||
} else {
|
||||
let server_query =
|
||||
resource::get_resource_ids_for_user::<Server>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Server", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Server" });
|
||||
let server_query = get_resource_ids_for_user::<Server>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Server", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Server" });
|
||||
|
||||
let deployment_query =
|
||||
resource::get_resource_ids_for_user::<Deployment>(user)
|
||||
get_resource_ids_for_user::<Deployment>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
@@ -62,38 +64,35 @@ impl Resolve<ReadArgs> for ListUpdates {
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Deployment" });
|
||||
|
||||
let stack_query =
|
||||
resource::get_resource_ids_for_user::<Stack>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Stack", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Stack" });
|
||||
let stack_query = get_resource_ids_for_user::<Stack>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Stack", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Stack" });
|
||||
|
||||
let build_query =
|
||||
resource::get_resource_ids_for_user::<Build>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Build", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Build" });
|
||||
let build_query = get_resource_ids_for_user::<Build>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Build", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Build" });
|
||||
|
||||
let repo_query =
|
||||
resource::get_resource_ids_for_user::<Repo>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Repo", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Repo" });
|
||||
let repo_query = get_resource_ids_for_user::<Repo>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Repo", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Repo" });
|
||||
|
||||
let procedure_query =
|
||||
resource::get_resource_ids_for_user::<Procedure>(user)
|
||||
get_resource_ids_for_user::<Procedure>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
@@ -102,57 +101,43 @@ impl Resolve<ReadArgs> for ListUpdates {
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Procedure" });
|
||||
|
||||
let action_query =
|
||||
resource::get_resource_ids_for_user::<Action>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Action", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Action" });
|
||||
|
||||
let builder_query =
|
||||
resource::get_resource_ids_for_user::<Builder>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Builder", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Builder" });
|
||||
|
||||
let alerter_query =
|
||||
resource::get_resource_ids_for_user::<Alerter>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Alerter", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
|
||||
|
||||
let server_template_query =
|
||||
resource::get_resource_ids_for_user::<ServerTemplate>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "ServerTemplate", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "ServerTemplate" });
|
||||
|
||||
let resource_sync_query =
|
||||
resource::get_resource_ids_for_user::<ResourceSync>(
|
||||
user,
|
||||
)
|
||||
let action_query = get_resource_ids_for_user::<Action>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "ResourceSync", "target.id": { "$in": ids }
|
||||
"target.type": "Action", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
|
||||
.unwrap_or_else(|| doc! { "target.type": "Action" });
|
||||
|
||||
let builder_query = get_resource_ids_for_user::<Builder>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Builder", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Builder" });
|
||||
|
||||
let alerter_query = get_resource_ids_for_user::<Alerter>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "Alerter", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
|
||||
|
||||
let resource_sync_query = get_resource_ids_for_user::<
|
||||
ResourceSync,
|
||||
>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "ResourceSync", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "ResourceSync" });
|
||||
|
||||
let mut query = self.query.unwrap_or_default();
|
||||
query.extend(doc! {
|
||||
@@ -166,7 +151,6 @@ impl Resolve<ReadArgs> for ListUpdates {
|
||||
action_query,
|
||||
alerter_query,
|
||||
builder_query,
|
||||
server_template_query,
|
||||
resource_sync_query,
|
||||
]
|
||||
});
|
||||
@@ -245,90 +229,82 @@ impl Resolve<ReadArgs> for GetUpdate {
|
||||
);
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
resource::get_check_permissions::<Server>(
|
||||
get_check_permissions::<Server>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
get_check_permissions::<Deployment>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
resource::get_check_permissions::<Build>(
|
||||
get_check_permissions::<Build>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
resource::get_check_permissions::<Repo>(
|
||||
get_check_permissions::<Repo>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
resource::get_check_permissions::<Builder>(
|
||||
get_check_permissions::<Builder>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
resource::get_check_permissions::<Alerter>(
|
||||
get_check_permissions::<Alerter>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
resource::get_check_permissions::<Procedure>(
|
||||
get_check_permissions::<Procedure>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Action(id) => {
|
||||
resource::get_check_permissions::<Action>(
|
||||
get_check_permissions::<Action>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::get_check_permissions::<ResourceSync>(
|
||||
get_check_permissions::<ResourceSync>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
resource::get_check_permissions::<Stack>(
|
||||
get_check_permissions::<Stack>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
@@ -1,4 +1,9 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::{
|
||||
FindUser, FindUserResponse, GetUsername, GetUsernameResponse,
|
||||
@@ -8,11 +13,6 @@ use komodo_client::{
|
||||
},
|
||||
entities::user::{UserConfig, admin_service_user},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
find::find_collect,
|
||||
mongodb::{bson::doc, options::FindOptions},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{helpers::query::get_user, state::db_client};
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::Context;
|
||||
use komodo_client::api::read::*;
|
||||
use mungos::{
|
||||
use database::mungos::{
|
||||
find::find_collect,
|
||||
mongodb::{
|
||||
bson::{Document, doc, oid::ObjectId},
|
||||
options::FindOptions,
|
||||
},
|
||||
};
|
||||
use komodo_client::api::read::*;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
find::find_collect, mongodb::options::FindOptions,
|
||||
};
|
||||
use komodo_client::api::read::*;
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{find::find_collect, mongodb::options::FindOptions};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{helpers::query::get_variable, state::db_client};
|
||||
|
||||
219
bin/core/src/api/terminal.rs
Normal file
219
bin/core/src/api/terminal.rs
Normal file
@@ -0,0 +1,219 @@
|
||||
use anyhow::Context;
|
||||
use axum::{Extension, Router, middleware, routing::post};
|
||||
use komodo_client::{
|
||||
api::terminal::*,
|
||||
entities::{
|
||||
deployment::Deployment, permission::PermissionLevel,
|
||||
server::Server, stack::Stack, user::User,
|
||||
},
|
||||
};
|
||||
use serror::Json;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request, helpers::periphery_client,
|
||||
permission::get_check_permissions, resource::get,
|
||||
state::stack_status_cache,
|
||||
};
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/execute", post(execute_terminal))
|
||||
.route("/execute/container", post(execute_container_exec))
|
||||
.route("/execute/deployment", post(execute_deployment_exec))
|
||||
.route("/execute/stack", post(execute_stack_exec))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
// =================
|
||||
// ExecuteTerminal
|
||||
// =================
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteTerminal",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server,
|
||||
terminal,
|
||||
)
|
||||
)]
|
||||
async fn execute_terminal(
|
||||
Extension(user): Extension<User>,
|
||||
Json(ExecuteTerminalBody {
|
||||
server,
|
||||
terminal,
|
||||
command,
|
||||
}): Json<ExecuteTerminalBody>,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!("/terminal/execute request | user: {}", user.username);
|
||||
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let stream = periphery_client(&server)
|
||||
.await?
|
||||
.execute_terminal(terminal, command)
|
||||
.await
|
||||
.context("Failed to execute command on periphery")?;
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream))
|
||||
}
|
||||
|
||||
// ======================
|
||||
// ExecuteContainerExec
|
||||
// ======================
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteContainerExec",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server,
|
||||
container,
|
||||
shell,
|
||||
recreate = format!("{recreate:?}"),
|
||||
)
|
||||
)]
|
||||
async fn execute_container_exec(
|
||||
Extension(user): Extension<User>,
|
||||
Json(ExecuteContainerExecBody {
|
||||
server,
|
||||
container,
|
||||
shell,
|
||||
command,
|
||||
recreate,
|
||||
}): Json<ExecuteContainerExecBody>,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!("ExecuteContainerExec request | user: {}", user.username);
|
||||
|
||||
let server = get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_container_exec(container, shell, command, recreate)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream))
|
||||
}
|
||||
|
||||
// =======================
|
||||
// ExecuteDeploymentExec
|
||||
// =======================
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteDeploymentExec",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment,
|
||||
shell,
|
||||
recreate = format!("{recreate:?}"),
|
||||
)
|
||||
)]
|
||||
async fn execute_deployment_exec(
|
||||
Extension(user): Extension<User>,
|
||||
Json(ExecuteDeploymentExecBody {
|
||||
deployment,
|
||||
shell,
|
||||
command,
|
||||
recreate,
|
||||
}): Json<ExecuteDeploymentExecBody>,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!("ExecuteDeploymentExec request | user: {}", user.username);
|
||||
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
&deployment,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let server = get::<Server>(&deployment.config.server_id).await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_container_exec(deployment.name, shell, command, recreate)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream))
|
||||
}
|
||||
|
||||
// ==================
|
||||
// ExecuteStackExec
|
||||
// ==================
|
||||
|
||||
#[instrument(
|
||||
name = "ExecuteStackExec",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack,
|
||||
service,
|
||||
shell,
|
||||
recreate = format!("{recreate:?}"),
|
||||
)
|
||||
)]
|
||||
async fn execute_stack_exec(
|
||||
Extension(user): Extension<User>,
|
||||
Json(ExecuteStackExecBody {
|
||||
stack,
|
||||
service,
|
||||
shell,
|
||||
command,
|
||||
recreate,
|
||||
}): Json<ExecuteStackExecBody>,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
info!("ExecuteStackExec request | user: {}", user.username);
|
||||
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
&user,
|
||||
PermissionLevel::Read.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let server = get::<Server>(&stack.config.server_id).await?;
|
||||
|
||||
let container = stack_status_cache()
|
||||
.get(&stack.id)
|
||||
.await
|
||||
.context("could not get stack status")?
|
||||
.curr
|
||||
.services
|
||||
.iter()
|
||||
.find(|s| s.service == service)
|
||||
.context("could not find service")?
|
||||
.container
|
||||
.as_ref()
|
||||
.context("could not find service container")?
|
||||
.name
|
||||
.clone();
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let stream = periphery
|
||||
.execute_container_exec(container, shell, command, recreate)
|
||||
.await
|
||||
.context(
|
||||
"Failed to execute container exec command on periphery",
|
||||
)?;
|
||||
|
||||
Ok(axum::body::Body::from_stream(stream))
|
||||
}
|
||||
@@ -1,26 +1,32 @@
|
||||
use std::{collections::VecDeque, time::Instant};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::{Extension, Json, Router, middleware, routing::post};
|
||||
use axum::{
|
||||
Extension, Json, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_bson,
|
||||
};
|
||||
use derive_variants::EnumVariants;
|
||||
use komodo_client::entities::random_string;
|
||||
use komodo_client::{
|
||||
api::user::*,
|
||||
entities::{api_key::ApiKey, komodo_timestamp, user::User},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{
|
||||
auth::auth_request,
|
||||
helpers::{query::get_user, random_string},
|
||||
state::db_client,
|
||||
auth::auth_request, helpers::query::get_user, state::db_client,
|
||||
};
|
||||
|
||||
use super::Variant;
|
||||
|
||||
pub struct UserArgs {
|
||||
pub user: User,
|
||||
}
|
||||
@@ -43,10 +49,22 @@ enum UserRequest {
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
#[instrument(name = "UserHandler", level = "debug", skip(user))]
|
||||
async fn variant_handler(
|
||||
user: Extension<User>,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let req: UserRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<UserRequest>,
|
||||
@@ -69,11 +87,6 @@ async fn handler(
|
||||
const RECENTLY_VIEWED_MAX: usize = 10;
|
||||
|
||||
impl Resolve<UserArgs> for PushRecentlyViewed {
|
||||
#[instrument(
|
||||
name = "PushRecentlyViewed",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
@@ -98,7 +111,7 @@ impl Resolve<UserArgs> for PushRecentlyViewed {
|
||||
update_one_by_id(
|
||||
&db_client().users,
|
||||
&user.id,
|
||||
mungos::update::Update::Set(update),
|
||||
database::mungos::update::Update::Set(update),
|
||||
None,
|
||||
)
|
||||
.await
|
||||
@@ -111,11 +124,6 @@ impl Resolve<UserArgs> for PushRecentlyViewed {
|
||||
}
|
||||
|
||||
impl Resolve<UserArgs> for SetLastSeenUpdate {
|
||||
#[instrument(
|
||||
name = "SetLastSeenUpdate",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
@@ -123,7 +131,7 @@ impl Resolve<UserArgs> for SetLastSeenUpdate {
|
||||
update_one_by_id(
|
||||
&db_client().users,
|
||||
&user.id,
|
||||
mungos::update::Update::Set(doc! {
|
||||
database::mungos::update::Update::Set(doc! {
|
||||
"last_update_view": komodo_timestamp()
|
||||
}),
|
||||
None,
|
||||
@@ -138,7 +146,11 @@ const SECRET_LENGTH: usize = 40;
|
||||
const BCRYPT_COST: u32 = 10;
|
||||
|
||||
impl Resolve<UserArgs> for CreateApiKey {
|
||||
#[instrument(name = "CreateApiKey", level = "debug", skip(user))]
|
||||
#[instrument(
|
||||
"CreateApiKey",
|
||||
skip_all,
|
||||
fields(operator = user.id)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
@@ -168,7 +180,11 @@ impl Resolve<UserArgs> for CreateApiKey {
|
||||
}
|
||||
|
||||
impl Resolve<UserArgs> for DeleteApiKey {
|
||||
#[instrument(name = "DeleteApiKey", level = "debug", skip(user))]
|
||||
#[instrument(
|
||||
"DeleteApiKey",
|
||||
skip_all,
|
||||
fields(operator = user.id)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
UserArgs { user }: &UserArgs,
|
||||
|
||||
@@ -6,45 +6,64 @@ use komodo_client::{
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::resource;
|
||||
use crate::{permission::get_check_permissions, resource};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateAction {
|
||||
#[instrument(name = "CreateAction", skip(user))]
|
||||
#[instrument(
|
||||
"CreateAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Action> {
|
||||
Ok(
|
||||
resource::create::<Action>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Action>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyAction {
|
||||
#[instrument(name = "CopyAction", skip(user))]
|
||||
#[instrument(
|
||||
"CopyAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.name,
|
||||
copy_action = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Action> {
|
||||
let Action { config, .. } =
|
||||
resource::get_check_permissions::<Action>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Action>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
let Action { config, .. } = get_check_permissions::<Action>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Action>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateAction {
|
||||
#[instrument(name = "UpdateAction", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -54,7 +73,15 @@ impl Resolve<WriteArgs> for UpdateAction {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameAction {
|
||||
#[instrument(name = "RenameAction", skip(user))]
|
||||
#[instrument(
|
||||
"RenameAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -64,8 +91,18 @@ impl Resolve<WriteArgs> for RenameAction {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteAction {
|
||||
#[instrument(name = "DeleteAction", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Action> {
|
||||
Ok(resource::delete::<Action>(&self.id, args).await?)
|
||||
#[instrument(
|
||||
"DeleteAction",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
action = self.id
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Action> {
|
||||
Ok(resource::delete::<Action>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
41
bin/core/src/api/write/alert.rs
Normal file
41
bin/core/src/api/write/alert.rs
Normal file
@@ -0,0 +1,41 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::{doc, oid::ObjectId};
|
||||
use komodo_client::{api::write::CloseAlert, entities::NoData};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{api::write::WriteArgs, state::db_client};
|
||||
|
||||
impl Resolve<WriteArgs> for CloseAlert {
|
||||
#[instrument(
|
||||
"CloseAlert",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
alert_id = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
db_client()
|
||||
.alerts
|
||||
.update_one(
|
||||
doc! { "_id": ObjectId::from_str(&self.id)? },
|
||||
doc! { "$set": { "resolved": true } },
|
||||
)
|
||||
.await
|
||||
.context("Failed to close Alert on database")?;
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
@@ -6,55 +6,81 @@ use komodo_client::{
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::resource;
|
||||
use crate::{permission::get_check_permissions, resource};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateAlerter {
|
||||
#[instrument(name = "CreateAlerter", skip(user))]
|
||||
#[instrument(
|
||||
"CreateAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
Ok(
|
||||
resource::create::<Alerter>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Alerter>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyAlerter {
|
||||
#[instrument(name = "CopyAlerter", skip(user))]
|
||||
#[instrument(
|
||||
"CopyAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.name,
|
||||
copy_alerter = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
let Alerter { config, .. } =
|
||||
resource::get_check_permissions::<Alerter>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Alerter>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
let Alerter { config, .. } = get_check_permissions::<Alerter>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Alerter>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteAlerter {
|
||||
#[instrument(name = "DeleteAlerter", skip(args))]
|
||||
#[instrument(
|
||||
"DeleteAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Alerter> {
|
||||
Ok(resource::delete::<Alerter>(&self.id, args).await?)
|
||||
Ok(resource::delete::<Alerter>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateAlerter {
|
||||
#[instrument(name = "UpdateAlerter", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -67,7 +93,15 @@ impl Resolve<WriteArgs> for UpdateAlerter {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameAlerter {
|
||||
#[instrument(name = "RenameAlerter", skip(user))]
|
||||
#[instrument(
|
||||
"RenameAlerter",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
alerter = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
|
||||
@@ -1,91 +1,119 @@
|
||||
use std::{path::PathBuf, str::FromStr, time::Duration};
|
||||
use std::{path::PathBuf, time::Duration};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::to_document;
|
||||
use database::{
|
||||
mongo_indexed::doc, mungos::mongodb::bson::oid::ObjectId,
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use git::GitRes;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
CloneArgs, FileContents, NoData, Operation, all_logs_success,
|
||||
build::{Build, BuildInfo, PartialBuildConfig},
|
||||
FileContents, NoData, Operation, RepoExecutionArgs,
|
||||
all_logs_success,
|
||||
build::{Build, BuildInfo},
|
||||
builder::{Builder, BuilderConfig},
|
||||
config::core::CoreConfig,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::ServerState,
|
||||
update::Update,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::mongodb::bson::to_document;
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use periphery_client::{
|
||||
PeripheryClient,
|
||||
api::build::{
|
||||
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
|
||||
},
|
||||
use periphery_client::api::build::{
|
||||
GetDockerfileContentsOnHost, WriteDockerfileContentsToHost,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
use tokio::fs;
|
||||
|
||||
use crate::{
|
||||
config::core_config,
|
||||
connection::PeripheryConnectionArgs,
|
||||
helpers::{
|
||||
git_token, periphery_client,
|
||||
query::get_server_with_state,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
periphery::PeripheryClient,
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{db_client, github_client},
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateBuild {
|
||||
#[instrument(name = "CreateBuild", skip(user))]
|
||||
#[instrument(
|
||||
"CreateBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Build> {
|
||||
Ok(
|
||||
resource::create::<Build>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Build>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyBuild {
|
||||
#[instrument(name = "CopyBuild", skip(user))]
|
||||
#[instrument(
|
||||
"CopyBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.name,
|
||||
copy_build = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Build> {
|
||||
let Build { mut config, .. } =
|
||||
resource::get_check_permissions::<Build>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
let Build { mut config, .. } = get_check_permissions::<Build>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
// reset version to 0.0.0
|
||||
config.version = Default::default();
|
||||
Ok(
|
||||
resource::create::<Build>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Build>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteBuild {
|
||||
#[instrument(name = "DeleteBuild", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Build> {
|
||||
Ok(resource::delete::<Build>(&self.id, args).await?)
|
||||
#[instrument(
|
||||
"DeleteBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Build> {
|
||||
Ok(resource::delete::<Build>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateBuild {
|
||||
#[instrument(name = "UpdateBuild", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -95,7 +123,15 @@ impl Resolve<WriteArgs> for UpdateBuild {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameBuild {
|
||||
#[instrument(name = "RenameBuild", skip(user))]
|
||||
#[instrument(
|
||||
"RenameBuild",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
build = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -105,16 +141,26 @@ impl Resolve<WriteArgs> for RenameBuild {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for WriteBuildFileContents {
|
||||
#[instrument(name = "WriteBuildFileContents", skip(args))]
|
||||
#[instrument(
|
||||
"WriteBuildFileContents",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = args.user.id,
|
||||
build = self.build,
|
||||
)
|
||||
)]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Update> {
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
let build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
&args.user,
|
||||
PermissionLevel::Write,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if !build.config.files_on_host && build.config.repo.is_empty() {
|
||||
if !build.config.files_on_host
|
||||
&& build.config.repo.is_empty()
|
||||
&& build.config.linked_repo.is_empty()
|
||||
{
|
||||
return Err(anyhow!(
|
||||
"Build is not configured to use Files on Host or Git Repo, can't write dockerfile contents"
|
||||
).into());
|
||||
@@ -174,6 +220,7 @@ impl Resolve<WriteArgs> for WriteBuildFileContents {
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("WriteDockerfileContentsGit", skip_all)]
|
||||
async fn write_dockerfile_contents_git(
|
||||
req: WriteBuildFileContents,
|
||||
args: &WriteArgs,
|
||||
@@ -182,8 +229,18 @@ async fn write_dockerfile_contents_git(
|
||||
) -> serror::Result<Update> {
|
||||
let WriteBuildFileContents { build: _, contents } = req;
|
||||
|
||||
let mut clone_args: CloneArgs = (&build).into();
|
||||
let root = clone_args.unique_path(&core_config().repo_directory)?;
|
||||
let mut repo_args: RepoExecutionArgs = if !build
|
||||
.config
|
||||
.files_on_host
|
||||
&& !build.config.linked_repo.is_empty()
|
||||
{
|
||||
(&crate::resource::get::<Repo>(&build.config.linked_repo).await?)
|
||||
.into()
|
||||
} else {
|
||||
(&build).into()
|
||||
};
|
||||
let root = repo_args.unique_path(&core_config().repo_directory)?;
|
||||
repo_args.destination = Some(root.display().to_string());
|
||||
|
||||
let build_path = build
|
||||
.config
|
||||
@@ -206,22 +263,22 @@ async fn write_dockerfile_contents_git(
|
||||
})?;
|
||||
}
|
||||
|
||||
let access_token = if let Some(account) = &repo_args.account {
|
||||
git_token(&repo_args.provider, account, |https| repo_args.https = https)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", repo_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Ensure the folder is initialized as git repo.
|
||||
// This allows a new file to be committed on a branch that may not exist.
|
||||
if !root.join(".git").exists() {
|
||||
let access_token = if let Some(account) = &clone_args.account {
|
||||
git_token(&clone_args.provider, account, |https| clone_args.https = https)
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {account}", clone_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
git::init_folder_as_repo(
|
||||
&root,
|
||||
&clone_args,
|
||||
&repo_args,
|
||||
access_token.as_deref(),
|
||||
&mut update.logs,
|
||||
)
|
||||
@@ -235,8 +292,35 @@ async fn write_dockerfile_contents_git(
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(e) =
|
||||
fs::write(&full_path, &contents).await.with_context(|| {
|
||||
// Save this for later -- repo_args moved next.
|
||||
let branch = repo_args.branch.clone();
|
||||
// Pull latest changes to repo to ensure linear commit history
|
||||
match git::pull_or_clone(
|
||||
repo_args,
|
||||
&core_config().repo_directory,
|
||||
access_token,
|
||||
)
|
||||
.await
|
||||
.context("Failed to pull latest changes before commit")
|
||||
{
|
||||
Ok((res, _)) => update.logs.extend(res.logs),
|
||||
Err(e) => {
|
||||
update.push_error_log("Pull Repo", format_serror(&e.into()));
|
||||
update.finalize();
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
if let Err(e) = secret_file::write_async(&full_path, &contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("Failed to write dockerfile contents to {full_path:?}")
|
||||
})
|
||||
{
|
||||
@@ -260,7 +344,7 @@ async fn write_dockerfile_contents_git(
|
||||
&format!("{}: Commit Dockerfile", args.user.username),
|
||||
&root,
|
||||
&build_path.join(&dockerfile_path),
|
||||
&build.config.branch,
|
||||
&branch,
|
||||
)
|
||||
.await;
|
||||
|
||||
@@ -283,24 +367,29 @@ async fn write_dockerfile_contents_git(
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RefreshBuildCache {
|
||||
#[instrument(
|
||||
name = "RefreshBuildCache",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
|
||||
// build should be able to do this.
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
let build = get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let repo = if !build.config.files_on_host
|
||||
&& !build.config.linked_repo.is_empty()
|
||||
{
|
||||
crate::resource::get::<Repo>(&build.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let (
|
||||
remote_path,
|
||||
remote_contents,
|
||||
@@ -319,71 +408,20 @@ impl Resolve<WriteArgs> for RefreshBuildCache {
|
||||
(None, None, Some(format_serror(&e.into())), None, None)
|
||||
}
|
||||
}
|
||||
} else if !build.config.repo.is_empty() {
|
||||
// ================
|
||||
// REPO BASED BUILD
|
||||
// ================
|
||||
if build.config.git_provider.is_empty() {
|
||||
} else if let Some(repo) = &repo {
|
||||
let Some(res) = get_git_remote(&build, repo.into()).await?
|
||||
else {
|
||||
// Nothing to do here
|
||||
return Ok(NoData {});
|
||||
}
|
||||
let config = core_config();
|
||||
|
||||
let mut clone_args: CloneArgs = (&build).into();
|
||||
let repo_path =
|
||||
clone_args.unique_path(&core_config().repo_directory)?;
|
||||
clone_args.destination = Some(repo_path.display().to_string());
|
||||
// Don't want to run these on core.
|
||||
clone_args.on_clone = None;
|
||||
clone_args.on_pull = None;
|
||||
|
||||
let access_token = if let Some(username) = &clone_args.account {
|
||||
git_token(&clone_args.provider, username, |https| {
|
||||
clone_args.https = https
|
||||
})
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {username}", clone_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let GitRes { hash, message, .. } = git::pull_or_clone(
|
||||
clone_args,
|
||||
&config.repo_directory,
|
||||
access_token,
|
||||
&[],
|
||||
"",
|
||||
None,
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
.context("failed to clone build repo")?;
|
||||
|
||||
let relative_path = PathBuf::from_str(&build.config.build_path)
|
||||
.context("Invalid build path")?
|
||||
.join(&build.config.dockerfile_path);
|
||||
|
||||
let full_path = repo_path.join(&relative_path);
|
||||
let (contents, error) = match fs::read_to_string(&full_path)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to read dockerfile contents at {full_path:?}"
|
||||
)
|
||||
}) {
|
||||
Ok(contents) => (Some(contents), None),
|
||||
Err(e) => (None, Some(format_serror(&e.into()))),
|
||||
res
|
||||
} else if !build.config.repo.is_empty() {
|
||||
let Some(res) = get_git_remote(&build, (&build).into()).await?
|
||||
else {
|
||||
// Nothing to do here
|
||||
return Ok(NoData {});
|
||||
};
|
||||
|
||||
(
|
||||
Some(relative_path.display().to_string()),
|
||||
contents,
|
||||
error,
|
||||
hash,
|
||||
message,
|
||||
)
|
||||
res
|
||||
} else {
|
||||
// =============
|
||||
// UI BASED FILE
|
||||
@@ -435,13 +473,26 @@ async fn get_on_host_periphery(
|
||||
Err(anyhow!("Files on host doesn't work with AWS builder"))
|
||||
}
|
||||
BuilderConfig::Url(config) => {
|
||||
// TODO: Ensure connection is actually established.
|
||||
// Builder id no good because it may be active for multiple connections.
|
||||
let periphery = PeripheryClient::new(
|
||||
config.address,
|
||||
config.passkey,
|
||||
Duration::from_secs(3),
|
||||
);
|
||||
periphery.health_check().await?;
|
||||
Ok(periphery)
|
||||
PeripheryConnectionArgs::from_url_builder(
|
||||
&ObjectId::new().to_hex(),
|
||||
&config,
|
||||
),
|
||||
config.insecure_tls,
|
||||
)
|
||||
.await?;
|
||||
// Poll for connection to be estalished
|
||||
let mut err = None;
|
||||
for _ in 0..10 {
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
match periphery.health_check().await {
|
||||
Ok(_) => return Ok(periphery),
|
||||
Err(e) => err = Some(e),
|
||||
};
|
||||
}
|
||||
Err(err.context("Missing error")?)
|
||||
}
|
||||
BuilderConfig::Server(config) => {
|
||||
if config.server_id.is_empty() {
|
||||
@@ -456,7 +507,7 @@ async fn get_on_host_periphery(
|
||||
"Builder server is disabled or not reachable"
|
||||
));
|
||||
};
|
||||
periphery_client(&server)
|
||||
periphery_client(&server).await
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -476,200 +527,62 @@ async fn get_on_host_dockerfile(
|
||||
.await
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateBuildWebhook {
|
||||
#[instrument(name = "CreateBuildWebhook", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<CreateBuildWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let WriteArgs { user } = args;
|
||||
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = build.config.repo.split('/');
|
||||
let owner = split.next().context("Build repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Build repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if build.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&build.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// Now good to create the webhook
|
||||
let request = ReposCreateWebhookRequest {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
token: Default::default(),
|
||||
}),
|
||||
events: vec![String::from("push")],
|
||||
name: String::from("web"),
|
||||
};
|
||||
github_repos
|
||||
.create_webhook(owner, repo, &request)
|
||||
.await
|
||||
.context("failed to create webhook")?;
|
||||
|
||||
if !build.config.webhook_enabled {
|
||||
UpdateBuild {
|
||||
id: build.id,
|
||||
config: PartialBuildConfig {
|
||||
webhook_enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("failed to update build to enable webhook")?;
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteBuildWebhook {
|
||||
#[instrument(name = "DeleteBuildWebhook", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteBuildWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let build = resource::get_check_permissions::<Build>(
|
||||
&self.build,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if build.config.git_provider != "github.com" {
|
||||
return Err(
|
||||
anyhow!("Can only manage github.com repo webhooks").into(),
|
||||
);
|
||||
}
|
||||
|
||||
if build.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't delete webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = build.config.repo.split('/');
|
||||
let owner = split.next().context("Build repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Build repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = format!("{host}/listener/github/build/{}", build.id);
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
github_repos
|
||||
.delete_webhook(owner, repo, webhook.id)
|
||||
.await
|
||||
.context("failed to delete webhook")?;
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// No webhook to delete, all good
|
||||
Ok(NoData {})
|
||||
async fn get_git_remote(
|
||||
build: &Build,
|
||||
mut clone_args: RepoExecutionArgs,
|
||||
) -> anyhow::Result<
|
||||
Option<(
|
||||
Option<String>,
|
||||
Option<String>,
|
||||
Option<String>,
|
||||
Option<String>,
|
||||
Option<String>,
|
||||
)>,
|
||||
> {
|
||||
if clone_args.provider.is_empty() {
|
||||
// Nothing to do here
|
||||
return Ok(None);
|
||||
}
|
||||
let config = core_config();
|
||||
let repo_path = clone_args.unique_path(&config.repo_directory)?;
|
||||
clone_args.destination = Some(repo_path.display().to_string());
|
||||
|
||||
let access_token = if let Some(username) = &clone_args.account {
|
||||
git_token(&clone_args.provider, username, |https| {
|
||||
clone_args.https = https
|
||||
})
|
||||
.await
|
||||
.with_context(
|
||||
|| format!("Failed to get git token in call to db. Stopping run. | {} | {username}", clone_args.provider),
|
||||
)?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let (res, _) = git::pull_or_clone(
|
||||
clone_args,
|
||||
&config.repo_directory,
|
||||
access_token,
|
||||
)
|
||||
.await
|
||||
.context("failed to clone build repo")?;
|
||||
|
||||
let relative_path = PathBuf::from(&build.config.build_path)
|
||||
.join(&build.config.dockerfile_path);
|
||||
|
||||
let full_path = repo_path.join(&relative_path);
|
||||
let (contents, error) =
|
||||
match fs::read_to_string(&full_path).await.with_context(|| {
|
||||
format!("Failed to read dockerfile contents at {full_path:?}")
|
||||
}) {
|
||||
Ok(contents) => (Some(contents), None),
|
||||
Err(e) => (None, Some(format_serror(&e.into()))),
|
||||
};
|
||||
Ok(Some((
|
||||
Some(relative_path.display().to_string()),
|
||||
contents,
|
||||
error,
|
||||
res.commit_hash,
|
||||
res.commit_message,
|
||||
)))
|
||||
}
|
||||
|
||||
@@ -6,55 +6,81 @@ use komodo_client::{
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::resource;
|
||||
use crate::{permission::get_check_permissions, resource};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateBuilder {
|
||||
#[instrument(name = "CreateBuilder", skip(user))]
|
||||
#[instrument(
|
||||
"CreateBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
Ok(
|
||||
resource::create::<Builder>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Builder>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyBuilder {
|
||||
#[instrument(name = "CopyBuilder", skip(user))]
|
||||
#[instrument(
|
||||
"CopyBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.name,
|
||||
copy_builder = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
let Builder { config, .. } =
|
||||
resource::get_check_permissions::<Builder>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Builder>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
let Builder { config, .. } = get_check_permissions::<Builder>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Builder>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteBuilder {
|
||||
#[instrument(name = "DeleteBuilder", skip(args))]
|
||||
#[instrument(
|
||||
"DeleteBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Builder> {
|
||||
Ok(resource::delete::<Builder>(&self.id, args).await?)
|
||||
Ok(resource::delete::<Builder>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateBuilder {
|
||||
#[instrument(name = "UpdateBuilder", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -67,7 +93,15 @@ impl Resolve<WriteArgs> for UpdateBuilder {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameBuilder {
|
||||
#[instrument(name = "RenameBuilder", skip(user))]
|
||||
#[instrument(
|
||||
"RenameBuilder",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
builder = self.id,
|
||||
new_name = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{by_id::update_one_by_id, mongodb::bson::doc};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
@@ -11,11 +12,10 @@ use komodo_client::{
|
||||
komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
server::{Server, ServerState},
|
||||
to_komodo_name,
|
||||
to_container_compatible_name,
|
||||
update::Update,
|
||||
},
|
||||
};
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::doc};
|
||||
use periphery_client::api::{self, container::InspectContainer};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
@@ -25,6 +25,7 @@ use crate::{
|
||||
query::get_deployment_state,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, db_client, server_status_cache},
|
||||
};
|
||||
@@ -32,48 +33,78 @@ use crate::{
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateDeployment {
|
||||
#[instrument(name = "CreateDeployment", skip(user))]
|
||||
#[instrument(
|
||||
"CreateDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
Ok(
|
||||
resource::create::<Deployment>(&self.name, self.config, user)
|
||||
.await?,
|
||||
resource::create::<Deployment>(
|
||||
&self.name,
|
||||
self.config,
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyDeployment {
|
||||
#[instrument(name = "CopyDeployment", skip(user))]
|
||||
#[instrument(
|
||||
"CopyDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.name,
|
||||
copy_deployment = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
let Deployment { config, .. } =
|
||||
resource::get_check_permissions::<Deployment>(
|
||||
get_check_permissions::<Deployment>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Deployment>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
resource::create::<Deployment>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
#[instrument(name = "CreateDeploymentFromContainer", skip(user))]
|
||||
#[instrument(
|
||||
"CreateDeploymentFromContainer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.server,
|
||||
deployment = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
PermissionLevel::Read.inspect().attach(),
|
||||
)
|
||||
.await?;
|
||||
let cache = server_status_cache()
|
||||
@@ -88,7 +119,8 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
let container = periphery_client(&server)?
|
||||
let container = periphery_client(&server)
|
||||
.await?
|
||||
.request(InspectContainer {
|
||||
name: self.name.clone(),
|
||||
})
|
||||
@@ -152,25 +184,38 @@ impl Resolve<WriteArgs> for CreateDeploymentFromContainer {
|
||||
});
|
||||
}
|
||||
|
||||
Ok(
|
||||
resource::create::<Deployment>(&self.name, config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Deployment>(&self.name, config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteDeployment {
|
||||
#[instrument(name = "DeleteDeployment", skip(args))]
|
||||
#[instrument(
|
||||
"DeleteDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.id
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Deployment> {
|
||||
Ok(resource::delete::<Deployment>(&self.id, args).await?)
|
||||
Ok(resource::delete::<Deployment>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateDeployment {
|
||||
#[instrument(name = "UpdateDeployment", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -183,15 +228,23 @@ impl Resolve<WriteArgs> for UpdateDeployment {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameDeployment {
|
||||
#[instrument(name = "RenameDeployment", skip(user))]
|
||||
#[instrument(
|
||||
"RenameDeployment",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
deployment = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let deployment = resource::get_check_permissions::<Deployment>(
|
||||
let deployment = get_check_permissions::<Deployment>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -206,9 +259,10 @@ impl Resolve<WriteArgs> for RenameDeployment {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.renaming = true)?;
|
||||
|
||||
let name = to_komodo_name(&self.name);
|
||||
let name = to_container_compatible_name(&self.name);
|
||||
|
||||
let container_state = get_deployment_state(&deployment).await?;
|
||||
let container_state =
|
||||
get_deployment_state(&deployment.id).await?;
|
||||
|
||||
if container_state == DeploymentState::Unknown {
|
||||
return Err(
|
||||
@@ -225,7 +279,7 @@ impl Resolve<WriteArgs> for RenameDeployment {
|
||||
update_one_by_id(
|
||||
&db_client().deployments,
|
||||
&deployment.id,
|
||||
mungos::update::Update::Set(
|
||||
database::mungos::update::Update::Set(
|
||||
doc! { "name": &name, "updated_at": komodo_timestamp() },
|
||||
),
|
||||
None,
|
||||
@@ -236,7 +290,8 @@ impl Resolve<WriteArgs> for RenameDeployment {
|
||||
if container_state != DeploymentState::NotDeployed {
|
||||
let server =
|
||||
resource::get::<Server>(&deployment.config.server_id).await?;
|
||||
let log = periphery_client(&server)?
|
||||
let log = periphery_client(&server)
|
||||
.await?
|
||||
.request(api::container::RenameContainer {
|
||||
curr_name: deployment.name.clone(),
|
||||
new_name: name.clone(),
|
||||
|
||||
@@ -1,123 +0,0 @@
|
||||
use anyhow::anyhow;
|
||||
use komodo_client::{
|
||||
api::write::{UpdateDescription, UpdateDescriptionResponse},
|
||||
entities::{
|
||||
ResourceTarget, action::Action, alerter::Alerter, build::Build,
|
||||
builder::Builder, deployment::Deployment, procedure::Procedure,
|
||||
repo::Repo, server::Server, server_template::ServerTemplate,
|
||||
stack::Stack, sync::ResourceSync,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::resource;
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateDescription {
|
||||
#[instrument(name = "UpdateDescription", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateDescriptionResponse> {
|
||||
match self.target {
|
||||
ResourceTarget::System(_) => {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update description of System resource target"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
resource::update_description::<Server>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
resource::update_description::<Deployment>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
resource::update_description::<Build>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
resource::update_description::<Repo>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
resource::update_description::<Builder>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
resource::update_description::<Alerter>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
resource::update_description::<Procedure>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Action(id) => {
|
||||
resource::update_description::<Action>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
resource::update_description::<ServerTemplate>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::update_description::<ResourceSync>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
resource::update_description::<Stack>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
Ok(UpdateDescriptionResponse {})
|
||||
}
|
||||
}
|
||||
@@ -1,30 +1,35 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::{Extension, Router, middleware, routing::post};
|
||||
use axum::{
|
||||
Extension, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use komodo_client::{api::write::*, entities::user::User};
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use strum::Display;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::auth::auth_request;
|
||||
|
||||
use super::Variant;
|
||||
|
||||
mod action;
|
||||
mod alert;
|
||||
mod alerter;
|
||||
mod build;
|
||||
mod builder;
|
||||
mod deployment;
|
||||
mod description;
|
||||
mod onboarding_key;
|
||||
mod permissions;
|
||||
mod procedure;
|
||||
mod provider;
|
||||
mod repo;
|
||||
mod resource;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod service_user;
|
||||
mod stack;
|
||||
mod sync;
|
||||
@@ -41,13 +46,14 @@ pub struct WriteArgs {
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EnumVariants,
|
||||
)]
|
||||
#[variant_derive(Debug)]
|
||||
#[variant_derive(Debug, Display)]
|
||||
#[args(WriteArgs)]
|
||||
#[response(Response)]
|
||||
#[error(serror::Error)]
|
||||
#[serde(tag = "type", content = "params")]
|
||||
pub enum WriteRequest {
|
||||
// ==== USER ====
|
||||
CreateLocalUser(CreateLocalUser),
|
||||
UpdateUserUsername(UpdateUserUsername),
|
||||
UpdateUserPassword(UpdateUserPassword),
|
||||
DeleteUser(DeleteUser),
|
||||
@@ -65,6 +71,7 @@ pub enum WriteRequest {
|
||||
AddUserToUserGroup(AddUserToUserGroup),
|
||||
RemoveUserFromUserGroup(RemoveUserFromUserGroup),
|
||||
SetUsersInUserGroup(SetUsersInUserGroup),
|
||||
SetEveryoneUserGroup(SetEveryoneUserGroup),
|
||||
|
||||
// ==== PERMISSIONS ====
|
||||
UpdateUserAdmin(UpdateUserAdmin),
|
||||
@@ -72,15 +79,30 @@ pub enum WriteRequest {
|
||||
UpdatePermissionOnResourceType(UpdatePermissionOnResourceType),
|
||||
UpdatePermissionOnTarget(UpdatePermissionOnTarget),
|
||||
|
||||
// ==== DESCRIPTION ====
|
||||
UpdateDescription(UpdateDescription),
|
||||
// ==== RESOURCE ====
|
||||
UpdateResourceMeta(UpdateResourceMeta),
|
||||
|
||||
// ==== SERVER ====
|
||||
CreateServer(CreateServer),
|
||||
CopyServer(CopyServer),
|
||||
DeleteServer(DeleteServer),
|
||||
UpdateServer(UpdateServer),
|
||||
RenameServer(RenameServer),
|
||||
CreateNetwork(CreateNetwork),
|
||||
CreateTerminal(CreateTerminal),
|
||||
DeleteTerminal(DeleteTerminal),
|
||||
DeleteAllTerminals(DeleteAllTerminals),
|
||||
UpdateServerPublicKey(UpdateServerPublicKey),
|
||||
RotateServerKeys(RotateServerKeys),
|
||||
|
||||
// ==== STACK ====
|
||||
CreateStack(CreateStack),
|
||||
CopyStack(CopyStack),
|
||||
DeleteStack(DeleteStack),
|
||||
UpdateStack(UpdateStack),
|
||||
RenameStack(RenameStack),
|
||||
WriteStackFileContents(WriteStackFileContents),
|
||||
RefreshStackCache(RefreshStackCache),
|
||||
|
||||
// ==== DEPLOYMENT ====
|
||||
CreateDeployment(CreateDeployment),
|
||||
@@ -98,8 +120,6 @@ pub enum WriteRequest {
|
||||
RenameBuild(RenameBuild),
|
||||
WriteBuildFileContents(WriteBuildFileContents),
|
||||
RefreshBuildCache(RefreshBuildCache),
|
||||
CreateBuildWebhook(CreateBuildWebhook),
|
||||
DeleteBuildWebhook(DeleteBuildWebhook),
|
||||
|
||||
// ==== BUILDER ====
|
||||
CreateBuilder(CreateBuilder),
|
||||
@@ -108,13 +128,6 @@ pub enum WriteRequest {
|
||||
UpdateBuilder(UpdateBuilder),
|
||||
RenameBuilder(RenameBuilder),
|
||||
|
||||
// ==== SERVER TEMPLATE ====
|
||||
CreateServerTemplate(CreateServerTemplate),
|
||||
CopyServerTemplate(CopyServerTemplate),
|
||||
DeleteServerTemplate(DeleteServerTemplate),
|
||||
UpdateServerTemplate(UpdateServerTemplate),
|
||||
RenameServerTemplate(RenameServerTemplate),
|
||||
|
||||
// ==== REPO ====
|
||||
CreateRepo(CreateRepo),
|
||||
CopyRepo(CopyRepo),
|
||||
@@ -122,8 +135,6 @@ pub enum WriteRequest {
|
||||
UpdateRepo(UpdateRepo),
|
||||
RenameRepo(RenameRepo),
|
||||
RefreshRepoCache(RefreshRepoCache),
|
||||
CreateRepoWebhook(CreateRepoWebhook),
|
||||
DeleteRepoWebhook(DeleteRepoWebhook),
|
||||
|
||||
// ==== ALERTER ====
|
||||
CreateAlerter(CreateAlerter),
|
||||
@@ -155,26 +166,12 @@ pub enum WriteRequest {
|
||||
WriteSyncFileContents(WriteSyncFileContents),
|
||||
CommitSync(CommitSync),
|
||||
RefreshResourceSyncPending(RefreshResourceSyncPending),
|
||||
CreateSyncWebhook(CreateSyncWebhook),
|
||||
DeleteSyncWebhook(DeleteSyncWebhook),
|
||||
|
||||
// ==== STACK ====
|
||||
CreateStack(CreateStack),
|
||||
CopyStack(CopyStack),
|
||||
DeleteStack(DeleteStack),
|
||||
UpdateStack(UpdateStack),
|
||||
RenameStack(RenameStack),
|
||||
WriteStackFileContents(WriteStackFileContents),
|
||||
RefreshStackCache(RefreshStackCache),
|
||||
CreateStackWebhook(CreateStackWebhook),
|
||||
DeleteStackWebhook(DeleteStackWebhook),
|
||||
|
||||
// ==== TAG ====
|
||||
CreateTag(CreateTag),
|
||||
DeleteTag(DeleteTag),
|
||||
RenameTag(RenameTag),
|
||||
UpdateTagColor(UpdateTagColor),
|
||||
UpdateTagsOnResource(UpdateTagsOnResource),
|
||||
|
||||
// ==== VARIABLE ====
|
||||
CreateVariable(CreateVariable),
|
||||
@@ -183,21 +180,42 @@ pub enum WriteRequest {
|
||||
UpdateVariableIsSecret(UpdateVariableIsSecret),
|
||||
DeleteVariable(DeleteVariable),
|
||||
|
||||
// ==== PROVIDERS ====
|
||||
// ==== PROVIDER ====
|
||||
CreateGitProviderAccount(CreateGitProviderAccount),
|
||||
UpdateGitProviderAccount(UpdateGitProviderAccount),
|
||||
DeleteGitProviderAccount(DeleteGitProviderAccount),
|
||||
CreateDockerRegistryAccount(CreateDockerRegistryAccount),
|
||||
UpdateDockerRegistryAccount(UpdateDockerRegistryAccount),
|
||||
DeleteDockerRegistryAccount(DeleteDockerRegistryAccount),
|
||||
|
||||
// ==== ONBOARDING KEY ====
|
||||
CreateOnboardingKey(CreateOnboardingKey),
|
||||
UpdateOnboardingKey(UpdateOnboardingKey),
|
||||
DeleteOnboardingKey(DeleteOnboardingKey),
|
||||
|
||||
// ==== ALERT ====
|
||||
CloseAlert(CloseAlert),
|
||||
}
|
||||
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
async fn variant_handler(
|
||||
user: Extension<User>,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let req: WriteRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<WriteRequest>,
|
||||
@@ -208,38 +226,25 @@ async fn handler(
|
||||
.await
|
||||
.context("failure in spawned task");
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!("/write request {req_id} spawn error: {e:#}");
|
||||
}
|
||||
|
||||
res?
|
||||
}
|
||||
|
||||
#[instrument(
|
||||
name = "WriteRequest",
|
||||
skip(user, request),
|
||||
fields(
|
||||
user_id = user.id,
|
||||
request = format!("{:?}", request.extract_variant())
|
||||
)
|
||||
)]
|
||||
async fn task(
|
||||
req_id: Uuid,
|
||||
request: WriteRequest,
|
||||
user: User,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
info!("/write request | user: {}", user.username);
|
||||
|
||||
let timer = Instant::now();
|
||||
let variant = request.extract_variant();
|
||||
info!("/write request | {variant} | user: {}", user.username);
|
||||
|
||||
let res = request.resolve(&WriteArgs { user }).await;
|
||||
|
||||
if let Err(e) = &res {
|
||||
warn!("/write request {req_id} error: {:#}", e.error);
|
||||
warn!(
|
||||
"/write request {req_id} | {variant} | error: {:#}",
|
||||
e.error
|
||||
);
|
||||
}
|
||||
|
||||
let elapsed = timer.elapsed();
|
||||
debug!("/write request {req_id} | resolve time: {elapsed:?}");
|
||||
|
||||
res.map(|res| res.0)
|
||||
}
|
||||
|
||||
200
bin/core/src/api/write/onboarding_key.rs
Normal file
200
bin/core/src/api/write/onboarding_key.rs
Normal file
@@ -0,0 +1,200 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::{Document, doc};
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
CreateOnboardingKey, CreateOnboardingKeyResponse,
|
||||
DeleteOnboardingKey, DeleteOnboardingKeyResponse,
|
||||
UpdateOnboardingKey, UpdateOnboardingKeyResponse,
|
||||
},
|
||||
entities::{
|
||||
komodo_timestamp, onboarding_key::OnboardingKey, random_string,
|
||||
},
|
||||
};
|
||||
use noise::key::EncodedKeyPair;
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::{AddStatusCode, AddStatusCodeError};
|
||||
|
||||
use crate::{api::write::WriteArgs, state::db_client};
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for CreateOnboardingKey {
|
||||
#[instrument(
|
||||
"CreateOnboardingKey",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
name = self.name,
|
||||
expires = self.expires,
|
||||
tags = format!("{:?}", self.tags),
|
||||
copy_server = self.copy_server,
|
||||
create_builder = self.create_builder,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<CreateOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let private_key = if let Some(private_key) = self.private_key {
|
||||
private_key
|
||||
} else {
|
||||
format!("O-{}", random_string(30))
|
||||
};
|
||||
let public_key = EncodedKeyPair::from_private_key(&private_key)?
|
||||
.public
|
||||
.into_inner();
|
||||
let onboarding_key = OnboardingKey {
|
||||
public_key,
|
||||
name: self.name,
|
||||
enabled: true,
|
||||
onboarded: Default::default(),
|
||||
created_at: komodo_timestamp(),
|
||||
expires: self.expires,
|
||||
tags: self.tags,
|
||||
copy_server: self.copy_server,
|
||||
create_builder: self.create_builder,
|
||||
};
|
||||
let db = db_client();
|
||||
// Create the key
|
||||
db.onboarding_keys
|
||||
.insert_one(&onboarding_key)
|
||||
.await
|
||||
.context(
|
||||
"Failed to create Server onboarding key on database",
|
||||
)?;
|
||||
let created = db
|
||||
.onboarding_keys
|
||||
.find_one(doc! { "public_key": &onboarding_key.public_key })
|
||||
.await
|
||||
.context("Failed to query database for Server onboarding keys")?
|
||||
.context(
|
||||
"No Server onboarding key found on database after create",
|
||||
)?;
|
||||
Ok(CreateOnboardingKeyResponse {
|
||||
private_key,
|
||||
created,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateOnboardingKey {
|
||||
#[instrument(
|
||||
"UpdateOnboardingKey",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
public_key = self.public_key,
|
||||
update = format!("{:?}", self),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UpdateOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let query = doc! { "public_key": &self.public_key };
|
||||
|
||||
// No changes
|
||||
if self.is_none() {
|
||||
return db_client()
|
||||
.onboarding_keys
|
||||
.find_one(query)
|
||||
.await
|
||||
.context("Failed to query database for onboarding key")?
|
||||
.context("No matching onboarding key found")
|
||||
.status_code(StatusCode::NOT_FOUND);
|
||||
}
|
||||
|
||||
let mut update = Document::new();
|
||||
|
||||
if let Some(enabled) = self.enabled {
|
||||
update.insert("enabled", enabled);
|
||||
}
|
||||
|
||||
if let Some(name) = self.name {
|
||||
update.insert("name", name);
|
||||
}
|
||||
|
||||
if let Some(expires) = self.expires {
|
||||
update.insert("expires", expires);
|
||||
}
|
||||
|
||||
if let Some(tags) = self.tags {
|
||||
update.insert("tags", tags);
|
||||
}
|
||||
|
||||
if let Some(copy_server) = self.copy_server {
|
||||
update.insert("copy_server", copy_server);
|
||||
}
|
||||
|
||||
if let Some(create_builder) = self.create_builder {
|
||||
update.insert("create_builder", create_builder);
|
||||
}
|
||||
|
||||
db_client()
|
||||
.onboarding_keys
|
||||
.update_one(query.clone(), doc! { "$set": update })
|
||||
.await
|
||||
.context("Failed to update onboarding key on database")?;
|
||||
|
||||
db_client()
|
||||
.onboarding_keys
|
||||
.find_one(query)
|
||||
.await
|
||||
.context("Failed to query database for onboarding key")?
|
||||
.context("No matching onboarding key found")
|
||||
.status_code(StatusCode::NOT_FOUND)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteOnboardingKey {
|
||||
#[instrument(
|
||||
"DeleteOnboardingKey",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
public_key = self.public_key,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<DeleteOnboardingKeyResponse> {
|
||||
if !admin.admin {
|
||||
return Err(
|
||||
anyhow!("This call is admin only")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
let db = db_client();
|
||||
let query = doc! { "public_key": &self.public_key };
|
||||
let creation_key = db
|
||||
.onboarding_keys
|
||||
.find_one(query.clone())
|
||||
.await
|
||||
.context("Failed to query database for Server onboarding keys")?
|
||||
.context("Server onboarding key matching provided public key not found")
|
||||
.status_code(StatusCode::NOT_FOUND)?;
|
||||
db.onboarding_keys.delete_one(query).await.context(
|
||||
"Failed to delete Server onboarding key from database",
|
||||
)?;
|
||||
Ok(creation_key)
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,14 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::{find_one_by_id, update_one_by_id},
|
||||
mongodb::{
|
||||
bson::{Document, doc, oid::ObjectId, to_bson},
|
||||
options::UpdateOptions,
|
||||
},
|
||||
};
|
||||
use derive_variants::ExtractVariant as _;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
@@ -8,13 +16,6 @@ use komodo_client::{
|
||||
permission::{UserTarget, UserTargetVariant},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{find_one_by_id, update_one_by_id},
|
||||
mongodb::{
|
||||
bson::{Document, doc, oid::ObjectId},
|
||||
options::UpdateOptions,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{helpers::query::get_user, state::db_client};
|
||||
@@ -22,7 +23,15 @@ use crate::{helpers::query::get_user, state::db_client};
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateUserAdmin {
|
||||
#[instrument(name = "UpdateUserAdmin", skip(super_admin))]
|
||||
#[instrument(
|
||||
"UpdateUserAdmin",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = super_admin.id,
|
||||
target_user = self.user_id,
|
||||
admin = self.admin,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: super_admin }: &WriteArgs,
|
||||
@@ -60,11 +69,25 @@ impl Resolve<WriteArgs> for UpdateUserAdmin {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateUserBasePermissions {
|
||||
#[instrument(name = "UpdateUserBasePermissions", skip(admin))]
|
||||
#[instrument(
|
||||
"UpdateUserBasePermissions",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
target_user = self.user_id,
|
||||
enabled = self.enabled,
|
||||
create_servers = self.create_servers,
|
||||
create_builds = self.create_builds,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UpdateUserBasePermissionsResponse> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
let UpdateUserBasePermissions {
|
||||
user_id,
|
||||
enabled,
|
||||
@@ -72,10 +95,6 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
|
||||
create_builds,
|
||||
} = self;
|
||||
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
let user = find_one_by_id(&db_client().users, &user_id)
|
||||
.await
|
||||
.context("failed to query mongo for user")?
|
||||
@@ -107,7 +126,7 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
|
||||
update_one_by_id(
|
||||
&db_client().users,
|
||||
&user_id,
|
||||
mungos::update::Update::Set(update_doc),
|
||||
database::mungos::update::Update::Set(update_doc),
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
@@ -117,21 +136,30 @@ impl Resolve<WriteArgs> for UpdateUserBasePermissions {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
#[instrument(name = "UpdatePermissionOnResourceType", skip(admin))]
|
||||
#[instrument(
|
||||
"UpdatePermissionOnResourceType",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
user_target = format!("{:?}", self.user_target),
|
||||
resource_type = self.resource_type.to_string(),
|
||||
permission = format!("{:?}", self.permission),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UpdatePermissionOnResourceTypeResponse> {
|
||||
let UpdatePermissionOnResourceType {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
let Self {
|
||||
user_target,
|
||||
resource_type,
|
||||
permission,
|
||||
} = self;
|
||||
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
// Some extra checks if user target is an actual User
|
||||
if let UserTarget::User(user_id) = &user_target {
|
||||
let user = get_user(user_id).await?;
|
||||
@@ -153,9 +181,11 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
|
||||
let id = ObjectId::from_str(&user_target_id)
|
||||
.context("id is not ObjectId")?;
|
||||
let field = format!("all.{resource_type}");
|
||||
let filter = doc! { "_id": id };
|
||||
let update = doc! { "$set": { &field: permission.as_ref() } };
|
||||
let field = format!("all.{resource_type}");
|
||||
let set =
|
||||
to_bson(&permission).context("permission is not Bson")?;
|
||||
let update = doc! { "$set": { &field: &set } };
|
||||
|
||||
match user_target_variant {
|
||||
UserTargetVariant::User => {
|
||||
@@ -164,7 +194,7 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
.update_one(filter, update)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("failed to set {field}: {permission} on db")
|
||||
format!("failed to set {field}: {set} on db")
|
||||
})?;
|
||||
}
|
||||
UserTargetVariant::UserGroup => {
|
||||
@@ -173,7 +203,7 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
.update_one(filter, update)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!("failed to set {field}: {permission} on db")
|
||||
format!("failed to set {field}: {set} on db")
|
||||
})?;
|
||||
}
|
||||
}
|
||||
@@ -183,24 +213,37 @@ impl Resolve<WriteArgs> for UpdatePermissionOnResourceType {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
|
||||
#[instrument(name = "UpdatePermissionOnTarget", skip(admin))]
|
||||
#[instrument(
|
||||
"UpdatePermissionOnTarget",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = admin.id,
|
||||
user_target = format!("{:?}", self.user_target),
|
||||
resource_type = self.resource_target.extract_variant().to_string(),
|
||||
resource_id = self.resource_target.extract_variant_id().1,
|
||||
permission = format!("{:?}", self.permission),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user: admin }: &WriteArgs,
|
||||
) -> serror::Result<UpdatePermissionOnTargetResponse> {
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
let UpdatePermissionOnTarget {
|
||||
user_target,
|
||||
resource_target,
|
||||
permission,
|
||||
} = self;
|
||||
|
||||
if !admin.admin {
|
||||
return Err(anyhow!("this method is admin only").into());
|
||||
}
|
||||
|
||||
// Some extra checks if user target is an actual User
|
||||
// Some extra checks relevant if user target is an actual User
|
||||
if let UserTarget::User(user_id) = &user_target {
|
||||
let user = get_user(user_id).await?;
|
||||
if !user.enabled {
|
||||
return Err(anyhow!("user not enabled").into());
|
||||
}
|
||||
if user.admin {
|
||||
return Err(
|
||||
anyhow!(
|
||||
@@ -209,9 +252,6 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if !user.enabled {
|
||||
return Err(anyhow!("user not enabled").into());
|
||||
}
|
||||
}
|
||||
|
||||
let (user_target_variant, user_target_id) =
|
||||
@@ -223,6 +263,9 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
|
||||
let (user_target_variant, resource_variant) =
|
||||
(user_target_variant.as_ref(), resource_variant.as_ref());
|
||||
|
||||
let specific = to_bson(&permission.specific)
|
||||
.context("permission.specific is not valid Bson")?;
|
||||
|
||||
db_client()
|
||||
.permissions
|
||||
.update_one(
|
||||
@@ -238,7 +281,8 @@ impl Resolve<WriteArgs> for UpdatePermissionOnTarget {
|
||||
"user_target.id": user_target_id,
|
||||
"resource_target.type": resource_variant,
|
||||
"resource_target.id": resource_id,
|
||||
"level": permission.as_ref(),
|
||||
"level": permission.level.as_ref(),
|
||||
"specific": specific
|
||||
}
|
||||
},
|
||||
)
|
||||
@@ -406,20 +450,6 @@ async fn extract_resource_target_with_validation(
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Action, id))
|
||||
}
|
||||
ResourceTarget::ServerTemplate(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.server_templates
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for server templates")?
|
||||
.context("no matching server template found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::ServerTemplate, id))
|
||||
}
|
||||
ResourceTarget::ResourceSync(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
|
||||
@@ -6,45 +6,70 @@ use komodo_client::{
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::resource;
|
||||
use crate::{permission::get_check_permissions, resource};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateProcedure {
|
||||
#[instrument(name = "CreateProcedure", skip(user))]
|
||||
#[instrument(
|
||||
"CreateProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CreateProcedureResponse> {
|
||||
Ok(
|
||||
resource::create::<Procedure>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Procedure>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyProcedure {
|
||||
#[instrument(name = "CopyProcedure", skip(user))]
|
||||
#[instrument(
|
||||
"CopyProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.name,
|
||||
copy_procedure = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CopyProcedureResponse> {
|
||||
let Procedure { config, .. } =
|
||||
resource::get_check_permissions::<Procedure>(
|
||||
get_check_permissions::<Procedure>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Procedure>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
resource::create::<Procedure>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
None,
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateProcedure {
|
||||
#[instrument(name = "UpdateProcedure", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -57,7 +82,15 @@ impl Resolve<WriteArgs> for UpdateProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameProcedure {
|
||||
#[instrument(name = "RenameProcedure", skip(user))]
|
||||
#[instrument(
|
||||
"RenameProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -70,11 +103,18 @@ impl Resolve<WriteArgs> for RenameProcedure {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteProcedure {
|
||||
#[instrument(name = "DeleteProcedure", skip(args))]
|
||||
#[instrument(
|
||||
"DeleteProcedure",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
procedure = self.id
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteProcedureResponse> {
|
||||
Ok(resource::delete::<Procedure>(&self.id, args).await?)
|
||||
Ok(resource::delete::<Procedure>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,8 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
@@ -6,11 +10,9 @@ use komodo_client::{
|
||||
provider::{DockerRegistryAccount, GitProviderAccount},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::{delete_one_by_id, find_one_by_id, update_one_by_id},
|
||||
mongodb::bson::{doc, to_document},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::{
|
||||
helpers::update::{add_update, make_update},
|
||||
@@ -20,25 +22,41 @@ use crate::{
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateGitProviderAccount {
|
||||
#[instrument(
|
||||
"CreateGitProviderAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
https = self.account.https.unwrap_or(true),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<CreateGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("only admins can create git provider accounts")
|
||||
.into(),
|
||||
anyhow!("Only admins can create git provider accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let mut account: GitProviderAccount = self.account.into();
|
||||
|
||||
if account.domain.is_empty() {
|
||||
return Err(anyhow!("domain cannot be empty string.").into());
|
||||
return Err(
|
||||
anyhow!("Domain cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
if account.username.is_empty() {
|
||||
return Err(anyhow!("username cannot be empty string.").into());
|
||||
return Err(
|
||||
anyhow!("Username cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
@@ -51,14 +69,14 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
|
||||
.git_accounts
|
||||
.insert_one(&account)
|
||||
.await
|
||||
.context("failed to create git provider account on db")?
|
||||
.context("Failed to create git provider account on db")?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("inserted id is not ObjectId")?
|
||||
.context("Inserted id is not ObjectId")?
|
||||
.to_string();
|
||||
|
||||
update.push_simple_log(
|
||||
"create git provider account",
|
||||
"Create git provider account",
|
||||
format!(
|
||||
"Created git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -70,7 +88,7 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for create git provider account | {e:#}")
|
||||
error!("Failed to add update for create git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -79,33 +97,44 @@ impl Resolve<WriteArgs> for CreateGitProviderAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
#[instrument(
|
||||
"UpdateGitProviderAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
https = self.account.https.unwrap_or(true),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
mut self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("only admins can update git provider accounts")
|
||||
.into(),
|
||||
anyhow!("Only admins can update git provider accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(domain) = &self.account.domain {
|
||||
if domain.is_empty() {
|
||||
return Err(
|
||||
anyhow!("cannot update git provider with empty domain")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if let Some(domain) = &self.account.domain
|
||||
&& domain.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!("Cannot update git provider with empty domain")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(username) = &self.account.username {
|
||||
if username.is_empty() {
|
||||
return Err(
|
||||
anyhow!("cannot update git provider with empty username")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if let Some(username) = &self.account.username
|
||||
&& username.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!("Cannot update git provider with empty username")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
// Ensure update does not change id
|
||||
@@ -118,7 +147,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
);
|
||||
|
||||
let account = to_document(&self.account).context(
|
||||
"failed to serialize partial git provider account to bson",
|
||||
"Failed to serialize partial git provider account to bson",
|
||||
)?;
|
||||
let db = db_client();
|
||||
update_one_by_id(
|
||||
@@ -128,17 +157,17 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
None,
|
||||
)
|
||||
.await
|
||||
.context("failed to update git provider account on db")?;
|
||||
.context("Failed to update git provider account on db")?;
|
||||
|
||||
let Some(account) = find_one_by_id(&db.git_accounts, &self.id)
|
||||
.await
|
||||
.context("failed to query db for git accounts")?
|
||||
.context("Failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
return Err(anyhow!("No account found with given id").into());
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"update git provider account",
|
||||
"Update git provider account",
|
||||
format!(
|
||||
"Updated git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -150,7 +179,7 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for update git provider account | {e:#}")
|
||||
error!("Failed to add update for update git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -159,14 +188,22 @@ impl Resolve<WriteArgs> for UpdateGitProviderAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteGitProviderAccount {
|
||||
#[instrument(
|
||||
"DeleteGitProviderAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteGitProviderAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("only admins can delete git provider accounts")
|
||||
.into(),
|
||||
anyhow!("Only admins can delete git provider accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -179,16 +216,19 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
|
||||
let db = db_client();
|
||||
let Some(account) = find_one_by_id(&db.git_accounts, &self.id)
|
||||
.await
|
||||
.context("failed to query db for git accounts")?
|
||||
.context("Failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
return Err(
|
||||
anyhow!("No account found with given id")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
};
|
||||
delete_one_by_id(&db.git_accounts, &self.id, None)
|
||||
.await
|
||||
.context("failed to delete git account on db")?;
|
||||
|
||||
update.push_simple_log(
|
||||
"delete git provider account",
|
||||
"Delete git provider account",
|
||||
format!(
|
||||
"Deleted git provider account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -200,7 +240,7 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for delete git provider account | {e:#}")
|
||||
error!("Failed to add update for delete git provider account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -209,6 +249,15 @@ impl Resolve<WriteArgs> for DeleteGitProviderAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
#[instrument(
|
||||
"CreateDockerRegistryAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -216,20 +265,26 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"only admins can create docker registry account accounts"
|
||||
"Only admins can create docker registry account accounts"
|
||||
)
|
||||
.into(),
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let mut account: DockerRegistryAccount = self.account.into();
|
||||
|
||||
if account.domain.is_empty() {
|
||||
return Err(anyhow!("domain cannot be empty string.").into());
|
||||
return Err(
|
||||
anyhow!("Domain cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
if account.username.is_empty() {
|
||||
return Err(anyhow!("username cannot be empty string.").into());
|
||||
return Err(
|
||||
anyhow!("Username cannot be empty string.")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
let mut update = make_update(
|
||||
@@ -243,15 +298,15 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
.insert_one(&account)
|
||||
.await
|
||||
.context(
|
||||
"failed to create docker registry account account on db",
|
||||
"Failed to create docker registry account account on db",
|
||||
)?
|
||||
.inserted_id
|
||||
.as_object_id()
|
||||
.context("inserted id is not ObjectId")?
|
||||
.context("Inserted id is not ObjectId")?
|
||||
.to_string();
|
||||
|
||||
update.push_simple_log(
|
||||
"create docker registry account",
|
||||
"Create docker registry account",
|
||||
format!(
|
||||
"Created docker registry account account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -263,7 +318,7 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for create docker registry account | {e:#}")
|
||||
error!("Failed to add update for create docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -272,37 +327,47 @@ impl Resolve<WriteArgs> for CreateDockerRegistryAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
#[instrument(
|
||||
"UpdateDockerRegistryAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
domain = self.account.domain,
|
||||
username = self.account.username,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
mut self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<UpdateDockerRegistryAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("only admins can update docker registry accounts")
|
||||
.into(),
|
||||
anyhow!("Only admins can update docker registry accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(domain) = &self.account.domain {
|
||||
if domain.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update docker registry account with empty domain"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if let Some(domain) = &self.account.domain
|
||||
&& domain.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot update docker registry account with empty domain"
|
||||
)
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(username) = &self.account.username {
|
||||
if username.is_empty() {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"cannot update docker registry account with empty username"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if let Some(username) = &self.account.username
|
||||
&& username.is_empty()
|
||||
{
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot update docker registry account with empty username"
|
||||
)
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
|
||||
self.account.id = None;
|
||||
@@ -314,7 +379,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
);
|
||||
|
||||
let account = to_document(&self.account).context(
|
||||
"failed to serialize partial docker registry account account to bson",
|
||||
"Failed to serialize partial docker registry account account to bson",
|
||||
)?;
|
||||
|
||||
let db = db_client();
|
||||
@@ -326,19 +391,19 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
)
|
||||
.await
|
||||
.context(
|
||||
"failed to update docker registry account account on db",
|
||||
"Failed to update docker registry account account on db",
|
||||
)?;
|
||||
|
||||
let Some(account) =
|
||||
find_one_by_id(&db.registry_accounts, &self.id)
|
||||
.await
|
||||
.context("failed to query db for registry accounts")?
|
||||
.context("Failed to query db for registry accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
return Err(anyhow!("No account found with given id").into());
|
||||
};
|
||||
|
||||
update.push_simple_log(
|
||||
"update docker registry account",
|
||||
"Update docker registry account",
|
||||
format!(
|
||||
"Updated docker registry account account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -350,7 +415,7 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for update docker registry account | {e:#}")
|
||||
error!("Failed to add update for update docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
@@ -359,14 +424,22 @@ impl Resolve<WriteArgs> for UpdateDockerRegistryAccount {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
|
||||
#[instrument(
|
||||
"DeleteDockerRegistryAccount",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
id = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteDockerRegistryAccountResponse> {
|
||||
if !user.admin {
|
||||
return Err(
|
||||
anyhow!("only admins can delete docker registry accounts")
|
||||
.into(),
|
||||
anyhow!("Only admins can delete docker registry accounts")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
@@ -380,16 +453,19 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
|
||||
let Some(account) =
|
||||
find_one_by_id(&db.registry_accounts, &self.id)
|
||||
.await
|
||||
.context("failed to query db for git accounts")?
|
||||
.context("Failed to query db for git accounts")?
|
||||
else {
|
||||
return Err(anyhow!("no account found with given id").into());
|
||||
return Err(
|
||||
anyhow!("No account found with given id")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
};
|
||||
delete_one_by_id(&db.registry_accounts, &self.id, None)
|
||||
.await
|
||||
.context("failed to delete registry account on db")?;
|
||||
.context("Failed to delete registry account on db")?;
|
||||
|
||||
update.push_simple_log(
|
||||
"delete registry account",
|
||||
"Delete registry account",
|
||||
format!(
|
||||
"Deleted registry account for {} with username {}",
|
||||
account.domain, account.username
|
||||
@@ -401,7 +477,7 @@ impl Resolve<WriteArgs> for DeleteDockerRegistryAccount {
|
||||
add_update(update)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!("failed to add update for delete docker registry account | {e:#}")
|
||||
error!("Failed to add update for delete docker registry account | {e:#}")
|
||||
})
|
||||
.ok();
|
||||
|
||||
|
||||
@@ -1,24 +1,20 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use anyhow::Context;
|
||||
use database::mongo_indexed::doc;
|
||||
use database::mungos::{
|
||||
by_id::update_one_by_id, mongodb::bson::to_document,
|
||||
};
|
||||
use formatting::format_serror;
|
||||
use git::GitRes;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
CloneArgs, NoData, Operation,
|
||||
config::core::CoreConfig,
|
||||
komodo_timestamp,
|
||||
NoData, Operation, RepoExecutionArgs, komodo_timestamp,
|
||||
permission::PermissionLevel,
|
||||
repo::{PartialRepoConfig, Repo, RepoInfo},
|
||||
repo::{Repo, RepoInfo},
|
||||
server::Server,
|
||||
to_komodo_name,
|
||||
to_path_compatible_name,
|
||||
update::{Log, Update},
|
||||
},
|
||||
};
|
||||
use mongo_indexed::doc;
|
||||
use mungos::{by_id::update_one_by_id, mongodb::bson::to_document};
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use periphery_client::api;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
@@ -28,51 +24,84 @@ use crate::{
|
||||
git_token, periphery_client,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
state::{action_states, db_client, github_client},
|
||||
state::{action_states, db_client},
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateRepo {
|
||||
#[instrument(name = "CreateRepo", skip(user))]
|
||||
#[instrument(
|
||||
"CreateRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
Ok(resource::create::<Repo>(&self.name, self.config, user).await?)
|
||||
resource::create::<Repo>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyRepo {
|
||||
#[instrument(name = "CopyRepo", skip(user))]
|
||||
#[instrument(
|
||||
"CopyRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.name,
|
||||
copy_repo = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
let Repo { config, .. } =
|
||||
resource::get_check_permissions::<Repo>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Repo>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
let Repo { config, .. } = get_check_permissions::<Repo>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
resource::create::<Repo>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteRepo {
|
||||
#[instrument(name = "DeleteRepo", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Repo> {
|
||||
Ok(resource::delete::<Repo>(&self.id, args).await?)
|
||||
#[instrument(
|
||||
"DeleteRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Repo> {
|
||||
Ok(resource::delete::<Repo>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateRepo {
|
||||
#[instrument(name = "UpdateRepo", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -82,15 +111,23 @@ impl Resolve<WriteArgs> for UpdateRepo {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameRepo {
|
||||
#[instrument(name = "RenameRepo", skip(user))]
|
||||
#[instrument(
|
||||
"RenameRepo",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
repo = self.id,
|
||||
new_name = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -111,14 +148,14 @@ impl Resolve<WriteArgs> for RenameRepo {
|
||||
let _action_guard =
|
||||
action_state.update(|state| state.renaming = true)?;
|
||||
|
||||
let name = to_komodo_name(&self.name);
|
||||
let name = to_path_compatible_name(&self.name);
|
||||
|
||||
let mut update = make_update(&repo, Operation::RenameRepo, user);
|
||||
|
||||
update_one_by_id(
|
||||
&db_client().repos,
|
||||
&repo.id,
|
||||
mungos::update::Update::Set(
|
||||
database::mungos::update::Update::Set(
|
||||
doc! { "name": &name, "updated_at": komodo_timestamp() },
|
||||
),
|
||||
None,
|
||||
@@ -129,9 +166,10 @@ impl Resolve<WriteArgs> for RenameRepo {
|
||||
let server =
|
||||
resource::get::<Server>(&repo.config.server_id).await?;
|
||||
|
||||
let log = match periphery_client(&server)?
|
||||
let log = match periphery_client(&server)
|
||||
.await?
|
||||
.request(api::git::RenameRepo {
|
||||
curr_name: to_komodo_name(&repo.name),
|
||||
curr_name: to_path_compatible_name(&repo.name),
|
||||
new_name: name.clone(),
|
||||
})
|
||||
.await
|
||||
@@ -158,21 +196,16 @@ impl Resolve<WriteArgs> for RenameRepo {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
#[instrument(
|
||||
name = "RefreshRepoCache",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
|
||||
// repo should be able to do this.
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
let repo = get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -183,13 +216,10 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
|
||||
let mut clone_args: CloneArgs = (&repo).into();
|
||||
let mut clone_args: RepoExecutionArgs = (&repo).into();
|
||||
let repo_path =
|
||||
clone_args.unique_path(&core_config().repo_directory)?;
|
||||
clone_args.destination = Some(repo_path.display().to_string());
|
||||
// Don't want to run these on core.
|
||||
clone_args.on_clone = None;
|
||||
clone_args.on_pull = None;
|
||||
|
||||
let access_token = if let Some(username) = &clone_args.account {
|
||||
git_token(&clone_args.provider, username, |https| {
|
||||
@@ -203,14 +233,10 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
None
|
||||
};
|
||||
|
||||
let GitRes { hash, message, .. } = git::pull_or_clone(
|
||||
let (res, _) = git::pull_or_clone(
|
||||
clone_args,
|
||||
&core_config().repo_directory,
|
||||
access_token,
|
||||
&[],
|
||||
"",
|
||||
None,
|
||||
&[],
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
@@ -222,8 +248,8 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
last_built_at: repo.info.last_built_at,
|
||||
built_hash: repo.info.built_hash,
|
||||
built_message: repo.info.built_message,
|
||||
latest_hash: hash,
|
||||
latest_message: message,
|
||||
latest_hash: res.commit_hash,
|
||||
latest_message: res.commit_message,
|
||||
};
|
||||
|
||||
let info = to_document(&info)
|
||||
@@ -241,220 +267,3 @@ impl Resolve<WriteArgs> for RefreshRepoCache {
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateRepoWebhook {
|
||||
#[instrument(name = "CreateRepoWebhook", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<CreateRepoWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
&args.user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = repo.config.repo.split('/');
|
||||
let owner = split.next().context("Repo repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if repo.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&repo.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
RepoWebhookAction::Clone => {
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Pull => {
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Build => {
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// Now good to create the webhook
|
||||
let request = ReposCreateWebhookRequest {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
token: Default::default(),
|
||||
}),
|
||||
events: vec![String::from("push")],
|
||||
name: String::from("web"),
|
||||
};
|
||||
github_repos
|
||||
.create_webhook(owner, repo_name, &request)
|
||||
.await
|
||||
.context("failed to create webhook")?;
|
||||
|
||||
if !repo.config.webhook_enabled {
|
||||
UpdateRepo {
|
||||
id: repo.id,
|
||||
config: PartialRepoConfig {
|
||||
webhook_enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("failed to update repo to enable webhook")?;
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteRepoWebhook {
|
||||
#[instrument(name = "DeleteRepoWebhook", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteRepoWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo = resource::get_check_permissions::<Repo>(
|
||||
&self.repo,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if repo.config.git_provider != "github.com" {
|
||||
return Err(
|
||||
anyhow!("Can only manage github.com repo webhooks").into(),
|
||||
);
|
||||
}
|
||||
|
||||
if repo.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = repo.config.repo.split('/');
|
||||
let owner = split.next().context("Repo repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo_name =
|
||||
split.next().context("Repo repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo_name)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
RepoWebhookAction::Clone => {
|
||||
format!("{host}/listener/github/repo/{}/clone", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Pull => {
|
||||
format!("{host}/listener/github/repo/{}/pull", repo.id)
|
||||
}
|
||||
RepoWebhookAction::Build => {
|
||||
format!("{host}/listener/github/repo/{}/build", repo.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
github_repos
|
||||
.delete_webhook(owner, repo_name, webhook.id)
|
||||
.await
|
||||
.context("failed to delete webhook")?;
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// No webhook to delete, all good
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
82
bin/core/src/api/write/resource.rs
Normal file
82
bin/core/src/api/write/resource.rs
Normal file
@@ -0,0 +1,82 @@
|
||||
use anyhow::anyhow;
|
||||
use derive_variants::ExtractVariant as _;
|
||||
use komodo_client::{
|
||||
api::write::{UpdateResourceMeta, UpdateResourceMetaResponse},
|
||||
entities::{
|
||||
ResourceTarget, action::Action, alerter::Alerter, build::Build,
|
||||
builder::Builder, deployment::Deployment, procedure::Procedure,
|
||||
repo::Repo, server::Server, stack::Stack, sync::ResourceSync,
|
||||
},
|
||||
};
|
||||
use reqwest::StatusCode;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
|
||||
use crate::resource::{self, ResourceMetaUpdate};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateResourceMeta {
|
||||
#[instrument(
|
||||
"UpdateResourceMeta",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = args.user.id,
|
||||
resource_type = self.target.extract_variant().to_string(),
|
||||
resource_id = self.target.extract_variant_id().1,
|
||||
description = self.description,
|
||||
template = self.template,
|
||||
tags = format!("{:?}", self.tags),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<UpdateResourceMetaResponse> {
|
||||
let meta = ResourceMetaUpdate {
|
||||
description: self.description,
|
||||
template: self.template,
|
||||
tags: self.tags,
|
||||
};
|
||||
match self.target {
|
||||
ResourceTarget::System(_) => {
|
||||
return Err(
|
||||
anyhow!("cannot update meta of System resource target")
|
||||
.status_code(StatusCode::BAD_REQUEST),
|
||||
);
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
resource::update_meta::<Server>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Deployment(id) => {
|
||||
resource::update_meta::<Deployment>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Build(id) => {
|
||||
resource::update_meta::<Build>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Repo(id) => {
|
||||
resource::update_meta::<Repo>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Builder(id) => {
|
||||
resource::update_meta::<Builder>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Alerter(id) => {
|
||||
resource::update_meta::<Alerter>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Procedure(id) => {
|
||||
resource::update_meta::<Procedure>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::Action(id) => {
|
||||
resource::update_meta::<Action>(&id, meta, args).await?;
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::update_meta::<ResourceSync>(&id, meta, args)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::Stack(id) => {
|
||||
resource::update_meta::<Stack>(&id, meta, args).await?;
|
||||
}
|
||||
}
|
||||
Ok(UpdateResourceMetaResponse {})
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,12 @@
|
||||
use formatting::format_serror;
|
||||
use anyhow::Context;
|
||||
use formatting::{bold, format_serror};
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
Operation,
|
||||
NoData, Operation,
|
||||
permission::PermissionLevel,
|
||||
server::Server,
|
||||
server::{Server, ServerInfo},
|
||||
to_docker_compatible_name,
|
||||
update::{Update, UpdateStatus},
|
||||
},
|
||||
};
|
||||
@@ -16,33 +18,100 @@ use crate::{
|
||||
periphery_client,
|
||||
update::{add_update, make_update, update_update},
|
||||
},
|
||||
resource,
|
||||
permission::get_check_permissions,
|
||||
resource::{self, update_server_public_key},
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateServer {
|
||||
#[instrument(name = "CreateServer", skip(user))]
|
||||
#[instrument(
|
||||
"CreateServer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap()
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Server> {
|
||||
Ok(
|
||||
resource::create::<Server>(&self.name, self.config, user)
|
||||
.await?,
|
||||
resource::create::<Server>(
|
||||
&self.name,
|
||||
self.config,
|
||||
self.public_key.map(|public_key| ServerInfo {
|
||||
public_key,
|
||||
..Default::default()
|
||||
}),
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyServer {
|
||||
#[instrument(
|
||||
"CopyServer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.name,
|
||||
copy_server = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Server> {
|
||||
let Server { config, .. } = get_check_permissions::<Server>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
resource::create::<Server>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
self.public_key.map(|public_key| ServerInfo {
|
||||
public_key,
|
||||
..Default::default()
|
||||
}),
|
||||
user,
|
||||
)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteServer {
|
||||
#[instrument(name = "DeleteServer", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Server> {
|
||||
Ok(resource::delete::<Server>(&self.id, args).await?)
|
||||
#[instrument(
|
||||
"DeleteServer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Server> {
|
||||
Ok(resource::delete::<Server>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateServer {
|
||||
#[instrument(name = "UpdateServer", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateServer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -52,7 +121,15 @@ impl Resolve<WriteArgs> for UpdateServer {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameServer {
|
||||
#[instrument(name = "RenameServer", skip(user))]
|
||||
#[instrument(
|
||||
"RenameServer",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.id,
|
||||
new_name = self.name,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -62,19 +139,27 @@ impl Resolve<WriteArgs> for RenameServer {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateNetwork {
|
||||
#[instrument(name = "CreateNetwork", skip(user))]
|
||||
#[instrument(
|
||||
"CreateNetwork",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.server,
|
||||
network = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server)?;
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::CreateNetwork, user);
|
||||
@@ -82,8 +167,8 @@ impl Resolve<WriteArgs> for CreateNetwork {
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
match periphery
|
||||
.request(api::network::CreateNetwork {
|
||||
name: self.name,
|
||||
.request(api::docker::CreateNetwork {
|
||||
name: to_docker_compatible_name(&self.name),
|
||||
driver: None,
|
||||
})
|
||||
.await
|
||||
@@ -91,7 +176,7 @@ impl Resolve<WriteArgs> for CreateNetwork {
|
||||
Ok(log) => update.logs.push(log),
|
||||
Err(e) => update.push_error_log(
|
||||
"create network",
|
||||
format_serror(&e.context("failed to create network").into()),
|
||||
format_serror(&e.context("Failed to create network").into()),
|
||||
),
|
||||
};
|
||||
|
||||
@@ -101,3 +186,184 @@ impl Resolve<WriteArgs> for CreateNetwork {
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateTerminal {
|
||||
#[instrument(
|
||||
"CreateTerminal",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.server,
|
||||
terminal = self.name,
|
||||
command = self.command,
|
||||
recreate = format!("{:?}", self.recreate),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Write.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::CreateTerminal {
|
||||
name: self.name,
|
||||
command: self.command,
|
||||
recreate: self.recreate,
|
||||
})
|
||||
.await
|
||||
.context("Failed to create terminal on Periphery")?;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteTerminal {
|
||||
#[instrument(
|
||||
"DeleteTerminal",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.server,
|
||||
terminal = self.terminal,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Write.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::DeleteTerminal {
|
||||
terminal: self.terminal,
|
||||
})
|
||||
.await
|
||||
.context("Failed to delete terminal on Periphery")?;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteAllTerminals {
|
||||
#[instrument(
|
||||
"DeleteAllTerminals",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
user,
|
||||
PermissionLevel::Write.terminal(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
periphery
|
||||
.request(api::terminal::DeleteAllTerminals {})
|
||||
.await
|
||||
.context("Failed to delete all terminals on Periphery")?;
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateServerPublicKey {
|
||||
#[instrument(
|
||||
"UpdateServerPublicKey",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = args.user.id,
|
||||
server = self.server,
|
||||
public_key = self.public_key,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
&args.user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
update_server_public_key(&server.id, &self.public_key).await?;
|
||||
|
||||
let mut update =
|
||||
make_update(&server, Operation::UpdateServerKey, &args.user);
|
||||
|
||||
update.push_simple_log(
|
||||
"Update Server Public Key",
|
||||
format!("Public key updated to {}", bold(&self.public_key)),
|
||||
);
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
impl Resolve<WriteArgs> for RotateServerKeys {
|
||||
#[instrument(
|
||||
"RotateServerKeys",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = args.user.id,
|
||||
server = self.server,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> Result<Self::Response, Self::Error> {
|
||||
let server = get_check_permissions::<Server>(
|
||||
&self.server,
|
||||
&args.user,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let periphery = periphery_client(&server).await?;
|
||||
|
||||
let public_key = periphery
|
||||
.request(api::keys::RotatePrivateKey {})
|
||||
.await
|
||||
.context("Failed to rotate Periphery private key")?
|
||||
.public_key;
|
||||
|
||||
UpdateServerPublicKey {
|
||||
server: server.id,
|
||||
public_key,
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,92 +0,0 @@
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
CopyServerTemplate, CreateServerTemplate, DeleteServerTemplate,
|
||||
RenameServerTemplate, UpdateServerTemplate,
|
||||
},
|
||||
entities::{
|
||||
permission::PermissionLevel, server_template::ServerTemplate,
|
||||
update::Update,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::resource;
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateServerTemplate {
|
||||
#[instrument(name = "CreateServerTemplate", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<ServerTemplate> {
|
||||
Ok(
|
||||
resource::create::<ServerTemplate>(
|
||||
&self.name,
|
||||
self.config,
|
||||
user,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyServerTemplate {
|
||||
#[instrument(name = "CopyServerTemplate", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<ServerTemplate> {
|
||||
let ServerTemplate { config, .. } =
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<ServerTemplate>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
user,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteServerTemplate {
|
||||
#[instrument(name = "DeleteServerTemplate", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<ServerTemplate> {
|
||||
Ok(resource::delete::<ServerTemplate>(&self.id, args).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateServerTemplate {
|
||||
#[instrument(name = "UpdateServerTemplate", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<ServerTemplate> {
|
||||
Ok(
|
||||
resource::update::<ServerTemplate>(&self.id, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameServerTemplate {
|
||||
#[instrument(name = "RenameServerTemplate", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
Ok(
|
||||
resource::rename::<ServerTemplate>(&self.id, &self.name, user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,10 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::{
|
||||
by_id::find_one_by_id,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use komodo_client::{
|
||||
api::{user::CreateApiKey, write::*},
|
||||
entities::{
|
||||
@@ -8,10 +12,6 @@ use komodo_client::{
|
||||
user::{User, UserConfig},
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
by_id::find_one_by_id,
|
||||
mongodb::bson::{doc, oid::ObjectId},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{api::user::UserArgs, state::db_client};
|
||||
@@ -19,7 +19,15 @@ use crate::{api::user::UserArgs, state::db_client};
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateServiceUser {
|
||||
#[instrument(name = "CreateServiceUser", skip(user))]
|
||||
#[instrument(
|
||||
"CreateServiceUser",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
username = self.username,
|
||||
description = self.description,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -63,7 +71,15 @@ impl Resolve<WriteArgs> for CreateServiceUser {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateServiceUserDescription {
|
||||
#[instrument(name = "UpdateServiceUserDescription", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateServiceUserDescription",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
username = self.username,
|
||||
description = self.description,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -99,7 +115,16 @@ impl Resolve<WriteArgs> for UpdateServiceUserDescription {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateApiKeyForServiceUser {
|
||||
#[instrument(name = "CreateApiKeyForServiceUser", skip(user))]
|
||||
#[instrument(
|
||||
"CreateApiKeyForServiceUser",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
service_user = self.user_id,
|
||||
name = self.name,
|
||||
expires = self.expires,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -125,7 +150,14 @@ impl Resolve<WriteArgs> for CreateApiKeyForServiceUser {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteApiKeyForServiceUser {
|
||||
#[instrument(name = "DeleteApiKeyForServiceUser", skip(user))]
|
||||
#[instrument(
|
||||
"DeleteApiKeyForServiceUser",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
key = self.key,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
|
||||
@@ -1,88 +1,118 @@
|
||||
use std::path::PathBuf;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use database::mungos::mongodb::bson::{doc, to_document};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::write::*,
|
||||
entities::{
|
||||
FileContents, NoData, Operation,
|
||||
config::core::CoreConfig,
|
||||
FileContents, NoData, Operation, RepoExecutionArgs,
|
||||
all_logs_success,
|
||||
permission::PermissionLevel,
|
||||
repo::Repo,
|
||||
server::ServerState,
|
||||
stack::{PartialStackConfig, Stack, StackInfo},
|
||||
stack::{Stack, StackInfo},
|
||||
update::Update,
|
||||
user::stack_user,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::{doc, to_document};
|
||||
use octorust::types::{
|
||||
ReposCreateWebhookRequest, ReposCreateWebhookRequestConfig,
|
||||
};
|
||||
use periphery_client::api::compose::{
|
||||
GetComposeContentsOnHost, GetComposeContentsOnHostResponse,
|
||||
WriteCommitComposeContents, WriteComposeContentsToHost,
|
||||
WriteComposeContentsToHost,
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::execute::pull_stack_inner,
|
||||
config::core_config,
|
||||
helpers::{
|
||||
git_token, periphery_client,
|
||||
periphery_client,
|
||||
query::get_server_with_state,
|
||||
stack_git_token,
|
||||
update::{add_update, make_update},
|
||||
},
|
||||
permission::get_check_permissions,
|
||||
resource,
|
||||
stack::{
|
||||
get_stack_and_server,
|
||||
remote::{RemoteComposeContents, get_repo_compose_contents},
|
||||
services::extract_services_into_res,
|
||||
},
|
||||
state::{db_client, github_client},
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateStack {
|
||||
#[instrument(name = "CreateStack", skip(user))]
|
||||
#[instrument(
|
||||
"CreateStack",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.name,
|
||||
config = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Stack> {
|
||||
Ok(
|
||||
resource::create::<Stack>(&self.name, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
resource::create::<Stack>(&self.name, self.config, None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyStack {
|
||||
#[instrument(name = "CopyStack", skip(user))]
|
||||
#[instrument(
|
||||
"CopyStack",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.name,
|
||||
copy_stack = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Stack> {
|
||||
let Stack { config, .. } =
|
||||
resource::get_check_permissions::<Stack>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<Stack>(&self.name, config.into(), user)
|
||||
.await?,
|
||||
let Stack { config, .. } = get_check_permissions::<Stack>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Read.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
resource::create::<Stack>(&self.name, config.into(), None, user)
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteStack {
|
||||
#[instrument(name = "DeleteStack", skip(args))]
|
||||
async fn resolve(self, args: &WriteArgs) -> serror::Result<Stack> {
|
||||
Ok(resource::delete::<Stack>(&self.id, args).await?)
|
||||
#[instrument(
|
||||
"DeleteStack",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.id,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Stack> {
|
||||
Ok(resource::delete::<Stack>(&self.id, user).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateStack {
|
||||
#[instrument(name = "UpdateStack", skip(user))]
|
||||
#[instrument(
|
||||
"UpdateStack",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.id,
|
||||
update = serde_json::to_string(&self.config).unwrap(),
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -92,7 +122,15 @@ impl Resolve<WriteArgs> for UpdateStack {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameStack {
|
||||
#[instrument(name = "RenameStack", skip(user))]
|
||||
#[instrument(
|
||||
"RenameStack",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.id,
|
||||
new_name = self.name
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -102,7 +140,15 @@ impl Resolve<WriteArgs> for RenameStack {
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for WriteStackFileContents {
|
||||
#[instrument(name = "WriteStackFileContents", skip(user))]
|
||||
#[instrument(
|
||||
"WriteStackFileContents",
|
||||
skip_all,
|
||||
fields(
|
||||
operator = user.id,
|
||||
stack = self.stack,
|
||||
path = self.file_path,
|
||||
)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
@@ -112,17 +158,19 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
|
||||
file_path,
|
||||
contents,
|
||||
} = self;
|
||||
let (mut stack, server) = get_stack_and_server(
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&stack,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
true,
|
||||
PermissionLevel::Write.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if !stack.config.files_on_host && stack.config.repo.is_empty() {
|
||||
if !stack.config.files_on_host
|
||||
&& stack.config.repo.is_empty()
|
||||
&& stack.config.linked_repo.is_empty()
|
||||
{
|
||||
return Err(anyhow!(
|
||||
"Stack is not configured to use Files on Host or Git Repo, can't write file contents"
|
||||
"Stack is not configured to use Files on Host, Git Repo, or Linked Repo, can't write file contents"
|
||||
).into());
|
||||
}
|
||||
|
||||
@@ -131,113 +179,265 @@ impl Resolve<WriteArgs> for WriteStackFileContents {
|
||||
|
||||
update.push_simple_log("File contents to write", &contents);
|
||||
|
||||
let stack_id = stack.id.clone();
|
||||
|
||||
if stack.config.files_on_host {
|
||||
match periphery_client(&server)?
|
||||
.request(WriteComposeContentsToHost {
|
||||
name: stack.name,
|
||||
run_directory: stack.config.run_directory,
|
||||
file_path,
|
||||
contents,
|
||||
})
|
||||
.await
|
||||
.context("Failed to write contents to host")
|
||||
{
|
||||
Ok(log) => {
|
||||
update.logs.push(log);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Write File Contents",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
let git_token = if !stack.config.git_account.is_empty() {
|
||||
git_token(
|
||||
&stack.config.git_provider,
|
||||
&stack.config.git_account,
|
||||
|https| stack.config.git_https = https,
|
||||
)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to get git token. | {} | {}",
|
||||
stack.config.git_account, stack.config.git_provider
|
||||
)
|
||||
})?
|
||||
} else {
|
||||
None
|
||||
};
|
||||
match periphery_client(&server)?
|
||||
.request(WriteCommitComposeContents {
|
||||
stack,
|
||||
username: Some(user.username.clone()),
|
||||
file_path,
|
||||
contents,
|
||||
git_token,
|
||||
})
|
||||
.await
|
||||
.context("Failed to write contents to host")
|
||||
{
|
||||
Ok(res) => {
|
||||
update.logs.extend(res.logs);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Write File Contents",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
if let Err(e) = (RefreshStackCache { stack: stack_id })
|
||||
.resolve(&WriteArgs {
|
||||
user: stack_user().to_owned(),
|
||||
})
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context(
|
||||
"Failed to refresh stack cache after writing file contents",
|
||||
write_stack_file_contents_on_host(
|
||||
stack, file_path, contents, update,
|
||||
)
|
||||
{
|
||||
update.push_error_log(
|
||||
"Refresh stack cache",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
.await
|
||||
} else {
|
||||
write_stack_file_contents_git(
|
||||
stack,
|
||||
&file_path,
|
||||
&contents,
|
||||
&user.username,
|
||||
update,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument("WriteStackFileContentsOnHost", skip_all)]
|
||||
async fn write_stack_file_contents_on_host(
|
||||
stack: Stack,
|
||||
file_path: String,
|
||||
contents: String,
|
||||
mut update: Update,
|
||||
) -> serror::Result<Update> {
|
||||
if stack.config.server_id.is_empty() {
|
||||
return Err(anyhow!(
|
||||
"Cannot write file, Files on host Stack has not configured a Server"
|
||||
).into());
|
||||
}
|
||||
let (server, state) =
|
||||
get_server_with_state(&stack.config.server_id).await?;
|
||||
if state != ServerState::Ok {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Cannot write file when server is unreachable or disabled"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
match periphery_client(&server)
|
||||
.await?
|
||||
.request(WriteComposeContentsToHost {
|
||||
name: stack.name,
|
||||
run_directory: stack.config.run_directory,
|
||||
file_path,
|
||||
contents,
|
||||
})
|
||||
.await
|
||||
.context("Failed to write contents to host")
|
||||
{
|
||||
Ok(log) => {
|
||||
update.logs.push(log);
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"Write File Contents",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
// Finish with a cache refresh
|
||||
if let Err(e) = (RefreshStackCache { stack: stack.id })
|
||||
.resolve(&WriteArgs {
|
||||
user: stack_user().to_owned(),
|
||||
})
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context(
|
||||
"Failed to refresh stack cache after writing file contents",
|
||||
)
|
||||
{
|
||||
update.push_error_log(
|
||||
"Refresh stack cache",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
#[instrument("WriteStackFileContentsGit", skip_all)]
|
||||
async fn write_stack_file_contents_git(
|
||||
mut stack: Stack,
|
||||
file_path: &str,
|
||||
contents: &str,
|
||||
username: &str,
|
||||
mut update: Update,
|
||||
) -> serror::Result<Update> {
|
||||
let mut repo = if !stack.config.linked_repo.is_empty() {
|
||||
crate::resource::get::<Repo>(&stack.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let git_token = stack_git_token(&mut stack, repo.as_mut()).await?;
|
||||
|
||||
let mut repo_args: RepoExecutionArgs = if let Some(repo) = &repo {
|
||||
repo.into()
|
||||
} else {
|
||||
(&stack).into()
|
||||
};
|
||||
let root = repo_args.unique_path(&core_config().repo_directory)?;
|
||||
repo_args.destination = Some(root.display().to_string());
|
||||
|
||||
let file_path = stack
|
||||
.config
|
||||
.run_directory
|
||||
.parse::<PathBuf>()
|
||||
.context("Run directory is not a valid path")?
|
||||
.join(file_path);
|
||||
let full_path =
|
||||
root.join(&file_path).components().collect::<PathBuf>();
|
||||
|
||||
if let Some(parent) = full_path.parent() {
|
||||
tokio::fs::create_dir_all(parent).await.with_context(|| {
|
||||
format!(
|
||||
"Failed to initialize stack file parent directory {parent:?}"
|
||||
)
|
||||
})?;
|
||||
}
|
||||
|
||||
// Ensure the folder is initialized as git repo.
|
||||
// This allows a new file to be committed on a branch that may not exist.
|
||||
if !root.join(".git").exists() {
|
||||
git::init_folder_as_repo(
|
||||
&root,
|
||||
&repo_args,
|
||||
git_token.as_deref(),
|
||||
&mut update.logs,
|
||||
)
|
||||
.await;
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
}
|
||||
|
||||
// Save this for later -- repo_args moved next.
|
||||
let branch = repo_args.branch.clone();
|
||||
// Pull latest changes to repo to ensure linear commit history
|
||||
match git::pull_or_clone(
|
||||
repo_args,
|
||||
&core_config().repo_directory,
|
||||
git_token,
|
||||
)
|
||||
.await
|
||||
.context("Failed to pull latest changes before commit")
|
||||
{
|
||||
Ok((res, _)) => update.logs.extend(res.logs),
|
||||
Err(e) => {
|
||||
update.push_error_log("Pull Repo", format_serror(&e.into()));
|
||||
update.finalize();
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
if let Err(e) = tokio::fs::write(&full_path, &contents)
|
||||
.await
|
||||
.with_context(|| {
|
||||
format!(
|
||||
"Failed to write compose file contents to {full_path:?}"
|
||||
)
|
||||
})
|
||||
{
|
||||
update.push_error_log("Write File", format_serror(&e.into()));
|
||||
} else {
|
||||
update.push_simple_log(
|
||||
"Write File",
|
||||
format!("File written to {full_path:?}"),
|
||||
);
|
||||
};
|
||||
|
||||
if !all_logs_success(&update.logs) {
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
return Ok(update);
|
||||
}
|
||||
|
||||
let commit_res = git::commit_file(
|
||||
&format!("{username}: Write Stack File"),
|
||||
&root,
|
||||
&file_path,
|
||||
&branch,
|
||||
)
|
||||
.await;
|
||||
|
||||
update.logs.extend(commit_res.logs);
|
||||
|
||||
// Finish with a cache refresh
|
||||
if let Err(e) = (RefreshStackCache { stack: stack.id })
|
||||
.resolve(&WriteArgs {
|
||||
user: stack_user().to_owned(),
|
||||
})
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context(
|
||||
"Failed to refresh stack cache after writing file contents",
|
||||
)
|
||||
{
|
||||
update.push_error_log(
|
||||
"Refresh stack cache",
|
||||
format_serror(&e.into()),
|
||||
);
|
||||
}
|
||||
|
||||
update.finalize();
|
||||
update.id = add_update(update.clone()).await?;
|
||||
|
||||
Ok(update)
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
#[instrument(
|
||||
name = "RefreshStackCache",
|
||||
level = "debug",
|
||||
skip(user)
|
||||
)]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<NoData> {
|
||||
// Even though this is a write request, this doesn't change any config. Anyone that can execute the
|
||||
// stack should be able to do this.
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
let stack = get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
PermissionLevel::Execute.into(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let repo = if !stack.config.files_on_host
|
||||
&& !stack.config.linked_repo.is_empty()
|
||||
{
|
||||
crate::resource::get::<Repo>(&stack.config.linked_repo)
|
||||
.await?
|
||||
.into()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let file_contents_empty = stack.config.file_contents.is_empty();
|
||||
let repo_empty = stack.config.repo.is_empty();
|
||||
let repo_empty =
|
||||
stack.config.repo.is_empty() && repo.as_ref().is_none();
|
||||
|
||||
if !stack.config.files_on_host
|
||||
&& file_contents_empty
|
||||
@@ -270,9 +470,10 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
(vec![], None, None, None, None)
|
||||
} else if let Some(server) = server {
|
||||
let GetComposeContentsOnHostResponse { contents, errors } =
|
||||
match periphery_client(&server)?
|
||||
match periphery_client(&server)
|
||||
.await?
|
||||
.request(GetComposeContentsOnHost {
|
||||
file_paths: stack.file_paths().to_vec(),
|
||||
file_paths: stack.all_file_dependencies(),
|
||||
name: stack.name.clone(),
|
||||
run_directory: stack.config.run_directory.clone(),
|
||||
})
|
||||
@@ -294,6 +495,10 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
let mut services = Vec::new();
|
||||
|
||||
for contents in &contents {
|
||||
// Don't include additional files in service parsing
|
||||
if !stack.is_compose_file(&contents.path) {
|
||||
continue;
|
||||
}
|
||||
if let Err(e) = extract_services_into_res(
|
||||
&project_name,
|
||||
&contents.contents,
|
||||
@@ -320,14 +525,22 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
hash: latest_hash,
|
||||
message: latest_message,
|
||||
..
|
||||
} = get_repo_compose_contents(&stack, Some(&mut missing_files))
|
||||
.await?;
|
||||
} = get_repo_compose_contents(
|
||||
&stack,
|
||||
repo.as_ref(),
|
||||
Some(&mut missing_files),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let project_name = stack.project_name(true);
|
||||
|
||||
let mut services = Vec::new();
|
||||
|
||||
for contents in &remote_contents {
|
||||
// Don't include additional files in service parsing
|
||||
if !stack.is_compose_file(&contents.path) {
|
||||
continue;
|
||||
}
|
||||
if let Err(e) = extract_services_into_res(
|
||||
&project_name,
|
||||
&contents.contents,
|
||||
@@ -394,236 +607,6 @@ impl Resolve<WriteArgs> for RefreshStackCache {
|
||||
.await
|
||||
.context("failed to update stack info on db")?;
|
||||
|
||||
if (stack.config.poll_for_updates || stack.config.auto_update)
|
||||
&& !stack.config.server_id.is_empty()
|
||||
{
|
||||
let (server, state) =
|
||||
get_server_with_state(&stack.config.server_id).await?;
|
||||
if state == ServerState::Ok {
|
||||
let name = stack.name.clone();
|
||||
if let Err(e) =
|
||||
pull_stack_inner(stack, Vec::new(), &server, None).await
|
||||
{
|
||||
warn!(
|
||||
"Failed to pull latest images for Stack {name} | {e:#}",
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CreateStackWebhook {
|
||||
#[instrument(name = "CreateStackWebhook", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<CreateStackWebhookResponse> {
|
||||
let WriteArgs { user } = args;
|
||||
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if stack.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = stack.config.repo.split('/');
|
||||
let owner = split.next().context("Stack repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Stack repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
webhook_secret,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let webhook_secret = if stack.config.webhook_secret.is_empty() {
|
||||
webhook_secret
|
||||
} else {
|
||||
&stack.config.webhook_secret
|
||||
};
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
StackWebhookAction::Refresh => {
|
||||
format!("{host}/listener/github/stack/{}/refresh", stack.id)
|
||||
}
|
||||
StackWebhookAction::Deploy => {
|
||||
format!("{host}/listener/github/stack/{}/deploy", stack.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// Now good to create the webhook
|
||||
let request = ReposCreateWebhookRequest {
|
||||
active: Some(true),
|
||||
config: Some(ReposCreateWebhookRequestConfig {
|
||||
url,
|
||||
secret: webhook_secret.to_string(),
|
||||
content_type: String::from("json"),
|
||||
insecure_ssl: None,
|
||||
digest: Default::default(),
|
||||
token: Default::default(),
|
||||
}),
|
||||
events: vec![String::from("push")],
|
||||
name: String::from("web"),
|
||||
};
|
||||
github_repos
|
||||
.create_webhook(owner, repo, &request)
|
||||
.await
|
||||
.context("failed to create webhook")?;
|
||||
|
||||
if !stack.config.webhook_enabled {
|
||||
UpdateStack {
|
||||
id: stack.id,
|
||||
config: PartialStackConfig {
|
||||
webhook_enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
}
|
||||
.resolve(args)
|
||||
.await
|
||||
.map_err(|e| e.error)
|
||||
.context("failed to update stack to enable webhook")?;
|
||||
}
|
||||
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteStackWebhook {
|
||||
#[instrument(name = "DeleteStackWebhook", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<DeleteStackWebhookResponse> {
|
||||
let Some(github) = github_client() else {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"github_webhook_app is not configured in core config toml"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let stack = resource::get_check_permissions::<Stack>(
|
||||
&self.stack,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
|
||||
if stack.config.git_provider != "github.com" {
|
||||
return Err(
|
||||
anyhow!("Can only manage github.com repo webhooks").into(),
|
||||
);
|
||||
}
|
||||
|
||||
if stack.config.repo.is_empty() {
|
||||
return Err(
|
||||
anyhow!("No repo configured, can't create webhook").into(),
|
||||
);
|
||||
}
|
||||
|
||||
let mut split = stack.config.repo.split('/');
|
||||
let owner = split.next().context("Stack repo has no owner")?;
|
||||
|
||||
let Some(github) = github.get(owner) else {
|
||||
return Err(
|
||||
anyhow!("Cannot manage repo webhooks under owner {owner}")
|
||||
.into(),
|
||||
);
|
||||
};
|
||||
|
||||
let repo =
|
||||
split.next().context("Sync repo has no repo after the /")?;
|
||||
|
||||
let github_repos = github.repos();
|
||||
|
||||
// First make sure the webhook isn't already created (inactive ones are ignored)
|
||||
let webhooks = github_repos
|
||||
.list_all_webhooks(owner, repo)
|
||||
.await
|
||||
.context("failed to list all webhooks on repo")?
|
||||
.body;
|
||||
|
||||
let CoreConfig {
|
||||
host,
|
||||
webhook_base_url,
|
||||
..
|
||||
} = core_config();
|
||||
|
||||
let host = if webhook_base_url.is_empty() {
|
||||
host
|
||||
} else {
|
||||
webhook_base_url
|
||||
};
|
||||
let url = match self.action {
|
||||
StackWebhookAction::Refresh => {
|
||||
format!("{host}/listener/github/stack/{}/refresh", stack.id)
|
||||
}
|
||||
StackWebhookAction::Deploy => {
|
||||
format!("{host}/listener/github/stack/{}/deploy", stack.id)
|
||||
}
|
||||
};
|
||||
|
||||
for webhook in webhooks {
|
||||
if webhook.active && webhook.config.url == url {
|
||||
github_repos
|
||||
.delete_webhook(owner, repo, webhook.id)
|
||||
.await
|
||||
.context("failed to delete webhook")?;
|
||||
return Ok(NoData {});
|
||||
}
|
||||
}
|
||||
|
||||
// No webhook to delete, all good
|
||||
Ok(NoData {})
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user