Mirror of https://github.com/moghtech/komodo.git, synced 2025-12-05 19:17:36 -06:00
1.17.5 (#472)
* API support new calling syntax
* finish /{variant} api to improve network logs in browser console
* update roadmap
* configure the shell used to start the pty
* start on ExecuteTerminal api
* Rename resources less hidden - click on name in header
* update deps
* execute terminal
* BatchPullStack
* add Types import to Actions, and don't stringify the error
* add --reload for cached deps
* type execute terminal response as AsyncIterable
* execute terminal client api
* KOMODO_EXIT_CODE
* Early exit without code
* action configurable deno dep reload
* remove ServerTemplate resource
* kept disabled
* rework exec terminal command wrapper
* debug: print lines in start sentinel loop
* edit debug / remove ref
* echo
* line compare
* log lengths
* use printf again
* check char compare
* leading \n
* works with leading \n
* extra \n after START_OF_OUTPUT
* add variables / secrets finders to ui defined stacks / builds
* isolate post-db startup procedures
* clean up server templates
* disable websocket reconnect from core config
* change periphery ssl enabled to default to true
* git provider selector config pass through disable to http/s button
* disable terminals while allowing container exec
* disable_container_exec in default config
* update ws reconnect implementation
* Don't show delete tag non admin and non owner
* 1.17.5 complete
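
The headline item is the new per-variant calling syntax: each API router now also exposes POST /{variant}, taking just the params object as the request body instead of the tagged { "type": ..., "params": ... } envelope, which is what makes request names readable in browser network logs. A rough TypeScript sketch of the difference, assuming the execute router is mounted at /execute and that the API key/secret header names below match your deployment (base URL and header names are placeholders, not taken from this diff):

const KOMODO_URL = "https://komodo.example.com"; // placeholder
const headers = {
  "Content-Type": "application/json",
  "X-Api-Key": "<key>",       // assumed header name
  "X-Api-Secret": "<secret>", // assumed header name
};

// Old calling syntax: tagged envelope posted to the base route.
await fetch(`${KOMODO_URL}/execute`, {
  method: "POST",
  headers,
  body: JSON.stringify({ type: "PullStack", params: { stack: "my-stack", services: [] } }),
});

// New calling syntax: the variant moves into the path, the body is just the params.
await fetch(`${KOMODO_URL}/execute/PullStack`, {
  method: "POST",
  headers,
  body: JSON.stringify({ stack: "my-stack", services: [] }),
});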
154  Cargo.lock (generated)
@@ -254,9 +254,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "aws-sdk-ec2"
|
||||
version = "1.123.0"
|
||||
version = "1.124.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b03632589dee533daf47c598819353995b20e4db42a84078fffff11730c3fbbd"
|
||||
checksum = "6746a315a5446304942f057e6a072347dad558d23bfbda64c42b9a236f824013"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-runtime",
|
||||
@@ -547,9 +547,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "axum"
|
||||
version = "0.8.3"
|
||||
version = "0.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "de45108900e1f9b9242f7f2e254aa3e2c029c921c258fe9e6b4217eeebd54288"
|
||||
checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5"
|
||||
dependencies = [
|
||||
"axum-core",
|
||||
"axum-macros",
|
||||
@@ -893,7 +893,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "cache"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"tokio",
|
||||
@@ -939,9 +939,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
|
||||
|
||||
[[package]]
|
||||
name = "chrono"
|
||||
version = "0.4.40"
|
||||
version = "0.4.41"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c"
|
||||
checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
|
||||
dependencies = [
|
||||
"android-tzdata",
|
||||
"iana-time-zone",
|
||||
@@ -1060,7 +1060,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "command"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"formatting",
|
||||
@@ -1544,7 +1544,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "environment_file"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"thiserror 2.0.12",
|
||||
]
|
||||
@@ -1624,7 +1624,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "formatting"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"serror",
|
||||
]
|
||||
@@ -1786,7 +1786,7 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
|
||||
|
||||
[[package]]
|
||||
name = "git"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"cache",
|
||||
@@ -2229,7 +2229,7 @@ dependencies = [
|
||||
"js-sys",
|
||||
"log",
|
||||
"wasm-bindgen",
|
||||
"windows-core 0.61.0",
|
||||
"windows-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2523,7 +2523,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "komodo_cli"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"clap",
|
||||
@@ -2539,7 +2539,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "komodo_client"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async_timing_util",
|
||||
@@ -2570,7 +2570,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "komodo_core"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"arc-swap",
|
||||
@@ -2639,7 +2639,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "komodo_periphery"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async_timing_util",
|
||||
@@ -2661,6 +2661,7 @@ dependencies = [
|
||||
"logger",
|
||||
"merge_config_files",
|
||||
"periphery_client",
|
||||
"pin-project-lite",
|
||||
"portable-pty",
|
||||
"rand 0.9.1",
|
||||
"resolver_api",
|
||||
@@ -2674,6 +2675,7 @@ dependencies = [
|
||||
"svi",
|
||||
"sysinfo",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
"uuid",
|
||||
@@ -2755,7 +2757,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "logger"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"komodo_client",
|
||||
@@ -3151,6 +3153,16 @@ dependencies = [
|
||||
"bitflags 2.9.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "objc2-io-kit"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "71c1c64d6120e51cd86033f67176b1cb66780c2efe34dec55176f77befd93c0a"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"objc2-core-foundation",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.36.7"
|
||||
@@ -3500,7 +3512,7 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
|
||||
|
||||
[[package]]
|
||||
name = "periphery_client"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"komodo_client",
|
||||
@@ -4028,7 +4040,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "response"
|
||||
version = "1.17.4"
|
||||
version = "1.17.5"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"axum",
|
||||
@@ -4455,9 +4467,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "serde_qs"
|
||||
version = "0.14.0"
|
||||
version = "0.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b417bedc008acbdf6d6b4bc482d29859924114bbe2650b7921fb68a261d0aa6"
|
||||
checksum = "f3faaf9e727533a19351a43cc5a8de957372163c7d35cc48c90b75cdda13c352"
|
||||
dependencies = [
|
||||
"percent-encoding",
|
||||
"serde",
|
||||
@@ -4586,9 +4598,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sha2"
|
||||
version = "0.10.8"
|
||||
version = "0.10.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8"
|
||||
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"cpufeatures",
|
||||
@@ -4842,14 +4854,15 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sysinfo"
|
||||
version = "0.34.2"
|
||||
version = "0.35.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a4b93974b3d3aeaa036504b8eefd4c039dced109171c1ae973f1dc63b2c7e4b2"
|
||||
checksum = "b897c8ea620e181c7955369a31be5f48d9a9121cb59fd33ecef9ff2a34323422"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"memchr",
|
||||
"ntapi",
|
||||
"objc2-core-foundation",
|
||||
"objc2-io-kit",
|
||||
"windows",
|
||||
]
|
||||
|
||||
@@ -5059,6 +5072,7 @@ dependencies = [
|
||||
"futures-core",
|
||||
"pin-project-lite",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5093,9 +5107,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "0.8.20"
|
||||
version = "0.8.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148"
|
||||
checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
@@ -5105,23 +5119,24 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "toml_datetime"
|
||||
version = "0.6.8"
|
||||
version = "0.6.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
|
||||
checksum = "3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_edit"
|
||||
version = "0.22.24"
|
||||
version = "0.22.26"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474"
|
||||
checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e"
|
||||
dependencies = [
|
||||
"indexmap 2.9.0",
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
"toml_write",
|
||||
"winnow",
|
||||
]
|
||||
|
||||
@@ -5137,6 +5152,12 @@ dependencies = [
|
||||
"thiserror 1.0.69",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_write"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076"
|
||||
|
||||
[[package]]
|
||||
name = "tonic"
|
||||
version = "0.12.3"
|
||||
@@ -5725,24 +5746,24 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
|
||||
|
||||
[[package]]
|
||||
name = "windows"
|
||||
version = "0.57.0"
|
||||
version = "0.61.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143"
|
||||
checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419"
|
||||
dependencies = [
|
||||
"windows-core 0.57.0",
|
||||
"windows-targets 0.52.6",
|
||||
"windows-collections",
|
||||
"windows-core",
|
||||
"windows-future",
|
||||
"windows-link",
|
||||
"windows-numerics",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-core"
|
||||
version = "0.57.0"
|
||||
name = "windows-collections"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d"
|
||||
checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8"
|
||||
dependencies = [
|
||||
"windows-implement 0.57.0",
|
||||
"windows-interface 0.57.0",
|
||||
"windows-result 0.1.2",
|
||||
"windows-targets 0.52.6",
|
||||
"windows-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5751,22 +5772,21 @@ version = "0.61.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980"
|
||||
dependencies = [
|
||||
"windows-implement 0.60.0",
|
||||
"windows-interface 0.59.1",
|
||||
"windows-implement",
|
||||
"windows-interface",
|
||||
"windows-link",
|
||||
"windows-result 0.3.2",
|
||||
"windows-result",
|
||||
"windows-strings 0.4.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-implement"
|
||||
version = "0.57.0"
|
||||
name = "windows-future"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7"
|
||||
checksum = "7a1d6bbefcb7b60acd19828e1bc965da6fcf18a7e39490c5f8be71e54a19ba32"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.100",
|
||||
"windows-core",
|
||||
"windows-link",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5780,17 +5800,6 @@ dependencies = [
|
||||
"syn 2.0.100",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-interface"
|
||||
version = "0.57.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.100",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-interface"
|
||||
version = "0.59.1"
|
||||
@@ -5808,26 +5817,27 @@ version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38"
|
||||
|
||||
[[package]]
|
||||
name = "windows-numerics"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1"
|
||||
dependencies = [
|
||||
"windows-core",
|
||||
"windows-link",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-registry"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3"
|
||||
dependencies = [
|
||||
"windows-result 0.3.2",
|
||||
"windows-result",
|
||||
"windows-strings 0.3.1",
|
||||
"windows-targets 0.53.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-result"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8"
|
||||
dependencies = [
|
||||
"windows-targets 0.52.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-result"
|
||||
version = "0.3.2"
|
||||
|
||||
30  Cargo.toml
@@ -8,7 +8,7 @@ members = [
 ]
 
 [workspace.package]
-version = "1.17.4"
+version = "1.17.5"
 edition = "2024"
 authors = ["mbecker20 <becker.maxh@gmail.com>"]
 license = "GPL-3.0-or-later"
@@ -45,18 +45,20 @@ svi = "1.0.1"
 
 # ASYNC
 reqwest = { version = "0.12.15", default-features = false, features = ["json", "stream", "rustls-tls-native-roots"] }
-tokio = { version = "1.44.1", features = ["full"] }
-tokio-util = { version = "0.7.14", features = ["io", "codec"] }
+tokio = { version = "1.44.2", features = ["full"] }
+tokio-util = { version = "0.7.15", features = ["io", "codec"] }
+tokio-stream = { version = "0.1.17", features = ["sync"] }
+pin-project-lite = "0.2.16"
 futures = "0.3.31"
 futures-util = "0.3.31"
 arc-swap = "1.7.1"
 
 # SERVER
 tokio-tungstenite = { version = "0.26.2", features = ["rustls-tls-native-roots"] }
-axum-extra = { version = "0.10.0", features = ["typed-header"] }
+axum-extra = { version = "0.10.1", features = ["typed-header"] }
 tower-http = { version = "0.6.2", features = ["fs", "cors"] }
 axum-server = { version = "0.7.2", features = ["tls-rustls"] }
-axum = { version = "0.8.1", features = ["ws", "json", "macros"] }
+axum = { version = "0.8.4", features = ["ws", "json", "macros"] }
 
 # SER/DE
 ordered_hash_map = { version = "0.4.0", features = ["serde"] }
@@ -64,8 +66,8 @@ serde = { version = "1.0.219", features = ["derive"] }
 strum = { version = "0.27.1", features = ["derive"] }
 serde_json = "1.0.140"
 serde_yaml = "0.9.34"
-serde_qs = "0.14.0"
-toml = "0.8.20"
+serde_qs = "0.15.0"
+toml = "0.8.22"
 
 # ERROR
 anyhow = "1.0.98"
@@ -77,7 +79,7 @@ opentelemetry_sdk = { version = "0.29.0", features = ["rt-tokio"] }
 tracing-subscriber = { version = "0.3.19", features = ["json"] }
 opentelemetry-semantic-conventions = "0.29.0"
 tracing-opentelemetry = "0.30.0"
-opentelemetry = "0.29.0"
+opentelemetry = "0.29.1"
 tracing = "0.1.41"
 
 # CONFIG
@@ -95,24 +97,24 @@ bcrypt = "0.17.0"
 base64 = "0.22.1"
 rustls = "0.23.26"
 hmac = "0.12.1"
-sha2 = "0.10.8"
+sha2 = "0.10.9"
 rand = "0.9.1"
 hex = "0.4.3"
 
 # SYSTEM
 portable-pty = "0.9.0"
 bollard = "0.18.1"
-sysinfo = "0.34.2"
+sysinfo = "0.35.0"
 
 # CLOUD
-aws-config = "1.6.1"
-aws-sdk-ec2 = "1.121.1"
-aws-credential-types = "1.2.2"
+aws-config = "1.6.2"
+aws-sdk-ec2 = "1.124.0"
+aws-credential-types = "1.2.3"
 
 ## CRON
 english-to-cron = "0.1.4"
 chrono-tz = "0.10.3"
-chrono = "0.4.40"
+chrono = "0.4.41"
 croner = "2.1.0"
 
 # MISC
@@ -185,6 +185,9 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
     Execution::PullStack(data) => {
       println!("{}: {data:?}", "Data".dimmed())
     }
+    Execution::BatchPullStack(data) => {
+      println!("{}: {data:?}", "Data".dimmed())
+    }
     Execution::StartStack(data) => {
       println!("{}: {data:?}", "Data".dimmed())
     }
@@ -429,6 +432,10 @@ pub async fn run(execution: Execution) -> anyhow::Result<()> {
       .execute(request)
      .await
      .map(ExecutionResult::Single),
+    Execution::BatchPullStack(request) => komodo_client()
+      .execute(request)
+      .await
+      .map(ExecutionResult::Batch),
    Execution::StartStack(request) => komodo_client()
      .execute(request)
      .await
@@ -260,9 +260,6 @@ fn resource_link(
     ResourceTargetVariant::Action => {
       format!("/actions/{id}")
     }
-    ResourceTargetVariant::ServerTemplate => {
-      format!("/server-templates/{id}")
-    }
     ResourceTargetVariant::ResourceSync => {
       format!("/resource-syncs/{id}")
     }
@@ -1,11 +1,12 @@
 use std::{sync::OnceLock, time::Instant};
 
-use axum::{Router, http::HeaderMap, routing::post};
+use axum::{Router, extract::Path, http::HeaderMap, routing::post};
 use derive_variants::{EnumVariants, ExtractVariant};
 use komodo_client::{api::auth::*, entities::user::User};
 use resolver_api::Resolve;
 use response::Response;
 use serde::{Deserialize, Serialize};
+use serde_json::json;
 use serror::Json;
 use typeshare::typeshare;
 use uuid::Uuid;
@@ -22,6 +23,8 @@ use crate::{
   state::jwt_client,
 };
 
+use super::Variant;
+
 pub struct AuthArgs {
   pub headers: HeaderMap,
 }
@@ -45,7 +48,9 @@ pub enum AuthRequest {
 }
 
 pub fn router() -> Router {
-  let mut router = Router::new().route("/", post(handler));
+  let mut router = Router::new()
+    .route("/", post(handler))
+    .route("/{variant}", post(variant_handler));
 
   if core_config().local_auth {
     info!("🔑 Local Login Enabled");
@@ -69,6 +74,18 @@ pub fn router() -> Router {
   router
 }
 
+async fn variant_handler(
+  headers: HeaderMap,
+  Path(Variant { variant }): Path<Variant>,
+  Json(params): Json<serde_json::Value>,
+) -> serror::Result<axum::response::Response> {
+  let req: AuthRequest = serde_json::from_value(json!({
+    "type": variant,
+    "params": params,
+  }))?;
+  handler(headers, Json(req)).await
+}
+
 #[instrument(name = "AuthHandler", level = "debug", skip(headers))]
 async fn handler(
   headers: HeaderMap,
@@ -134,12 +134,18 @@ impl Resolve<ExecuteArgs> for RunAction {
       ""
     };
 
+    let reload = if action.config.reload_deno_deps {
+      " --reload"
+    } else {
+      ""
+    };
+
     let mut res = run_komodo_command(
       // Keep this stage name as is, the UI will find the latest update log by matching the stage name
       "Execute Action",
       None,
       format!(
-        "deno run --allow-all{https_cert_flag} {}",
+        "deno run --allow-all{https_cert_flag}{reload} {}",
         path.display()
       ),
     )
@@ -245,7 +251,7 @@ fn full_contents(contents: &str, key: &str, secret: &str) -> String {
   let protocol = if *ssl_enabled { "https" } else { "http" };
   let base_url = format!("{protocol}://localhost:{port}");
   format!(
-    "import {{ KomodoClient }} from '{base_url}/client/lib.js';
+    "import {{ KomodoClient, Types }} from '{base_url}/client/lib.js';
 import * as __YAML__ from 'jsr:@std/yaml';
 import * as __TOML__ from 'jsr:@std/toml';
 
@@ -281,7 +287,7 @@ main()
     console.error('Status:', error.status);
     console.error(JSON.stringify(error.result, null, 2));
   }} else {{
-    console.error(JSON.stringify(error, null, 2));
+    console.error(error);
   }}
   Deno.exit(1)
 }});"
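
With the new reload_deno_deps toggle on an Action, the wrapper command gains a --reload flag so Deno re-fetches cached remote dependencies on each run; the resulting invocation looks roughly like this (cert flag omitted, script path illustrative):

deno run --allow-all /path/to/action.ts            # reload_deno_deps = false
deno run --allow-all --reload /path/to/action.ts   # reload_deno_deps = true

The wrapper also now imports Types alongside KomodoClient, and uncaught errors are logged with console.error(error) instead of JSON.stringify, so stack traces survive in the action log.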
@@ -1,7 +1,9 @@
 use std::{pin::Pin, time::Instant};
 
 use anyhow::Context;
-use axum::{Extension, Router, middleware, routing::post};
+use axum::{
+  Extension, Router, extract::Path, middleware, routing::post,
+};
 use axum_extra::{TypedHeader, headers::ContentType};
 use derive_variants::{EnumVariants, ExtractVariant};
 use formatting::format_serror;
@@ -18,6 +20,7 @@ use mungos::by_id::find_one_by_id;
 use resolver_api::Resolve;
 use response::JsonString;
 use serde::{Deserialize, Serialize};
+use serde_json::json;
 use serror::Json;
 use typeshare::typeshare;
 use uuid::Uuid;
@@ -36,10 +39,11 @@ mod deployment;
 mod procedure;
 mod repo;
 mod server;
-mod server_template;
 mod stack;
 mod sync;
 
+use super::Variant;
+
 pub use {
   deployment::pull_deployment_inner, stack::pull_stack_inner,
 };
@@ -100,6 +104,7 @@ pub enum ExecuteRequest {
   DeployStackIfChanged(DeployStackIfChanged),
   BatchDeployStackIfChanged(BatchDeployStackIfChanged),
   PullStack(PullStack),
+  BatchPullStack(BatchPullStack),
   StartStack(StartStack),
   RestartStack(RestartStack),
   StopStack(StopStack),
@@ -130,9 +135,6 @@ pub enum ExecuteRequest {
   RunAction(RunAction),
   BatchRunAction(BatchRunAction),
 
-  // ==== SERVER TEMPLATE ====
-  LaunchServer(LaunchServer),
-
   // ==== ALERTER ====
   TestAlerter(TestAlerter),
 
@@ -143,9 +145,22 @@ pub enum ExecuteRequest {
 pub fn router() -> Router {
   Router::new()
     .route("/", post(handler))
+    .route("/{variant}", post(variant_handler))
     .layer(middleware::from_fn(auth_request))
 }
 
+async fn variant_handler(
+  user: Extension<User>,
+  Path(Variant { variant }): Path<Variant>,
+  Json(params): Json<serde_json::Value>,
+) -> serror::Result<(TypedHeader<ContentType>, String)> {
+  let req: ExecuteRequest = serde_json::from_value(json!({
+    "type": variant,
+    "params": params,
+  }))?;
+  handler(user, Json(req)).await
+}
+
 async fn handler(
   Extension(user): Extension<User>,
   Json(request): Json<ExecuteRequest>,
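
Since every router gains the same /{variant} handler (the path variant and raw params are folded back into the tagged request enum before dispatch), a thin client helper can cover the routers uniformly. A hedged sketch, assuming the routers are mounted at /auth, /read, /write, /execute and /user, and reusing the KOMODO_URL and headers placeholders from the earlier sketch:

// Generic helper over the new variant routes.
type Mode = "auth" | "read" | "write" | "execute" | "user";

async function call<T>(mode: Mode, variant: string, params: unknown): Promise<T> {
  const res = await fetch(`${KOMODO_URL}/${mode}/${variant}`, {
    method: "POST",
    headers,
    body: JSON.stringify(params),
  });
  if (!res.ok) throw new Error(`${variant} failed: ${res.status}`);
  return res.json() as Promise<T>;
}

// e.g. await call("execute", "PullStack", { stack: "my-stack", services: [] });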
@@ -1,156 +0,0 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use formatting::format_serror;
|
||||
use komodo_client::{
|
||||
api::{execute::LaunchServer, write::CreateServer},
|
||||
entities::{
|
||||
permission::PermissionLevel,
|
||||
server::PartialServerConfig,
|
||||
server_template::{ServerTemplate, ServerTemplateConfig},
|
||||
update::Update,
|
||||
},
|
||||
};
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::write::WriteArgs,
|
||||
cloud::{
|
||||
aws::ec2::launch_ec2_instance, hetzner::launch_hetzner_server,
|
||||
},
|
||||
helpers::update::update_update,
|
||||
resource,
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::ExecuteArgs;
|
||||
|
||||
impl Resolve<ExecuteArgs> for LaunchServer {
|
||||
#[instrument(name = "LaunchServer", skip(user, update), fields(user_id = user.id, update_id = update.id))]
|
||||
async fn resolve(
|
||||
self,
|
||||
ExecuteArgs { user, update }: &ExecuteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
// validate name isn't already taken by another server
|
||||
if db_client()
|
||||
.servers
|
||||
.find_one(doc! {
|
||||
"name": &self.name
|
||||
})
|
||||
.await
|
||||
.context("failed to query db for servers")?
|
||||
.is_some()
|
||||
{
|
||||
return Err(anyhow!("name is already taken").into());
|
||||
}
|
||||
|
||||
let template = resource::get_check_permissions::<ServerTemplate>(
|
||||
&self.server_template,
|
||||
user,
|
||||
PermissionLevel::Execute,
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut update = update.clone();
|
||||
|
||||
update.push_simple_log(
|
||||
"launching server",
|
||||
format!("{:#?}", template.config),
|
||||
);
|
||||
update_update(update.clone()).await?;
|
||||
|
||||
let config = match template.config {
|
||||
ServerTemplateConfig::Aws(config) => {
|
||||
let region = config.region.clone();
|
||||
let use_https = config.use_https;
|
||||
let port = config.port;
|
||||
let instance =
|
||||
match launch_ec2_instance(&self.name, config).await {
|
||||
Ok(instance) => instance,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"launch server",
|
||||
format!("failed to launch aws instance\n\n{e:#?}"),
|
||||
);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
update.push_simple_log(
|
||||
"launch server",
|
||||
format!(
|
||||
"successfully launched server {} on ip {}",
|
||||
self.name, instance.ip
|
||||
),
|
||||
);
|
||||
let protocol = if use_https { "https" } else { "http" };
|
||||
PartialServerConfig {
|
||||
address: format!("{protocol}://{}:{port}", instance.ip)
|
||||
.into(),
|
||||
region: region.into(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
ServerTemplateConfig::Hetzner(config) => {
|
||||
let datacenter = config.datacenter;
|
||||
let use_https = config.use_https;
|
||||
let port = config.port;
|
||||
let server =
|
||||
match launch_hetzner_server(&self.name, config).await {
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"launch server",
|
||||
format!("failed to launch hetzner server\n\n{e:#?}"),
|
||||
);
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
return Ok(update);
|
||||
}
|
||||
};
|
||||
update.push_simple_log(
|
||||
"launch server",
|
||||
format!(
|
||||
"successfully launched server {} on ip {}",
|
||||
self.name, server.ip
|
||||
),
|
||||
);
|
||||
let protocol = if use_https { "https" } else { "http" };
|
||||
PartialServerConfig {
|
||||
address: format!("{protocol}://{}:{port}", server.ip)
|
||||
.into(),
|
||||
region: datacenter.as_ref().to_string().into(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
match (CreateServer {
|
||||
name: self.name,
|
||||
config,
|
||||
})
|
||||
.resolve(&WriteArgs { user: user.clone() })
|
||||
.await
|
||||
{
|
||||
Ok(server) => {
|
||||
update.push_simple_log(
|
||||
"create server",
|
||||
format!("created server {} ({})", server.name, server.id),
|
||||
);
|
||||
update.other_data = server.id;
|
||||
}
|
||||
Err(e) => {
|
||||
update.push_error_log(
|
||||
"create server",
|
||||
format_serror(
|
||||
&e.error.context("failed to create server").into(),
|
||||
),
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
update.finalize();
|
||||
update_update(update.clone()).await?;
|
||||
Ok(update)
|
||||
}
|
||||
}
|
||||
@@ -385,6 +385,32 @@ impl Resolve<ExecuteArgs> for DeployStackIfChanged {
   }
 }
 
+impl super::BatchExecute for BatchPullStack {
+  type Resource = Stack;
+  fn single_request(stack: String) -> ExecuteRequest {
+    ExecuteRequest::PullStack(PullStack {
+      stack,
+      services: Vec::new(),
+    })
+  }
+}
+
+impl Resolve<ExecuteArgs> for BatchPullStack {
+  #[instrument(name = "BatchPullStack", skip(user), fields(user_id = user.id))]
+  async fn resolve(
+    self,
+    ExecuteArgs { user, .. }: &ExecuteArgs,
+  ) -> serror::Result<BatchExecutionResponse> {
+    Ok(
+      super::batch_execute::<BatchPullStack>(
+        &self.pattern,
+        user,
+      )
+      .await?,
+    )
+  }
+}
+
 pub async fn pull_stack_inner(
   mut stack: Stack,
   services: Vec<String>,
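
BatchPullStack reuses the batch_execute helper: the pattern is expanded to the matching stacks, each one becomes an individual PullStack request, and the per-stack results are collected into a BatchExecutionResponse. Using the helper from the previous sketch (the wildcard pattern syntax shown here is an assumption):

// Pull every stack matching the pattern; the response collects one
// result per matched stack (a BatchExecutionResponse).
const results = await call("execute", "BatchPullStack", { pattern: "prod-*" });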
@@ -16,7 +16,6 @@ use komodo_client::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
update::{Log, Update},
|
||||
@@ -142,10 +141,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
.servers
|
||||
.get(&name_or_id)
|
||||
.map(|s| s.name.clone()),
|
||||
ResourceTargetVariant::ServerTemplate => all_resources
|
||||
.templates
|
||||
.get(&name_or_id)
|
||||
.map(|t| t.name.clone()),
|
||||
ResourceTargetVariant::Stack => all_resources
|
||||
.stacks
|
||||
.get(&name_or_id)
|
||||
@@ -332,20 +327,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let server_template_deltas = if sync.config.include_resources {
|
||||
get_updates_for_execution::<ServerTemplate>(
|
||||
resources.server_templates,
|
||||
delete,
|
||||
&all_resources,
|
||||
match_resource_type,
|
||||
match_resources.as_deref(),
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
)
|
||||
.await?
|
||||
} else {
|
||||
Default::default()
|
||||
};
|
||||
let resource_sync_deltas = if sync.config.include_resources {
|
||||
get_updates_for_execution::<entities::sync::ResourceSync>(
|
||||
resources.resource_syncs,
|
||||
@@ -397,7 +378,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
|
||||
if deploy_cache.is_empty()
|
||||
&& resource_sync_deltas.no_changes()
|
||||
&& server_template_deltas.no_changes()
|
||||
&& server_deltas.no_changes()
|
||||
&& deployment_deltas.no_changes()
|
||||
&& stack_deltas.no_changes()
|
||||
@@ -451,11 +431,6 @@ impl Resolve<ExecuteArgs> for RunSync {
|
||||
&mut update.logs,
|
||||
ResourceSync::execute_sync_updates(resource_sync_deltas).await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
ServerTemplate::execute_sync_updates(server_template_deltas)
|
||||
.await,
|
||||
);
|
||||
maybe_extend(
|
||||
&mut update.logs,
|
||||
Server::execute_sync_updates(server_deltas).await,
|
||||
|
||||
@@ -1,5 +1,11 @@
 pub mod auth;
 pub mod execute;
 pub mod read;
+pub mod terminal;
 pub mod user;
 pub mod write;
+
+#[derive(serde::Deserialize)]
+struct Variant {
+  variant: String,
+}
@@ -1,7 +1,9 @@
|
||||
use std::{collections::HashSet, sync::OnceLock, time::Instant};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::{Extension, Router, middleware, routing::post};
|
||||
use axum::{
|
||||
Extension, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
@@ -18,6 +20,7 @@ use komodo_client::{
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
@@ -27,6 +30,8 @@ use crate::{
|
||||
resource,
|
||||
};
|
||||
|
||||
use super::Variant;
|
||||
|
||||
mod action;
|
||||
mod alert;
|
||||
mod alerter;
|
||||
@@ -38,7 +43,6 @@ mod procedure;
|
||||
mod provider;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod stack;
|
||||
mod sync;
|
||||
mod tag;
|
||||
@@ -93,12 +97,6 @@ enum ReadRequest {
|
||||
ListActions(ListActions),
|
||||
ListFullActions(ListFullActions),
|
||||
|
||||
// ==== SERVER TEMPLATE ====
|
||||
GetServerTemplate(GetServerTemplate),
|
||||
GetServerTemplatesSummary(GetServerTemplatesSummary),
|
||||
ListServerTemplates(ListServerTemplates),
|
||||
ListFullServerTemplates(ListFullServerTemplates),
|
||||
|
||||
// ==== SERVER ====
|
||||
GetServersSummary(GetServersSummary),
|
||||
GetServer(GetServer),
|
||||
@@ -224,9 +222,22 @@ enum ReadRequest {
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
async fn variant_handler(
|
||||
user: Extension<User>,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let req: ReadRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
#[instrument(name = "ReadHandler", level = "debug", skip(user), fields(user_id = user.id))]
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
@@ -271,6 +282,7 @@ fn core_info() -> &'static GetCoreInfoResponse {
|
||||
ui_write_disabled: config.ui_write_disabled,
|
||||
disable_confirm_dialog: config.disable_confirm_dialog,
|
||||
disable_non_admin_create: config.disable_non_admin_create,
|
||||
disable_websocket_reconnect: config.disable_websocket_reconnect,
|
||||
github_webhook_owners: config
|
||||
.github_webhook_app
|
||||
.installations
|
||||
|
||||
@@ -1,97 +0,0 @@
|
||||
use anyhow::Context;
|
||||
use komodo_client::{
|
||||
api::read::*,
|
||||
entities::{
|
||||
permission::PermissionLevel, server_template::ServerTemplate,
|
||||
},
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::mongodb::bson::doc;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
helpers::query::get_all_tags, resource, state::db_client,
|
||||
};
|
||||
|
||||
use super::ReadArgs;
|
||||
|
||||
impl Resolve<ReadArgs> for GetServerTemplate {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetServerTemplateResponse> {
|
||||
Ok(
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
&self.server_template,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListServerTemplates {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListServerTemplatesResponse> {
|
||||
let all_tags = if self.query.tags.is_empty() {
|
||||
vec![]
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_for_user::<ServerTemplate>(
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for ListFullServerTemplates {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<ListFullServerTemplatesResponse> {
|
||||
let all_tags = if self.query.tags.is_empty() {
|
||||
vec![]
|
||||
} else {
|
||||
get_all_tags(None).await?
|
||||
};
|
||||
Ok(
|
||||
resource::list_full_for_user::<ServerTemplate>(
|
||||
self.query, user, &all_tags,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<ReadArgs> for GetServerTemplatesSummary {
|
||||
async fn resolve(
|
||||
self,
|
||||
ReadArgs { user }: &ReadArgs,
|
||||
) -> serror::Result<GetServerTemplatesSummaryResponse> {
|
||||
let query = match resource::get_resource_object_ids_for_user::<
|
||||
ServerTemplate,
|
||||
>(user)
|
||||
.await?
|
||||
{
|
||||
Some(ids) => doc! {
|
||||
"_id": { "$in": ids }
|
||||
},
|
||||
None => Document::new(),
|
||||
};
|
||||
let total = db_client()
|
||||
.server_templates
|
||||
.count_documents(query)
|
||||
.await
|
||||
.context("failed to count all server template documents")?;
|
||||
let res = GetServerTemplatesSummaryResponse {
|
||||
total: total as u32,
|
||||
};
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
@@ -9,8 +9,7 @@ use komodo_client::{
|
||||
ResourceTarget, action::Action, alerter::Alerter, build::Build,
|
||||
builder::Builder, deployment::Deployment,
|
||||
permission::PermissionLevel, procedure::Procedure, repo::Repo,
|
||||
resource::ResourceQuery, server::Server,
|
||||
server_template::ServerTemplate, stack::Stack,
|
||||
resource::ResourceQuery, server::Server, stack::Stack,
|
||||
sync::ResourceSync, toml::ResourcesToml, user::User,
|
||||
},
|
||||
};
|
||||
@@ -132,16 +131,6 @@ async fn get_all_targets(
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::Action(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_for_user::<ServerTemplate>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
user,
|
||||
&all_tags,
|
||||
)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|resource| ResourceTarget::ServerTemplate(resource.id)),
|
||||
);
|
||||
targets.extend(
|
||||
resource::list_full_for_user::<ResourceSync>(
|
||||
ResourceQuery::builder().tags(tags).build(),
|
||||
@@ -241,20 +230,6 @@ impl Resolve<ReadArgs> for ExportResourcesToToml {
|
||||
))
|
||||
}
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
let template = resource::get_check_permissions::<
|
||||
ServerTemplate,
|
||||
>(&id, user, PermissionLevel::Read)
|
||||
.await?;
|
||||
res.server_templates.push(
|
||||
convert_resource::<ServerTemplate>(
|
||||
template,
|
||||
false,
|
||||
vec![],
|
||||
&id_to_tags,
|
||||
),
|
||||
)
|
||||
}
|
||||
ResourceTarget::Server(id) => {
|
||||
let server = resource::get_check_permissions::<Server>(
|
||||
&id,
|
||||
@@ -503,14 +478,6 @@ fn serialize_resources_toml(
|
||||
Builder::push_to_toml_string(builder, &mut toml)?;
|
||||
}
|
||||
|
||||
for server_template in resources.server_templates {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
}
|
||||
toml.push_str("[[server_template]]\n");
|
||||
ServerTemplate::push_to_toml_string(server_template, &mut toml)?;
|
||||
}
|
||||
|
||||
for resource_sync in resources.resource_syncs {
|
||||
if !toml.is_empty() {
|
||||
toml.push_str("\n\n##\n\n");
|
||||
|
||||
@@ -14,7 +14,6 @@ use komodo_client::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
update::{Update, UpdateListItem},
|
||||
@@ -132,16 +131,6 @@ impl Resolve<ReadArgs> for ListUpdates {
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "Alerter" });
|
||||
|
||||
let server_template_query =
|
||||
resource::get_resource_ids_for_user::<ServerTemplate>(user)
|
||||
.await?
|
||||
.map(|ids| {
|
||||
doc! {
|
||||
"target.type": "ServerTemplate", "target.id": { "$in": ids }
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| doc! { "target.type": "ServerTemplate" });
|
||||
|
||||
let resource_sync_query =
|
||||
resource::get_resource_ids_for_user::<ResourceSync>(
|
||||
user,
|
||||
@@ -166,7 +155,6 @@ impl Resolve<ReadArgs> for ListUpdates {
|
||||
action_query,
|
||||
alerter_query,
|
||||
builder_query,
|
||||
server_template_query,
|
||||
resource_sync_query,
|
||||
]
|
||||
});
|
||||
@@ -308,14 +296,6 @@ impl Resolve<ReadArgs> for GetUpdate {
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
id,
|
||||
user,
|
||||
PermissionLevel::Read,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::get_check_permissions::<ResourceSync>(
|
||||
id,
|
||||
|
||||
75  bin/core/src/api/terminal.rs (new file)
@@ -0,0 +1,75 @@
+use anyhow::Context;
+use axum::{Extension, Router, middleware, routing::post};
+use komodo_client::{
+  api::terminal::ExecuteTerminalBody,
+  entities::{
+    permission::PermissionLevel, server::Server, user::User,
+  },
+};
+use serror::Json;
+use uuid::Uuid;
+
+use crate::{
+  auth::auth_request, helpers::periphery_client, resource,
+};
+
+pub fn router() -> Router {
+  Router::new()
+    .route("/execute", post(execute))
+    .layer(middleware::from_fn(auth_request))
+}
+
+async fn execute(
+  Extension(user): Extension<User>,
+  Json(request): Json<ExecuteTerminalBody>,
+) -> serror::Result<axum::body::Body> {
+  execute_inner(Uuid::new_v4(), request, user).await
+}
+
+#[instrument(
+  name = "ExecuteTerminal",
+  skip(user),
+  fields(
+    user_id = user.id,
+  )
+)]
+async fn execute_inner(
+  req_id: Uuid,
+  ExecuteTerminalBody {
+    server,
+    terminal,
+    command,
+  }: ExecuteTerminalBody,
+  user: User,
+) -> serror::Result<axum::body::Body> {
+  info!("/terminal request | user: {}", user.username);
+
+  let res = async {
+    let server = resource::get_check_permissions::<Server>(
+      &server,
+      &user,
+      PermissionLevel::Write,
+    )
+    .await?;
+
+    let periphery = periphery_client(&server)?;
+
+    let stream = periphery
+      .execute_terminal(terminal, command)
+      .await
+      .context("Failed to execute command on periphery")?;
+
+    anyhow::Ok(stream)
+  }
+  .await;
+
+  let stream = match res {
+    Ok(stream) => stream,
+    Err(e) => {
+      warn!("/terminal request {req_id} error: {e:#}");
+      return Err(e.into());
+    }
+  };
+
+  Ok(axum::body::Body::from_stream(stream.into_line_stream()))
+}
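
The new core endpoint streams the periphery PTY output back as the HTTP response body, line by line (into_line_stream), so a client can render output incrementally. A hedged sketch of consuming it; the /terminal/execute path assumes the router above is nested under /terminal (which matches the "/terminal request" log line), and the terminal name value is an assumption. KOMODO_URL and headers are the placeholders from the earlier sketch:

// Stream the terminal output as it arrives.
const res = await fetch(`${KOMODO_URL}/terminal/execute`, {
  method: "POST",
  headers,
  body: JSON.stringify({
    server: "my-server",   // server name or id
    terminal: "default",   // terminal (pty) name, assumed value
    command: "docker ps",
  }),
});

const reader = res.body!.pipeThrough(new TextDecoderStream()).getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log(value); // one or more newline-delimited output chunks
}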
@@ -1,7 +1,9 @@
|
||||
use std::{collections::VecDeque, time::Instant};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::{Extension, Json, Router, middleware, routing::post};
|
||||
use axum::{
|
||||
Extension, Json, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use derive_variants::EnumVariants;
|
||||
use komodo_client::{
|
||||
api::user::*,
|
||||
@@ -12,6 +14,7 @@ use mungos::{by_id::update_one_by_id, mongodb::bson::to_bson};
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
@@ -21,6 +24,8 @@ use crate::{
|
||||
state::db_client,
|
||||
};
|
||||
|
||||
use super::Variant;
|
||||
|
||||
pub struct UserArgs {
|
||||
pub user: User,
|
||||
}
|
||||
@@ -43,9 +48,22 @@ enum UserRequest {
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
async fn variant_handler(
|
||||
user: Extension<User>,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let req: UserRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
#[instrument(name = "UserHandler", level = "debug", skip(user))]
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
|
||||
@@ -4,8 +4,7 @@ use komodo_client::{
|
||||
entities::{
|
||||
ResourceTarget, action::Action, alerter::Alerter, build::Build,
|
||||
builder::Builder, deployment::Deployment, procedure::Procedure,
|
||||
repo::Repo, server::Server, server_template::ServerTemplate,
|
||||
stack::Stack, sync::ResourceSync,
|
||||
repo::Repo, server::Server, stack::Stack, sync::ResourceSync,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
@@ -93,14 +92,6 @@ impl Resolve<WriteArgs> for UpdateDescription {
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
resource::update_description::<ServerTemplate>(
|
||||
&id,
|
||||
&self.description,
|
||||
user,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::update_description::<ResourceSync>(
|
||||
&id,
|
||||
|
||||
@@ -1,18 +1,23 @@
|
||||
use std::time::Instant;
|
||||
|
||||
use anyhow::Context;
|
||||
use axum::{Extension, Router, middleware, routing::post};
|
||||
use axum::{
|
||||
Extension, Router, extract::Path, middleware, routing::post,
|
||||
};
|
||||
use derive_variants::{EnumVariants, ExtractVariant};
|
||||
use komodo_client::{api::write::*, entities::user::User};
|
||||
use resolver_api::Resolve;
|
||||
use response::Response;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::json;
|
||||
use serror::Json;
|
||||
use typeshare::typeshare;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::auth::auth_request;
|
||||
|
||||
use super::Variant;
|
||||
|
||||
mod action;
|
||||
mod alerter;
|
||||
mod build;
|
||||
@@ -24,7 +29,6 @@ mod procedure;
|
||||
mod provider;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod service_user;
|
||||
mod stack;
|
||||
mod sync;
|
||||
@@ -111,13 +115,6 @@ pub enum WriteRequest {
|
||||
UpdateBuilder(UpdateBuilder),
|
||||
RenameBuilder(RenameBuilder),
|
||||
|
||||
// ==== SERVER TEMPLATE ====
|
||||
CreateServerTemplate(CreateServerTemplate),
|
||||
CopyServerTemplate(CopyServerTemplate),
|
||||
DeleteServerTemplate(DeleteServerTemplate),
|
||||
UpdateServerTemplate(UpdateServerTemplate),
|
||||
RenameServerTemplate(RenameServerTemplate),
|
||||
|
||||
// ==== REPO ====
|
||||
CreateRepo(CreateRepo),
|
||||
CopyRepo(CopyRepo),
|
||||
@@ -198,9 +195,22 @@ pub enum WriteRequest {
|
||||
pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/", post(handler))
|
||||
.route("/{variant}", post(variant_handler))
|
||||
.layer(middleware::from_fn(auth_request))
|
||||
}
|
||||
|
||||
async fn variant_handler(
|
||||
user: Extension<User>,
|
||||
Path(Variant { variant }): Path<Variant>,
|
||||
Json(params): Json<serde_json::Value>,
|
||||
) -> serror::Result<axum::response::Response> {
|
||||
let req: WriteRequest = serde_json::from_value(json!({
|
||||
"type": variant,
|
||||
"params": params,
|
||||
}))?;
|
||||
handler(user, Json(req)).await
|
||||
}
|
||||
|
||||
async fn handler(
|
||||
Extension(user): Extension<User>,
|
||||
Json(request): Json<WriteRequest>,
|
||||
|
||||
@@ -406,20 +406,6 @@ async fn extract_resource_target_with_validation(
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::Action, id))
|
||||
}
|
||||
ResourceTarget::ServerTemplate(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
Err(_) => doc! { "name": ident },
|
||||
};
|
||||
let id = db_client()
|
||||
.server_templates
|
||||
.find_one(filter)
|
||||
.await
|
||||
.context("failed to query db for server templates")?
|
||||
.context("no matching server template found")?
|
||||
.id;
|
||||
Ok((ResourceTargetVariant::ServerTemplate, id))
|
||||
}
|
||||
ResourceTarget::ResourceSync(ident) => {
|
||||
let filter = match ObjectId::from_str(ident) {
|
||||
Ok(id) => doc! { "_id": id },
|
||||
|
||||
@@ -1,92 +0,0 @@
|
||||
use komodo_client::{
|
||||
api::write::{
|
||||
CopyServerTemplate, CreateServerTemplate, DeleteServerTemplate,
|
||||
RenameServerTemplate, UpdateServerTemplate,
|
||||
},
|
||||
entities::{
|
||||
permission::PermissionLevel, server_template::ServerTemplate,
|
||||
update::Update,
|
||||
},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::resource;
|
||||
|
||||
use super::WriteArgs;
|
||||
|
||||
impl Resolve<WriteArgs> for CreateServerTemplate {
|
||||
#[instrument(name = "CreateServerTemplate", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<ServerTemplate> {
|
||||
Ok(
|
||||
resource::create::<ServerTemplate>(
|
||||
&self.name,
|
||||
self.config,
|
||||
user,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for CopyServerTemplate {
|
||||
#[instrument(name = "CopyServerTemplate", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<ServerTemplate> {
|
||||
let ServerTemplate { config, .. } =
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
&self.id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
Ok(
|
||||
resource::create::<ServerTemplate>(
|
||||
&self.name,
|
||||
config.into(),
|
||||
user,
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for DeleteServerTemplate {
|
||||
#[instrument(name = "DeleteServerTemplate", skip(args))]
|
||||
async fn resolve(
|
||||
self,
|
||||
args: &WriteArgs,
|
||||
) -> serror::Result<ServerTemplate> {
|
||||
Ok(resource::delete::<ServerTemplate>(&self.id, args).await?)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for UpdateServerTemplate {
|
||||
#[instrument(name = "UpdateServerTemplate", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<ServerTemplate> {
|
||||
Ok(
|
||||
resource::update::<ServerTemplate>(&self.id, self.config, user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl Resolve<WriteArgs> for RenameServerTemplate {
|
||||
#[instrument(name = "RenameServerTemplate", skip(user))]
|
||||
async fn resolve(
|
||||
self,
|
||||
WriteArgs { user }: &WriteArgs,
|
||||
) -> serror::Result<Update> {
|
||||
Ok(
|
||||
resource::rename::<ServerTemplate>(&self.id, &self.name, user)
|
||||
.await?,
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -19,7 +19,6 @@ use komodo_client::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::{
|
||||
PartialResourceSyncConfig, ResourceSync, ResourceSyncInfo,
|
||||
@@ -686,17 +685,6 @@ impl Resolve<WriteArgs> for RefreshResourceSyncPending {
|
||||
&mut diffs,
|
||||
)
|
||||
.await?;
|
||||
push_updates_for_view::<ServerTemplate>(
|
||||
resources.server_templates,
|
||||
delete,
|
||||
&all_resources,
|
||||
None,
|
||||
None,
|
||||
&id_to_tags,
|
||||
&sync.config.match_tags,
|
||||
&mut diffs,
|
||||
)
|
||||
.await?;
|
||||
push_updates_for_view::<ResourceSync>(
|
||||
resources.resource_syncs,
|
||||
delete,
|
||||
|
||||
@@ -17,7 +17,6 @@ use komodo_client::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
tag::{Tag, TagColor},
|
||||
@@ -131,7 +130,6 @@ impl Resolve<WriteArgs> for DeleteTag {
|
||||
resource::remove_tag_from_all::<Builder>(&self.id),
|
||||
resource::remove_tag_from_all::<Alerter>(&self.id),
|
||||
resource::remove_tag_from_all::<Procedure>(&self.id),
|
||||
resource::remove_tag_from_all::<ServerTemplate>(&self.id),
|
||||
)?;
|
||||
|
||||
delete_one_by_id(&db_client().tags, &self.id, None).await?;
|
||||
@@ -225,16 +223,6 @@ impl Resolve<WriteArgs> for UpdateTagsOnResource {
|
||||
.await?;
|
||||
resource::update_tags::<Action>(&id, self.tags, args).await?
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
resource::get_check_permissions::<ServerTemplate>(
|
||||
&id,
|
||||
user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await?;
|
||||
resource::update_tags::<ServerTemplate>(&id, self.tags, args)
|
||||
.await?
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::get_check_permissions::<ResourceSync>(
|
||||
&id,
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use std::{str::FromStr, time::Duration};
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use aws_config::{BehaviorVersion, Region};
|
||||
@@ -8,15 +8,15 @@ use aws_sdk_ec2::{
|
||||
BlockDeviceMapping, EbsBlockDevice,
|
||||
InstanceNetworkInterfaceSpecification, InstanceStateChange,
|
||||
InstanceStateName, InstanceStatus, InstanceType, ResourceType,
|
||||
Tag, TagSpecification, VolumeType,
|
||||
Tag, TagSpecification,
|
||||
},
|
||||
};
|
||||
use base64::Engine;
|
||||
use komodo_client::entities::{
|
||||
ResourceTarget,
|
||||
alert::{Alert, AlertData, SeverityLevel},
|
||||
builder::AwsBuilderConfig,
|
||||
komodo_timestamp,
|
||||
server_template::aws::AwsServerTemplateConfig,
|
||||
};
|
||||
|
||||
use crate::{alert::send_alerts, config::core_config};
|
||||
@@ -71,12 +71,12 @@ async fn create_ec2_client(region: String) -> Client {
|
||||
#[instrument]
|
||||
pub async fn launch_ec2_instance(
|
||||
name: &str,
|
||||
config: AwsServerTemplateConfig,
|
||||
config: &AwsBuilderConfig,
|
||||
) -> anyhow::Result<Ec2Instance> {
|
||||
let AwsServerTemplateConfig {
|
||||
let AwsBuilderConfig {
|
||||
region,
|
||||
instance_type,
|
||||
volumes,
|
||||
volume_gb,
|
||||
ami_id,
|
||||
subnet_id,
|
||||
security_group_ids,
|
||||
@@ -86,19 +86,22 @@ pub async fn launch_ec2_instance(
|
||||
user_data,
|
||||
port: _,
|
||||
use_https: _,
|
||||
git_providers: _,
|
||||
docker_registries: _,
|
||||
secrets: _,
|
||||
} = config;
|
||||
let instance_type = handle_unknown_instance_type(
|
||||
InstanceType::from(instance_type.as_str()),
|
||||
)?;
|
||||
let client = create_ec2_client(region.clone()).await;
|
||||
let mut req = client
|
||||
let req = client
|
||||
.run_instances()
|
||||
.image_id(ami_id)
|
||||
.instance_type(instance_type)
|
||||
.network_interfaces(
|
||||
InstanceNetworkInterfaceSpecification::builder()
|
||||
.subnet_id(subnet_id)
|
||||
.associate_public_ip_address(assign_public_ip)
|
||||
.associate_public_ip_address(*assign_public_ip)
|
||||
.set_groups(security_group_ids.to_vec().into())
|
||||
.device_index(0)
|
||||
.build(),
|
||||
@@ -110,6 +113,17 @@ pub async fn launch_ec2_instance(
|
||||
.resource_type(ResourceType::Instance)
|
||||
.build(),
|
||||
)
|
||||
.block_device_mappings(
|
||||
BlockDeviceMapping::builder()
|
||||
.set_device_name("/dev/sda1".to_string().into())
|
||||
.set_ebs(
|
||||
EbsBlockDevice::builder()
|
||||
.volume_size(*volume_gb)
|
||||
.build()
|
||||
.into(),
|
||||
)
|
||||
.build(),
|
||||
)
|
||||
.min_count(1)
|
||||
.max_count(1)
|
||||
.user_data(
|
||||
@@ -117,26 +131,6 @@ pub async fn launch_ec2_instance(
|
||||
.encode(user_data),
|
||||
);
|
||||
|
||||
for volume in volumes {
|
||||
let ebs = EbsBlockDevice::builder()
|
||||
.volume_size(volume.size_gb)
|
||||
.volume_type(
|
||||
VolumeType::from_str(volume.volume_type.as_ref())
|
||||
.context("invalid volume type")?,
|
||||
)
|
||||
.set_iops((volume.iops != 0).then_some(volume.iops))
|
||||
.set_throughput(
|
||||
(volume.throughput != 0).then_some(volume.throughput),
|
||||
)
|
||||
.build();
|
||||
req = req.block_device_mappings(
|
||||
BlockDeviceMapping::builder()
|
||||
.set_device_name(volume.device_name.into())
|
||||
.set_ebs(ebs.into())
|
||||
.build(),
|
||||
)
|
||||
}
|
||||
|
||||
let res = req
|
||||
.send()
|
||||
.await
|
||||
@@ -156,7 +150,7 @@ pub async fn launch_ec2_instance(
|
||||
let state_name =
|
||||
get_ec2_instance_state_name(&client, &instance_id).await?;
|
||||
if state_name == Some(InstanceStateName::Running) {
|
||||
let ip = if use_public_ip {
|
||||
let ip = if *use_public_ip {
|
||||
get_ec2_instance_public_ip(&client, &instance_id).await?
|
||||
} else {
|
||||
instance
|
||||
|
||||
@@ -1,157 +0,0 @@
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::http::{HeaderName, HeaderValue};
|
||||
use reqwest::{RequestBuilder, StatusCode};
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
|
||||
use super::{
|
||||
common::{
|
||||
HetznerActionResponse, HetznerDatacenterResponse,
|
||||
HetznerServerResponse, HetznerVolumeResponse,
|
||||
},
|
||||
create_server::{CreateServerBody, CreateServerResponse},
|
||||
create_volume::{CreateVolumeBody, CreateVolumeResponse},
|
||||
};
|
||||
|
||||
const BASE_URL: &str = "https://api.hetzner.cloud/v1";
|
||||
|
||||
pub struct HetznerClient(reqwest::Client);
|
||||
|
||||
impl HetznerClient {
|
||||
pub fn new(token: &str) -> HetznerClient {
|
||||
HetznerClient(
|
||||
reqwest::ClientBuilder::new()
|
||||
.default_headers(
|
||||
[(
|
||||
HeaderName::from_static("authorization"),
|
||||
HeaderValue::from_str(&format!("Bearer {token}"))
|
||||
.unwrap(),
|
||||
)]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
)
|
||||
.build()
|
||||
.context("failed to build Hetzner request client")
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
pub async fn get_server(
|
||||
&self,
|
||||
id: i64,
|
||||
) -> anyhow::Result<HetznerServerResponse> {
|
||||
self.get(&format!("/servers/{id}")).await
|
||||
}
|
||||
|
||||
pub async fn create_server(
|
||||
&self,
|
||||
body: &CreateServerBody,
|
||||
) -> anyhow::Result<CreateServerResponse> {
|
||||
self.post("/servers", body).await
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
pub async fn delete_server(
|
||||
&self,
|
||||
id: i64,
|
||||
) -> anyhow::Result<HetznerActionResponse> {
|
||||
self.delete(&format!("/servers/{id}")).await
|
||||
}
|
||||
|
||||
pub async fn get_volume(
|
||||
&self,
|
||||
id: i64,
|
||||
) -> anyhow::Result<HetznerVolumeResponse> {
|
||||
self.get(&format!("/volumes/{id}")).await
|
||||
}
|
||||
|
||||
pub async fn create_volume(
|
||||
&self,
|
||||
body: &CreateVolumeBody,
|
||||
) -> anyhow::Result<CreateVolumeResponse> {
|
||||
self.post("/volumes", body).await
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
pub async fn delete_volume(&self, id: i64) -> anyhow::Result<()> {
|
||||
let res = self
|
||||
.0
|
||||
.delete(format!("{BASE_URL}/volumes/{id}"))
|
||||
.send()
|
||||
.await
|
||||
.context("failed at request to delete volume")?;
|
||||
|
||||
let status = res.status();
|
||||
|
||||
if status == StatusCode::NO_CONTENT {
|
||||
Ok(())
|
||||
} else {
|
||||
let text = res
|
||||
.text()
|
||||
.await
|
||||
.context("failed to get response body as text")?;
|
||||
Err(anyhow!("{status} | {text}"))
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
pub async fn list_datacenters(
|
||||
&self,
|
||||
) -> anyhow::Result<HetznerDatacenterResponse> {
|
||||
self.get("/datacenters").await
|
||||
}
|
||||
|
||||
async fn get<Res: DeserializeOwned>(
|
||||
&self,
|
||||
path: &str,
|
||||
) -> anyhow::Result<Res> {
|
||||
let req = self.0.get(format!("{BASE_URL}{path}"));
|
||||
handle_req(req).await.with_context(|| {
|
||||
format!("failed at GET request to Hetzner | path: {path}")
|
||||
})
|
||||
}
|
||||
|
||||
async fn post<Body: Serialize, Res: DeserializeOwned>(
|
||||
&self,
|
||||
path: &str,
|
||||
body: &Body,
|
||||
) -> anyhow::Result<Res> {
|
||||
let req = self.0.post(format!("{BASE_URL}{path}")).json(&body);
|
||||
handle_req(req).await.with_context(|| {
|
||||
format!("failed at POST request to Hetzner | path: {path}")
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
async fn delete<Res: DeserializeOwned>(
|
||||
&self,
|
||||
path: &str,
|
||||
) -> anyhow::Result<Res> {
|
||||
let req = self.0.delete(format!("{BASE_URL}{path}"));
|
||||
handle_req(req).await.with_context(|| {
|
||||
format!("failed at DELETE request to Hetzner | path: {path}")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async fn handle_req<Res: DeserializeOwned>(
|
||||
req: RequestBuilder,
|
||||
) -> anyhow::Result<Res> {
|
||||
let res = req.send().await?;
|
||||
|
||||
let status = res.status();
|
||||
|
||||
if status.is_success() {
|
||||
res.json().await.context("failed to parse response to json")
|
||||
} else {
|
||||
let text = res
|
||||
.text()
|
||||
.await
|
||||
.context("failed to get response body as text")?;
|
||||
if let Ok(json_error) =
|
||||
serde_json::from_str::<serde_json::Value>(&text)
|
||||
{
|
||||
return Err(anyhow!("{status} | {json_error:?}"));
|
||||
}
|
||||
Err(anyhow!("{status} | {text}"))
|
||||
}
|
||||
}
|
||||
@@ -1,280 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerServerResponse {
|
||||
pub server: HetznerServer,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerServer {
|
||||
pub id: i64,
|
||||
pub name: String,
|
||||
pub primary_disk_size: f64,
|
||||
pub image: Option<HetznerImage>,
|
||||
pub private_net: Vec<HetznerPrivateNet>,
|
||||
pub public_net: HetznerPublicNet,
|
||||
pub server_type: HetznerServerTypeDetails,
|
||||
pub status: HetznerServerStatus,
|
||||
#[serde(default)]
|
||||
pub volumes: Vec<i64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerServerTypeDetails {
|
||||
pub architecture: String,
|
||||
pub cores: i64,
|
||||
pub cpu_type: String,
|
||||
pub description: String,
|
||||
pub disk: f64,
|
||||
pub id: i64,
|
||||
pub memory: f64,
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerPrivateNet {
|
||||
pub alias_ips: Vec<String>,
|
||||
pub ip: String,
|
||||
pub mac_address: String,
|
||||
pub network: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerPublicNet {
|
||||
#[serde(default)]
|
||||
pub firewalls: Vec<HetznerFirewall>,
|
||||
pub floating_ips: Vec<i64>,
|
||||
pub ipv4: Option<HetznerIpv4>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerFirewall {
|
||||
pub id: i64,
|
||||
pub status: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerIpv4 {
|
||||
pub id: Option<i64>,
|
||||
pub blocked: bool,
|
||||
pub dns_ptr: String,
|
||||
pub ip: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerImage {
|
||||
pub id: i64,
|
||||
pub description: String,
|
||||
pub name: Option<String>,
|
||||
pub os_flavor: String,
|
||||
pub os_version: Option<String>,
|
||||
pub rapid_deploy: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerActionResponse {
|
||||
pub action: HetznerAction,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerAction {
|
||||
pub command: String,
|
||||
pub error: Option<HetznerError>,
|
||||
pub finished: Option<String>,
|
||||
pub id: i64,
|
||||
pub progress: i32,
|
||||
pub resources: Vec<HetznerResource>,
|
||||
pub started: String,
|
||||
pub status: HetznerActionStatus,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerError {
|
||||
pub code: String,
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerResource {
|
||||
pub id: i64,
|
||||
#[serde(rename = "type")]
|
||||
pub ty: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerVolumeResponse {
|
||||
pub volume: HetznerVolume,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerVolume {
|
||||
/// Name of the Resource. Must be unique per Project.
|
||||
pub name: String,
|
||||
/// Point in time when the Resource was created (in ISO-8601 format).
|
||||
pub created: String,
|
||||
/// Filesystem of the Volume if formatted on creation, null if not formatted on creation
|
||||
pub format: Option<HetznerVolumeFormat>,
|
||||
/// ID of the Volume.
|
||||
pub id: i64,
|
||||
/// User-defined labels (key/value pairs) for the Resource
|
||||
pub labels: HashMap<String, String>,
|
||||
/// Device path on the file system for the Volume
|
||||
pub linux_device: String,
|
||||
/// Protection configuration for the Resource.
|
||||
pub protection: HetznerProtection,
|
||||
/// ID of the Server the Volume is attached to, null if it is not attached at all
|
||||
pub server: Option<i64>,
|
||||
/// Size in GB of the Volume
|
||||
pub size: i64,
|
||||
/// Current status of the Volume. Allowed: `creating`, `available`
|
||||
pub status: HetznerVolumeStatus,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerProtection {
|
||||
/// Prevent the Resource from being deleted.
|
||||
pub delete: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerDatacenterResponse {
|
||||
pub datacenters: Vec<HetznerDatacenterDetails>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct HetznerDatacenterDetails {
|
||||
pub id: i64,
|
||||
pub name: String,
|
||||
pub location: serde_json::Map<String, serde_json::Value>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum HetznerLocation {
|
||||
#[serde(rename = "nbg1")]
|
||||
Nuremberg1,
|
||||
#[serde(rename = "hel1")]
|
||||
Helsinki1,
|
||||
#[serde(rename = "fsn1")]
|
||||
Falkenstein1,
|
||||
#[serde(rename = "ash")]
|
||||
Ashburn,
|
||||
#[serde(rename = "hil")]
|
||||
Hillsboro,
|
||||
#[serde(rename = "sin")]
|
||||
Singapore,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
|
||||
pub enum HetznerDatacenter {
|
||||
#[serde(rename = "nbg1-dc3")]
|
||||
Nuremberg1Dc3,
|
||||
#[serde(rename = "hel1-dc2")]
|
||||
Helsinki1Dc2,
|
||||
#[serde(rename = "fsn1-dc14")]
|
||||
Falkenstein1Dc14,
|
||||
#[serde(rename = "ash-dc1")]
|
||||
AshburnDc1,
|
||||
#[serde(rename = "hil-dc1")]
|
||||
HillsboroDc1,
|
||||
#[serde(rename = "sin-dc1")]
|
||||
SingaporeDc1,
|
||||
}
|
||||
|
||||
impl From<HetznerDatacenter> for HetznerLocation {
|
||||
fn from(value: HetznerDatacenter) -> Self {
|
||||
match value {
|
||||
HetznerDatacenter::Nuremberg1Dc3 => HetznerLocation::Nuremberg1,
|
||||
HetznerDatacenter::Helsinki1Dc2 => HetznerLocation::Helsinki1,
|
||||
HetznerDatacenter::Falkenstein1Dc14 => {
|
||||
HetznerLocation::Falkenstein1
|
||||
}
|
||||
HetznerDatacenter::AshburnDc1 => HetznerLocation::Ashburn,
|
||||
HetznerDatacenter::HillsboroDc1 => HetznerLocation::Hillsboro,
|
||||
HetznerDatacenter::SingaporeDc1 => HetznerLocation::Singapore,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum HetznerVolumeFormat {
|
||||
Xfs,
|
||||
Ext4,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum HetznerVolumeStatus {
|
||||
Creating,
|
||||
Available,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum HetznerServerStatus {
|
||||
Running,
|
||||
Initializing,
|
||||
Starting,
|
||||
Stopping,
|
||||
Off,
|
||||
Deleting,
|
||||
Migrating,
|
||||
Rebuilding,
|
||||
Unknown,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum HetznerActionStatus {
|
||||
Running,
|
||||
Success,
|
||||
Error,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "UPPERCASE")]
|
||||
#[allow(clippy::enum_variant_names)]
|
||||
pub enum HetznerServerType {
|
||||
// Shared
|
||||
#[serde(rename = "cpx11")]
|
||||
SharedAmd2Core2Ram40Disk,
|
||||
#[serde(rename = "cax11")]
|
||||
SharedArm2Core4Ram40Disk,
|
||||
#[serde(rename = "cx22")]
|
||||
SharedIntel2Core4Ram40Disk,
|
||||
#[serde(rename = "cpx21")]
|
||||
SharedAmd3Core4Ram80Disk,
|
||||
#[serde(rename = "cax21")]
|
||||
SharedArm4Core8Ram80Disk,
|
||||
#[serde(rename = "cx32")]
|
||||
SharedIntel4Core8Ram80Disk,
|
||||
#[serde(rename = "cpx31")]
|
||||
SharedAmd4Core8Ram160Disk,
|
||||
#[serde(rename = "cax31")]
|
||||
SharedArm8Core16Ram160Disk,
|
||||
#[serde(rename = "cx42")]
|
||||
SharedIntel8Core16Ram160Disk,
|
||||
#[serde(rename = "cpx41")]
|
||||
SharedAmd8Core16Ram240Disk,
|
||||
#[serde(rename = "cax41")]
|
||||
SharedArm16Core32Ram320Disk,
|
||||
#[serde(rename = "cx52")]
|
||||
SharedIntel16Core32Ram320Disk,
|
||||
#[serde(rename = "cpx51")]
|
||||
SharedAmd16Core32Ram360Disk,
|
||||
// Dedicated
|
||||
#[serde(rename = "ccx13")]
|
||||
DedicatedAmd2Core8Ram80Disk,
|
||||
#[serde(rename = "ccx23")]
|
||||
DedicatedAmd4Core16Ram160Disk,
|
||||
#[serde(rename = "ccx33")]
|
||||
DedicatedAmd8Core32Ram240Disk,
|
||||
#[serde(rename = "ccx43")]
|
||||
DedicatedAmd16Core64Ram360Disk,
|
||||
#[serde(rename = "ccx53")]
|
||||
DedicatedAmd32Core128Ram600Disk,
|
||||
#[serde(rename = "ccx63")]
|
||||
DedicatedAmd48Core192Ram960Disk,
|
||||
}
|
||||
@@ -1,75 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::common::{
|
||||
HetznerAction, HetznerDatacenter, HetznerLocation, HetznerServer,
|
||||
HetznerServerType,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct CreateServerBody {
|
||||
/// Name of the Server to create (must be unique per Project and a valid hostname as per RFC 1123)
|
||||
pub name: String,
|
||||
/// Auto-mount Volumes after attach
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub automount: Option<bool>,
|
||||
/// ID or name of Datacenter to create Server in (must not be used together with location)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub datacenter: Option<HetznerDatacenter>,
|
||||
/// ID or name of Location to create Server in (must not be used together with datacenter)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub location: Option<HetznerLocation>,
|
||||
/// Firewalls which should be applied on the Server's public network interface at creation time
|
||||
pub firewalls: Vec<Firewall>,
|
||||
/// ID or name of the Image the Server is created from
|
||||
pub image: String,
|
||||
/// User-defined labels (key-value pairs) for the Resource
|
||||
pub labels: HashMap<String, String>,
|
||||
/// Network IDs which should be attached to the Server private network interface at the creation time
|
||||
pub networks: Vec<i64>,
|
||||
/// ID of the Placement Group the server should be in
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub placement_group: Option<i64>,
|
||||
/// Public Network options
|
||||
pub public_net: PublicNet,
|
||||
/// ID or name of the Server type this Server should be created with
|
||||
pub server_type: HetznerServerType,
|
||||
/// SSH key IDs (integer) or names (string) which should be injected into the Server at creation time
|
||||
pub ssh_keys: Vec<String>,
|
||||
/// This automatically triggers a Power on a Server Action after the creation is finished and is returned in the next_actions response object.
|
||||
pub start_after_create: bool,
|
||||
/// Cloud-Init user data to use during Server creation. This field is limited to 32KiB.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub user_data: Option<String>,
|
||||
/// Volume IDs which should be attached to the Server at the creation time. Volumes must be in the same Location.
|
||||
pub volumes: Vec<i64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize)]
|
||||
pub struct Firewall {
|
||||
/// ID of the Firewall
|
||||
pub firewall: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Serialize)]
|
||||
pub struct PublicNet {
|
||||
/// Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached.
|
||||
pub enable_ipv4: bool,
|
||||
/// Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached.
|
||||
pub enable_ipv6: bool,
|
||||
/// ID of the ipv4 Primary IP to use. If omitted and enable_ipv4 is true, a new ipv4 Primary IP will automatically be created.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ipv4: Option<i64>,
|
||||
/// ID of the ipv6 Primary IP to use. If omitted and enable_ipv6 is true, a new ipv6 Primary IP will automatically be created.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub ipv6: Option<i64>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct CreateServerResponse {
|
||||
pub action: HetznerAction,
|
||||
pub next_actions: Vec<HetznerAction>,
|
||||
pub root_password: Option<String>,
|
||||
pub server: HetznerServer,
|
||||
}
|
||||
@@ -1,36 +0,0 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use super::common::{
|
||||
HetznerAction, HetznerLocation, HetznerVolume, HetznerVolumeFormat,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
pub struct CreateVolumeBody {
|
||||
/// Name of the volume
|
||||
pub name: String,
|
||||
/// Auto-mount Volume after attach. server must be provided.
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub automount: Option<bool>,
|
||||
/// Format Volume after creation. One of: xfs, ext4
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub format: Option<HetznerVolumeFormat>,
|
||||
/// User-defined labels (key-value pairs) for the Resource
|
||||
pub labels: HashMap<String, String>,
|
||||
/// Location to create the Volume in (can be omitted if Server is specified)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub location: Option<HetznerLocation>,
|
||||
/// Server to which to attach the Volume once it's created (Volume will be created in the same Location as the server)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub server: Option<i64>,
|
||||
/// Size of the Volume in GB
|
||||
pub size: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
pub struct CreateVolumeResponse {
|
||||
pub action: HetznerAction,
|
||||
pub next_actions: Vec<HetznerAction>,
|
||||
pub volume: HetznerVolume,
|
||||
}
|
||||
@@ -1,281 +0,0 @@
|
||||
use std::{
|
||||
sync::{Arc, Mutex, OnceLock},
|
||||
time::Duration,
|
||||
};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use futures::future::join_all;
|
||||
use komodo_client::entities::server_template::hetzner::{
|
||||
HetznerDatacenter, HetznerServerTemplateConfig, HetznerServerType,
|
||||
HetznerVolumeFormat,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
cloud::hetzner::{
|
||||
common::HetznerServerStatus, create_server::CreateServerBody,
|
||||
create_volume::CreateVolumeBody,
|
||||
},
|
||||
config::core_config,
|
||||
};
|
||||
|
||||
use self::{client::HetznerClient, common::HetznerVolumeStatus};
|
||||
|
||||
mod client;
|
||||
mod common;
|
||||
mod create_server;
|
||||
mod create_volume;
|
||||
|
||||
fn hetzner() -> Option<&'static HetznerClient> {
|
||||
static HETZNER_CLIENT: OnceLock<Option<HetznerClient>> =
|
||||
OnceLock::new();
|
||||
HETZNER_CLIENT
|
||||
.get_or_init(|| {
|
||||
let token = &core_config().hetzner.token;
|
||||
(!token.is_empty()).then(|| HetznerClient::new(token))
|
||||
})
|
||||
.as_ref()
|
||||
}
|
||||
|
||||
pub struct HetznerServerMinimal {
|
||||
pub id: i64,
|
||||
pub ip: String,
|
||||
}
|
||||
|
||||
const POLL_RATE_SECS: u64 = 3;
|
||||
const MAX_POLL_TRIES: usize = 100;
|
||||
|
||||
#[instrument]
|
||||
pub async fn launch_hetzner_server(
|
||||
name: &str,
|
||||
config: HetznerServerTemplateConfig,
|
||||
) -> anyhow::Result<HetznerServerMinimal> {
|
||||
let hetzner =
|
||||
*hetzner().as_ref().context("Hetzner token not configured")?;
|
||||
let HetznerServerTemplateConfig {
|
||||
image,
|
||||
datacenter,
|
||||
private_network_ids,
|
||||
placement_group,
|
||||
enable_public_ipv4,
|
||||
enable_public_ipv6,
|
||||
firewall_ids,
|
||||
server_type,
|
||||
ssh_keys,
|
||||
user_data,
|
||||
use_public_ip,
|
||||
labels,
|
||||
volumes,
|
||||
port: _,
|
||||
use_https: _,
|
||||
} = config;
|
||||
let datacenter = hetzner_datacenter(datacenter);
|
||||
|
||||
// Create volumes and get their ids
|
||||
let mut volume_ids = Vec::new();
|
||||
for volume in volumes {
|
||||
let body = CreateVolumeBody {
|
||||
name: volume.name,
|
||||
format: Some(hetzner_format(volume.format)),
|
||||
location: Some(datacenter.into()),
|
||||
labels: volume.labels,
|
||||
size: volume.size_gb,
|
||||
automount: None,
|
||||
server: None,
|
||||
};
|
||||
let id = hetzner
|
||||
.create_volume(&body)
|
||||
.await
|
||||
.context("failed to create hetzner volume")?
|
||||
.volume
|
||||
.id;
|
||||
volume_ids.push(id);
|
||||
}
|
||||
|
||||
// Make sure volumes are available before continuing
|
||||
let vol_ids_poll = Arc::new(Mutex::new(volume_ids.clone()));
|
||||
for _ in 0..MAX_POLL_TRIES {
|
||||
if vol_ids_poll.lock().unwrap().is_empty() {
|
||||
break;
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
|
||||
let ids = vol_ids_poll.lock().unwrap().clone();
|
||||
let futures = ids.into_iter().map(|id| {
|
||||
let vol_ids = vol_ids_poll.clone();
|
||||
async move {
|
||||
let Ok(res) = hetzner.get_volume(id).await else {
|
||||
return;
|
||||
};
|
||||
if matches!(res.volume.status, HetznerVolumeStatus::Available)
|
||||
{
|
||||
vol_ids.lock().unwrap().retain(|_id| *_id != id);
|
||||
}
|
||||
}
|
||||
});
|
||||
join_all(futures).await;
|
||||
}
|
||||
if !vol_ids_poll.lock().unwrap().is_empty() {
|
||||
return Err(anyhow!("Volumes not ready after poll"));
|
||||
}
|
||||
|
||||
let body = CreateServerBody {
|
||||
name: name.to_string(),
|
||||
automount: None,
|
||||
datacenter: Some(datacenter),
|
||||
location: None,
|
||||
firewalls: firewall_ids
|
||||
.into_iter()
|
||||
.map(|firewall| create_server::Firewall { firewall })
|
||||
.collect(),
|
||||
image,
|
||||
labels,
|
||||
networks: private_network_ids,
|
||||
placement_group: (placement_group > 0).then_some(placement_group),
|
||||
public_net: create_server::PublicNet {
|
||||
enable_ipv4: enable_public_ipv4,
|
||||
enable_ipv6: enable_public_ipv6,
|
||||
ipv4: None,
|
||||
ipv6: None,
|
||||
},
|
||||
server_type: hetzner_server_type(server_type),
|
||||
ssh_keys,
|
||||
start_after_create: true,
|
||||
user_data: (!user_data.is_empty()).then_some(user_data),
|
||||
volumes: volume_ids,
|
||||
};
|
||||
|
||||
let server_id = hetzner
|
||||
.create_server(&body)
|
||||
.await
|
||||
.context("failed to create hetnzer server")?
|
||||
.server
|
||||
.id;
|
||||
|
||||
for _ in 0..MAX_POLL_TRIES {
|
||||
tokio::time::sleep(Duration::from_secs(POLL_RATE_SECS)).await;
|
||||
let Ok(res) = hetzner.get_server(server_id).await else {
|
||||
continue;
|
||||
};
|
||||
if matches!(res.server.status, HetznerServerStatus::Running) {
|
||||
let ip = if use_public_ip {
|
||||
res
|
||||
.server
|
||||
.public_net
|
||||
.ipv4
|
||||
.context("instance does not have public ipv4 attached")?
|
||||
.ip
|
||||
} else {
|
||||
res
|
||||
.server
|
||||
.private_net
|
||||
.first()
|
||||
.context("no private networks attached")?
|
||||
.ip
|
||||
.to_string()
|
||||
};
|
||||
let server = HetznerServerMinimal { id: server_id, ip };
|
||||
return Ok(server);
|
||||
}
|
||||
}
|
||||
|
||||
Err(anyhow!(
|
||||
"failed to verify server running after polling status"
|
||||
))
|
||||
}
|
||||
|
||||
fn hetzner_format(
|
||||
format: HetznerVolumeFormat,
|
||||
) -> common::HetznerVolumeFormat {
|
||||
match format {
|
||||
HetznerVolumeFormat::Xfs => common::HetznerVolumeFormat::Xfs,
|
||||
HetznerVolumeFormat::Ext4 => common::HetznerVolumeFormat::Ext4,
|
||||
}
|
||||
}
|
||||
|
||||
fn hetzner_datacenter(
|
||||
datacenter: HetznerDatacenter,
|
||||
) -> common::HetznerDatacenter {
|
||||
match datacenter {
|
||||
HetznerDatacenter::Nuremberg1Dc3 => {
|
||||
common::HetznerDatacenter::Nuremberg1Dc3
|
||||
}
|
||||
HetznerDatacenter::Helsinki1Dc2 => {
|
||||
common::HetznerDatacenter::Helsinki1Dc2
|
||||
}
|
||||
HetznerDatacenter::Falkenstein1Dc14 => {
|
||||
common::HetznerDatacenter::Falkenstein1Dc14
|
||||
}
|
||||
HetznerDatacenter::AshburnDc1 => {
|
||||
common::HetznerDatacenter::AshburnDc1
|
||||
}
|
||||
HetznerDatacenter::HillsboroDc1 => {
|
||||
common::HetznerDatacenter::HillsboroDc1
|
||||
}
|
||||
HetznerDatacenter::SingaporeDc1 => {
|
||||
common::HetznerDatacenter::SingaporeDc1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn hetzner_server_type(
|
||||
server_type: HetznerServerType,
|
||||
) -> common::HetznerServerType {
|
||||
match server_type {
|
||||
HetznerServerType::SharedAmd2Core2Ram40Disk => {
|
||||
common::HetznerServerType::SharedAmd2Core2Ram40Disk
|
||||
}
|
||||
HetznerServerType::SharedArm2Core4Ram40Disk => {
|
||||
common::HetznerServerType::SharedArm2Core4Ram40Disk
|
||||
}
|
||||
HetznerServerType::SharedIntel2Core4Ram40Disk => {
|
||||
common::HetznerServerType::SharedIntel2Core4Ram40Disk
|
||||
}
|
||||
HetznerServerType::SharedAmd3Core4Ram80Disk => {
|
||||
common::HetznerServerType::SharedAmd3Core4Ram80Disk
|
||||
}
|
||||
HetznerServerType::SharedArm4Core8Ram80Disk => {
|
||||
common::HetznerServerType::SharedArm4Core8Ram80Disk
|
||||
}
|
||||
HetznerServerType::SharedIntel4Core8Ram80Disk => {
|
||||
common::HetznerServerType::SharedIntel4Core8Ram80Disk
|
||||
}
|
||||
HetznerServerType::SharedAmd4Core8Ram160Disk => {
|
||||
common::HetznerServerType::SharedAmd4Core8Ram160Disk
|
||||
}
|
||||
HetznerServerType::SharedArm8Core16Ram160Disk => {
|
||||
common::HetznerServerType::SharedArm8Core16Ram160Disk
|
||||
}
|
||||
HetznerServerType::SharedIntel8Core16Ram160Disk => {
|
||||
common::HetznerServerType::SharedIntel8Core16Ram160Disk
|
||||
}
|
||||
HetznerServerType::SharedAmd8Core16Ram240Disk => {
|
||||
common::HetznerServerType::SharedAmd8Core16Ram240Disk
|
||||
}
|
||||
HetznerServerType::SharedArm16Core32Ram320Disk => {
|
||||
common::HetznerServerType::SharedArm16Core32Ram320Disk
|
||||
}
|
||||
HetznerServerType::SharedIntel16Core32Ram320Disk => {
|
||||
common::HetznerServerType::SharedIntel16Core32Ram320Disk
|
||||
}
|
||||
HetznerServerType::SharedAmd16Core32Ram360Disk => {
|
||||
common::HetznerServerType::SharedAmd16Core32Ram360Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd2Core8Ram80Disk => {
|
||||
common::HetznerServerType::DedicatedAmd2Core8Ram80Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd4Core16Ram160Disk => {
|
||||
common::HetznerServerType::DedicatedAmd4Core16Ram160Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd8Core32Ram240Disk => {
|
||||
common::HetznerServerType::DedicatedAmd8Core32Ram240Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd16Core64Ram360Disk => {
|
||||
common::HetznerServerType::DedicatedAmd16Core64Ram360Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd32Core128Ram600Disk => {
|
||||
common::HetznerServerType::DedicatedAmd32Core128Ram600Disk
|
||||
}
|
||||
HetznerServerType::DedicatedAmd48Core192Ram960Disk => {
|
||||
common::HetznerServerType::DedicatedAmd48Core192Ram960Disk
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,8 +1,5 @@
|
||||
pub mod aws;
|
||||
|
||||
#[allow(unused)]
|
||||
pub mod hetzner;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum BuildCleanupData {
|
||||
/// Nothing to clean up
|
||||
|
||||
@@ -8,7 +8,7 @@ use komodo_client::entities::{
|
||||
config::core::{
|
||||
AwsCredentials, CoreConfig, DatabaseConfig, Env,
|
||||
GithubWebhookAppConfig, GithubWebhookAppInstallationConfig,
|
||||
HetznerCredentials, OauthCredentials,
|
||||
OauthCredentials,
|
||||
},
|
||||
logger::LogConfig,
|
||||
};
|
||||
@@ -120,11 +120,6 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
.komodo_aws_secret_access_key)
|
||||
.unwrap_or(config.aws.secret_access_key),
|
||||
},
|
||||
hetzner: HetznerCredentials {
|
||||
token: maybe_read_item_from_file(env.komodo_hetzner_token_file, env
|
||||
.komodo_hetzner_token)
|
||||
.unwrap_or(config.hetzner.token),
|
||||
},
|
||||
github_webhook_app: GithubWebhookAppConfig {
|
||||
app_id: maybe_read_item_from_file(env.komodo_github_webhook_app_app_id_file, env
|
||||
.komodo_github_webhook_app_app_id)
|
||||
@@ -177,6 +172,8 @@ pub fn core_config() -> &'static CoreConfig {
|
||||
.unwrap_or(config.ui_write_disabled),
|
||||
disable_confirm_dialog: env.komodo_disable_confirm_dialog
|
||||
.unwrap_or(config.disable_confirm_dialog),
|
||||
disable_websocket_reconnect: env.komodo_disable_websocket_reconnect
|
||||
.unwrap_or(config.disable_websocket_reconnect),
|
||||
enable_new_users: env.komodo_enable_new_users
|
||||
.unwrap_or(config.enable_new_users),
|
||||
disable_user_registration: env.komodo_disable_user_registration
|
||||
|
||||
@@ -12,7 +12,6 @@ use komodo_client::entities::{
|
||||
provider::{DockerRegistryAccount, GitProviderAccount},
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
stats::SystemStatsRecord,
|
||||
sync::ResourceSync,
|
||||
@@ -50,7 +49,6 @@ pub struct DbClient {
|
||||
pub procedures: Collection<Procedure>,
|
||||
pub actions: Collection<Action>,
|
||||
pub alerters: Collection<Alerter>,
|
||||
pub server_templates: Collection<ServerTemplate>,
|
||||
pub resource_syncs: Collection<ResourceSync>,
|
||||
pub stacks: Collection<Stack>,
|
||||
//
|
||||
@@ -120,8 +118,6 @@ impl DbClient {
|
||||
alerters: resource_collection(&db, "Alerter").await?,
|
||||
procedures: resource_collection(&db, "Procedure").await?,
|
||||
actions: resource_collection(&db, "Action").await?,
|
||||
server_templates: resource_collection(&db, "ServerTemplate")
|
||||
.await?,
|
||||
resource_syncs: resource_collection(&db, "ResourceSync")
|
||||
.await?,
|
||||
stacks: resource_collection(&db, "Stack").await?,
|
||||
|
||||
@@ -7,7 +7,6 @@ use komodo_client::entities::{
|
||||
builder::{AwsBuilderConfig, Builder, BuilderConfig},
|
||||
komodo_timestamp,
|
||||
server::Server,
|
||||
server_template::aws::AwsServerTemplateConfig,
|
||||
update::{Log, Update},
|
||||
};
|
||||
use periphery_client::{
|
||||
@@ -88,11 +87,8 @@ async fn get_aws_builder(
|
||||
|
||||
let version = version.map(|v| format!("-v{v}")).unwrap_or_default();
|
||||
let instance_name = format!("BUILDER-{resource_name}{version}");
|
||||
let Ec2Instance { instance_id, ip } = launch_ec2_instance(
|
||||
&instance_name,
|
||||
AwsServerTemplateConfig::from_builder_config(&config),
|
||||
)
|
||||
.await?;
|
||||
let Ec2Instance { instance_id, ip } =
|
||||
launch_ec2_instance(&instance_name, &config).await?;
|
||||
|
||||
info!("ec2 instance launched");
|
||||
|
||||
|
||||
@@ -1,33 +1,18 @@
|
||||
use std::{str::FromStr, time::Duration};
|
||||
use std::time::Duration;
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use futures::future::join_all;
|
||||
use komodo_client::{
|
||||
api::write::{CreateBuilder, CreateServer},
|
||||
entities::{
|
||||
ResourceTarget,
|
||||
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
|
||||
komodo_timestamp,
|
||||
permission::{Permission, PermissionLevel, UserTarget},
|
||||
server::{PartialServerConfig, Server},
|
||||
sync::ResourceSync,
|
||||
update::Log,
|
||||
user::{User, system_user},
|
||||
},
|
||||
use komodo_client::entities::{
|
||||
ResourceTarget,
|
||||
permission::{Permission, PermissionLevel, UserTarget},
|
||||
server::Server,
|
||||
user::User,
|
||||
};
|
||||
use mongo_indexed::Document;
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::bson::{Bson, doc, oid::ObjectId, to_document},
|
||||
};
|
||||
use mungos::mongodb::bson::{Bson, doc};
|
||||
use periphery_client::PeripheryClient;
|
||||
use rand::Rng;
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::write::WriteArgs, config::core_config, resource,
|
||||
state::db_client,
|
||||
};
|
||||
use crate::{config::core_config, state::db_client};
|
||||
|
||||
pub mod action_state;
|
||||
pub mod builder;
|
||||
@@ -203,160 +188,3 @@ pub fn flatten_document(doc: Document) -> Document {
|
||||
|
||||
target
|
||||
}
|
||||
|
||||
pub async fn startup_cleanup() {
|
||||
tokio::join!(
|
||||
startup_in_progress_update_cleanup(),
|
||||
startup_open_alert_cleanup(),
|
||||
);
|
||||
}
|
||||
|
||||
/// Run on startup, as no updates should be in progress on startup
|
||||
async fn startup_in_progress_update_cleanup() {
|
||||
let log = Log::error(
|
||||
"Komodo shutdown",
|
||||
String::from(
|
||||
"Komodo shutdown during execution. If this is a build, the builder may not have been terminated.",
|
||||
),
|
||||
);
|
||||
// This static log won't fail to serialize, unwrap ok.
|
||||
let log = to_document(&log).unwrap();
|
||||
if let Err(e) = db_client()
|
||||
.updates
|
||||
.update_many(
|
||||
doc! { "status": "InProgress" },
|
||||
doc! {
|
||||
"$set": {
|
||||
"status": "Complete",
|
||||
"success": false,
|
||||
},
|
||||
"$push": {
|
||||
"logs": log
|
||||
}
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
error!("failed to cleanup in progress updates on startup | {e:#}")
|
||||
}
|
||||
}
|
||||
|
||||
/// Run on startup, ensure open alerts pointing to invalid resources are closed.
|
||||
async fn startup_open_alert_cleanup() {
|
||||
let db = db_client();
|
||||
let Ok(alerts) =
|
||||
find_collect(&db.alerts, doc! { "resolved": false }, None)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!(
|
||||
"failed to list all alerts for startup open alert cleanup | {e:?}"
|
||||
)
|
||||
})
|
||||
else {
|
||||
return;
|
||||
};
|
||||
let futures = alerts.into_iter().map(|alert| async move {
|
||||
match alert.target {
|
||||
ResourceTarget::Server(id) => {
|
||||
resource::get::<Server>(&id)
|
||||
.await
|
||||
.is_err()
|
||||
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::get::<ResourceSync>(&id)
|
||||
.await
|
||||
.is_err()
|
||||
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
|
||||
}
|
||||
// No other resources should have open alerts.
|
||||
_ => ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok(),
|
||||
}
|
||||
});
|
||||
let to_update_ids = join_all(futures)
|
||||
.await
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect::<Vec<_>>();
|
||||
if let Err(e) = db
|
||||
.alerts
|
||||
.update_many(
|
||||
doc! { "_id": { "$in": to_update_ids } },
|
||||
doc! { "$set": {
|
||||
"resolved": true,
|
||||
"resolved_ts": komodo_timestamp()
|
||||
} },
|
||||
)
|
||||
.await
|
||||
{
|
||||
error!(
|
||||
"failed to clean up invalid open alerts on startup | {e:#}"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensures a default server / builder exists with the defined address
|
||||
pub async fn ensure_first_server_and_builder() {
|
||||
let first_server = &core_config().first_server;
|
||||
if first_server.is_empty() {
|
||||
return;
|
||||
}
|
||||
let db = db_client();
|
||||
let Ok(server) = db
|
||||
.servers
|
||||
.find_one(Document::new())
|
||||
.await
|
||||
.inspect_err(|e| error!("Failed to initialize 'first_server'. Failed to query db. {e:?}"))
|
||||
else {
|
||||
return;
|
||||
};
|
||||
let server = if let Some(server) = server {
|
||||
server
|
||||
} else {
|
||||
match (CreateServer {
|
||||
name: format!("server-{}", random_string(5)),
|
||||
config: PartialServerConfig {
|
||||
address: Some(first_server.to_string()),
|
||||
enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
})
|
||||
.resolve(&WriteArgs {
|
||||
user: system_user().to_owned(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Failed to initialize 'first_server'. Failed to CreateServer. {:#}",
|
||||
e.error
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
};
|
||||
let Ok(None) = db.builders
|
||||
.find_one(Document::new()).await
|
||||
.inspect_err(|e| error!("Failed to initialize 'first_builder' | Failed to query db | {e:?}")) else {
|
||||
return;
|
||||
};
|
||||
if let Err(e) = (CreateBuilder {
|
||||
name: String::from("local"),
|
||||
config: PartialBuilderConfig::Server(
|
||||
PartialServerBuilderConfig {
|
||||
server_id: Some(server.id),
|
||||
},
|
||||
),
|
||||
})
|
||||
.resolve(&WriteArgs {
|
||||
user: system_user().to_owned(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
error!(
|
||||
"Failed to initialize 'first_builder' | Failed to CreateBuilder | {:#}",
|
||||
e.error
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -166,6 +166,13 @@ async fn execute_stage(
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
Execution::BatchPullStack(exec) => {
|
||||
extend_batch_exection::<BatchPullStack>(
|
||||
&exec.pattern,
|
||||
&mut executions,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
Execution::BatchDestroyStack(exec) => {
|
||||
extend_batch_exection::<BatchDestroyStack>(
|
||||
&exec.pattern,
|
||||
@@ -985,6 +992,12 @@ async fn execute_execution(
|
||||
)
|
||||
.await?
|
||||
}
|
||||
Execution::BatchPullStack(_) => {
|
||||
// All batch executions must be expanded in `execute_stage`
|
||||
return Err(anyhow!(
|
||||
"Batch method BatchPullStack not implemented correctly"
|
||||
));
|
||||
}
|
||||
Execution::StartStack(req) => {
|
||||
let req = ExecuteRequest::StartStack(req);
|
||||
let update = init_execution_update(&req, &user).await?;
|
||||
@@ -1275,6 +1288,16 @@ impl ExtendBatch for BatchDeployStackIfChanged {
|
||||
}
|
||||
}
|
||||
|
||||
impl ExtendBatch for BatchPullStack {
|
||||
type Resource = Stack;
|
||||
fn single_execution(stack: String) -> Execution {
|
||||
Execution::PullStack(PullStack {
|
||||
stack,
|
||||
services: Vec::new(),
|
||||
})
|
||||
}
|
||||
}
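// Illustrative expansion (a sketch of the assumed behavior, not code from this
// commit): with the `single_execution` impl above, a `BatchPullStack` whose
// pattern matches stacks "a" and "b" would be expanded by `execute_stage` into:
//
//   Execution::PullStack(PullStack { stack: "a".into(), services: Vec::new() })
//   Execution::PullStack(PullStack { stack: "b".into(), services: Vec::new() })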
|
||||
|
||||
impl ExtendBatch for BatchDestroyStack {
|
||||
type Resource = Stack;
|
||||
fn single_execution(stack: String) -> Execution {
|
||||
|
||||
@@ -18,7 +18,6 @@ use komodo_client::entities::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::{Server, ServerState},
|
||||
server_template::ServerTemplate,
|
||||
stack::{Stack, StackServiceNames, StackState},
|
||||
stats::SystemInformation,
|
||||
sync::ResourceSync,
|
||||
@@ -305,10 +304,6 @@ pub async fn get_user_permission_on_target(
|
||||
ResourceTarget::Action(id) => {
|
||||
get_user_permission_on_resource::<Action>(user, id).await
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
get_user_permission_on_resource::<ServerTemplate>(user, id)
|
||||
.await
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
get_user_permission_on_resource::<ResourceSync>(user, id).await
|
||||
}
|
||||
|
||||
@@ -9,7 +9,6 @@ use komodo_client::entities::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
update::{Update, UpdateListItem},
|
||||
@@ -385,16 +384,6 @@ pub async fn init_execution_update(
|
||||
return Ok(Default::default());
|
||||
}
|
||||
|
||||
// Server template
|
||||
ExecuteRequest::LaunchServer(data) => (
|
||||
Operation::LaunchServer,
|
||||
ResourceTarget::ServerTemplate(
|
||||
resource::get::<ServerTemplate>(&data.server_template)
|
||||
.await?
|
||||
.id,
|
||||
),
|
||||
),
|
||||
|
||||
// Resource Sync
|
||||
ExecuteRequest::RunSync(data) => (
|
||||
Operation::RunSync,
|
||||
@@ -446,6 +435,9 @@ pub async fn init_execution_update(
|
||||
resource::get::<Stack>(&data.stack).await?.id,
|
||||
),
|
||||
),
|
||||
ExecuteRequest::BatchPullStack(_data) => {
|
||||
return Ok(Default::default());
|
||||
}
|
||||
ExecuteRequest::RestartStack(data) => (
|
||||
if !data.services.is_empty() {
|
||||
Operation::RestartStackService
|
||||
|
||||
@@ -25,6 +25,7 @@ mod monitor;
|
||||
mod resource;
|
||||
mod schedule;
|
||||
mod stack;
|
||||
mod startup;
|
||||
mod state;
|
||||
mod sync;
|
||||
mod ts_client;
|
||||
@@ -44,22 +45,18 @@ async fn app() -> anyhow::Result<()> {
|
||||
info!("Komodo Core version: v{}", env!("CARGO_PKG_VERSION"));
|
||||
info!("{:?}", config.sanitized());
|
||||
|
||||
// Init jwt client to crash on failure
|
||||
state::jwt_client();
|
||||
tokio::join!(
|
||||
// Init db_client check to crash on db init failure
|
||||
state::init_db_client(),
|
||||
// Manage OIDC client (defined in config / env vars / compose secret file)
|
||||
auth::oidc::client::spawn_oidc_client_management()
|
||||
);
|
||||
tokio::join!(
|
||||
// Maybe initialize first server
|
||||
helpers::ensure_first_server_and_builder(),
|
||||
// Cleanup open updates / invalid alerts
|
||||
helpers::startup_cleanup(),
|
||||
);
|
||||
// init jwt client to crash on failure
|
||||
state::jwt_client();
|
||||
|
||||
// Spawn tasks
|
||||
// Run after db connection.
|
||||
startup::on_startup().await;
|
||||
|
||||
// Spawn background tasks
|
||||
monitor::spawn_monitor_loop();
|
||||
resource::spawn_resource_refresh_loop();
|
||||
resource::spawn_build_state_refresh_loop();
|
||||
@@ -82,6 +79,7 @@ async fn app() -> anyhow::Result<()> {
|
||||
.nest("/read", api::read::router())
|
||||
.nest("/write", api::write::router())
|
||||
.nest("/execute", api::execute::router())
|
||||
.nest("/terminal", api::terminal::router())
|
||||
.nest("/listener", listener::router())
|
||||
.nest("/ws", ws::router())
|
||||
.nest("/client", ts_client::router())
|
||||
|
||||
@@ -56,7 +56,6 @@ mod procedure;
|
||||
mod refresh;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod stack;
|
||||
mod sync;
|
||||
|
||||
@@ -773,9 +772,6 @@ fn resource_target<T: KomodoResource>(id: String) -> ResourceTarget {
|
||||
ResourceTargetVariant::Repo => ResourceTarget::Repo(id),
|
||||
ResourceTargetVariant::Alerter => ResourceTarget::Alerter(id),
|
||||
ResourceTargetVariant::Procedure => ResourceTarget::Procedure(id),
|
||||
ResourceTargetVariant::ServerTemplate => {
|
||||
ResourceTarget::ServerTemplate(id)
|
||||
}
|
||||
ResourceTargetVariant::ResourceSync => {
|
||||
ResourceTarget::ResourceSync(id)
|
||||
}
|
||||
@@ -1020,9 +1016,6 @@ where
|
||||
ResourceTarget::Stack(id) => ("recents.Stack", id),
|
||||
ResourceTarget::Builder(id) => ("recents.Builder", id),
|
||||
ResourceTarget::Alerter(id) => ("recents.Alerter", id),
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
("recents.ServerTemplate", id)
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => ("recents.ResourceSync", id),
|
||||
ResourceTarget::System(_) => return,
|
||||
};
|
||||
|
||||
@@ -641,6 +641,13 @@ async fn validate_config(
|
||||
.await?;
|
||||
params.stack = stack.id;
|
||||
}
|
||||
Execution::BatchPullStack(_params) => {
|
||||
if !user.admin {
|
||||
return Err(anyhow!(
|
||||
"Non admin user cannot configure Batch executions"
|
||||
));
|
||||
}
|
||||
}
|
||||
Execution::StartStack(params) => {
|
||||
let stack = super::get_check_permissions::<Stack>(
|
||||
¶ms.stack,
|
||||
|
||||
@@ -43,10 +43,11 @@ impl super::KomodoResource for Server {
|
||||
server: Resource<Self::Config, Self::Info>,
|
||||
) -> Self::ListItem {
|
||||
let status = server_status_cache().get(&server.id).await;
|
||||
let terminals_disabled = get_system_info(&server)
|
||||
.await
|
||||
.map(|i| i.terminals_disabled)
|
||||
.unwrap_or(true);
|
||||
let (terminals_disabled, container_exec_disabled) =
|
||||
get_system_info(&server)
|
||||
.await
|
||||
.map(|i| (i.terminals_disabled, i.container_exec_disabled))
|
||||
.unwrap_or((true, true));
|
||||
ServerListItem {
|
||||
name: server.name,
|
||||
id: server.id,
|
||||
@@ -63,6 +64,7 @@ impl super::KomodoResource for Server {
|
||||
send_mem_alerts: server.config.send_mem_alerts,
|
||||
send_disk_alerts: server.config.send_disk_alerts,
|
||||
terminals_disabled,
|
||||
container_exec_disabled,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,149 +0,0 @@
|
||||
use komodo_client::entities::{
|
||||
MergePartial, Operation, ResourceTarget, ResourceTargetVariant,
|
||||
resource::Resource,
|
||||
server_template::{
|
||||
PartialServerTemplateConfig, ServerTemplate,
|
||||
ServerTemplateConfig, ServerTemplateConfigDiff,
|
||||
ServerTemplateConfigVariant, ServerTemplateListItem,
|
||||
ServerTemplateListItemInfo, ServerTemplateQuerySpecifics,
|
||||
},
|
||||
update::Update,
|
||||
user::User,
|
||||
};
|
||||
use mungos::mongodb::{
|
||||
Collection,
|
||||
bson::{Document, to_document},
|
||||
};
|
||||
|
||||
use crate::state::db_client;
|
||||
|
||||
impl super::KomodoResource for ServerTemplate {
|
||||
type Config = ServerTemplateConfig;
|
||||
type PartialConfig = PartialServerTemplateConfig;
|
||||
type ConfigDiff = ServerTemplateConfigDiff;
|
||||
type Info = ();
|
||||
type ListItem = ServerTemplateListItem;
|
||||
type QuerySpecifics = ServerTemplateQuerySpecifics;
|
||||
|
||||
fn resource_type() -> ResourceTargetVariant {
|
||||
ResourceTargetVariant::ServerTemplate
|
||||
}
|
||||
|
||||
fn resource_target(id: impl Into<String>) -> ResourceTarget {
|
||||
ResourceTarget::ServerTemplate(id.into())
|
||||
}
|
||||
|
||||
fn coll() -> &'static Collection<Resource<Self::Config, Self::Info>>
|
||||
{
|
||||
&db_client().server_templates
|
||||
}
|
||||
|
||||
async fn to_list_item(
|
||||
server_template: Resource<Self::Config, Self::Info>,
|
||||
) -> Self::ListItem {
|
||||
let (template_type, instance_type) = match server_template.config
|
||||
{
|
||||
ServerTemplateConfig::Aws(config) => (
|
||||
ServerTemplateConfigVariant::Aws.to_string(),
|
||||
Some(config.instance_type),
|
||||
),
|
||||
ServerTemplateConfig::Hetzner(config) => (
|
||||
ServerTemplateConfigVariant::Hetzner.to_string(),
|
||||
Some(config.server_type.as_ref().to_string()),
|
||||
),
|
||||
};
|
||||
ServerTemplateListItem {
|
||||
name: server_template.name,
|
||||
id: server_template.id,
|
||||
tags: server_template.tags,
|
||||
resource_type: ResourceTargetVariant::ServerTemplate,
|
||||
info: ServerTemplateListItemInfo {
|
||||
provider: template_type.to_string(),
|
||||
instance_type,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
async fn busy(_id: &String) -> anyhow::Result<bool> {
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
// CREATE
|
||||
|
||||
fn create_operation() -> Operation {
|
||||
Operation::CreateServerTemplate
|
||||
}
|
||||
|
||||
fn user_can_create(user: &User) -> bool {
|
||||
user.admin
|
||||
}
|
||||
|
||||
async fn validate_create_config(
|
||||
_config: &mut Self::PartialConfig,
|
||||
_user: &User,
|
||||
) -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn post_create(
|
||||
_created: &Resource<Self::Config, Self::Info>,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// UPDATE
|
||||
|
||||
fn update_operation() -> Operation {
|
||||
Operation::UpdateServerTemplate
|
||||
}
|
||||
|
||||
async fn validate_update_config(
|
||||
_id: &str,
|
||||
_config: &mut Self::PartialConfig,
|
||||
_user: &User,
|
||||
) -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn update_document(
|
||||
original: Resource<Self::Config, Self::Info>,
|
||||
config: Self::PartialConfig,
|
||||
) -> Result<Document, mungos::mongodb::bson::ser::Error> {
|
||||
let config = original.config.merge_partial(config);
|
||||
to_document(&config)
|
||||
}
|
||||
|
||||
async fn post_update(
|
||||
_updated: &Self,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// RENAME
|
||||
|
||||
fn rename_operation() -> Operation {
|
||||
Operation::RenameServerTemplate
|
||||
}
|
||||
|
||||
// DELETE
|
||||
|
||||
fn delete_operation() -> Operation {
|
||||
Operation::DeleteServerTemplate
|
||||
}
|
||||
|
||||
async fn pre_delete(
|
||||
_resource: &Resource<Self::Config, Self::Info>,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn post_delete(
|
||||
_resource: &Resource<Self::Config, Self::Info>,
|
||||
_update: &mut Update,
|
||||
) -> anyhow::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
228 bin/core/src/startup.rs Normal file
@@ -0,0 +1,228 @@
|
||||
use std::str::FromStr;
|
||||
|
||||
use futures::future::join_all;
|
||||
use komodo_client::{
|
||||
api::write::{CreateBuilder, CreateServer},
|
||||
entities::{
|
||||
ResourceTarget,
|
||||
builder::{PartialBuilderConfig, PartialServerBuilderConfig},
|
||||
komodo_timestamp,
|
||||
server::{PartialServerConfig, Server},
|
||||
sync::ResourceSync,
|
||||
update::Log,
|
||||
user::system_user,
|
||||
},
|
||||
};
|
||||
use mungos::{
|
||||
find::find_collect,
|
||||
mongodb::bson::{Document, doc, oid::ObjectId, to_document},
|
||||
};
|
||||
use resolver_api::Resolve;
|
||||
|
||||
use crate::{
|
||||
api::write::WriteArgs, config::core_config, helpers::random_string,
|
||||
resource, state::db_client,
|
||||
};
|
||||
|
||||
/// This function should be run on startup,
|
||||
/// after the db client has been initialized
|
||||
pub async fn on_startup() {
|
||||
tokio::join!(
|
||||
in_progress_update_cleanup(),
|
||||
open_alert_cleanup(),
|
||||
ensure_first_server_and_builder(),
|
||||
clean_up_server_templates(),
|
||||
);
|
||||
}
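// Call-order sketch (based on the core entrypoint diff above; the jwt / OIDC
// setup that runs alongside it is elided): `on_startup` is only awaited after
// the db client has been initialized, since every cleanup task here queries
// the database.
//
//   state::init_db_client().await;
//   startup::on_startup().await;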
|
||||
|
||||
async fn in_progress_update_cleanup() {
|
||||
let log = Log::error(
|
||||
"Komodo shutdown",
|
||||
String::from(
|
||||
"Komodo shutdown during execution. If this is a build, the builder may not have been terminated.",
|
||||
),
|
||||
);
|
||||
// This static log won't fail to serialize, unwrap ok.
|
||||
let log = to_document(&log).unwrap();
|
||||
if let Err(e) = db_client()
|
||||
.updates
|
||||
.update_many(
|
||||
doc! { "status": "InProgress" },
|
||||
doc! {
|
||||
"$set": {
|
||||
"status": "Complete",
|
||||
"success": false,
|
||||
},
|
||||
"$push": {
|
||||
"logs": log
|
||||
}
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
error!("failed to cleanup in progress updates on startup | {e:#}")
|
||||
}
|
||||
}
|
||||
|
||||
/// Run on startup, ensure open alerts pointing to invalid resources are closed.
|
||||
async fn open_alert_cleanup() {
|
||||
let db = db_client();
|
||||
let Ok(alerts) =
|
||||
find_collect(&db.alerts, doc! { "resolved": false }, None)
|
||||
.await
|
||||
.inspect_err(|e| {
|
||||
error!(
|
||||
"failed to list all alerts for startup open alert cleanup | {e:?}"
|
||||
)
|
||||
})
|
||||
else {
|
||||
return;
|
||||
};
|
||||
let futures = alerts.into_iter().map(|alert| async move {
|
||||
match alert.target {
|
||||
ResourceTarget::Server(id) => {
|
||||
resource::get::<Server>(&id)
|
||||
.await
|
||||
.is_err()
|
||||
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
resource::get::<ResourceSync>(&id)
|
||||
.await
|
||||
.is_err()
|
||||
.then(|| ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok()).flatten()
|
||||
}
|
||||
// No other resources should have open alerts.
|
||||
_ => ObjectId::from_str(&alert.id).inspect_err(|e| warn!("failed to clean up alert - id is invalid ObjectId | {e:?}")).ok(),
|
||||
}
|
||||
});
|
||||
let to_update_ids = join_all(futures)
|
||||
.await
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.collect::<Vec<_>>();
|
||||
if let Err(e) = db
|
||||
.alerts
|
||||
.update_many(
|
||||
doc! { "_id": { "$in": to_update_ids } },
|
||||
doc! { "$set": {
|
||||
"resolved": true,
|
||||
"resolved_ts": komodo_timestamp()
|
||||
} },
|
||||
)
|
||||
.await
|
||||
{
|
||||
error!(
|
||||
"failed to clean up invalid open alerts on startup | {e:#}"
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Ensures a default server / builder exists with the defined address
|
||||
async fn ensure_first_server_and_builder() {
|
||||
let first_server = &core_config().first_server;
|
||||
if first_server.is_empty() {
|
||||
return;
|
||||
}
|
||||
let db = db_client();
|
||||
let Ok(server) = db
|
||||
.servers
|
||||
.find_one(Document::new())
|
||||
.await
|
||||
.inspect_err(|e| error!("Failed to initialize 'first_server'. Failed to query db. {e:?}"))
|
||||
else {
|
||||
return;
|
||||
};
|
||||
let server = if let Some(server) = server {
|
||||
server
|
||||
} else {
|
||||
match (CreateServer {
|
||||
name: format!("server-{}", random_string(5)),
|
||||
config: PartialServerConfig {
|
||||
address: Some(first_server.to_string()),
|
||||
enabled: Some(true),
|
||||
..Default::default()
|
||||
},
|
||||
})
|
||||
.resolve(&WriteArgs {
|
||||
user: system_user().to_owned(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Failed to initialize 'first_server'. Failed to CreateServer. {:#}",
|
||||
e.error
|
||||
);
|
||||
return;
|
||||
}
|
||||
}
|
||||
};
|
||||
let Ok(None) = db.builders
|
||||
.find_one(Document::new()).await
|
||||
.inspect_err(|e| error!("Failed to initialize 'first_builder' | Failed to query db | {e:?}")) else {
|
||||
return;
|
||||
};
|
||||
if let Err(e) = (CreateBuilder {
|
||||
name: String::from("local"),
|
||||
config: PartialBuilderConfig::Server(
|
||||
PartialServerBuilderConfig {
|
||||
server_id: Some(server.id),
|
||||
},
|
||||
),
|
||||
})
|
||||
.resolve(&WriteArgs {
|
||||
user: system_user().to_owned(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
error!(
|
||||
"Failed to initialize 'first_builder' | Failed to CreateBuilder | {:#}",
|
||||
e.error
|
||||
);
|
||||
}
|
||||
}
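// Sketch of the intended effect (the example address is an assumption, not
// taken from this commit): with `first_server` set in the core config, e.g.
//
//   first_server = "https://periphery:8120"
//
// an enabled Server pointing at that address is created when no servers exist
// yet, along with a Server-type Builder named "local" targeting it.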
|
||||
|
||||
/// v1.17.5 removes the ServerTemplate resource.
|
||||
/// References to this resource type need to be cleaned up
|
||||
/// to avoid type errors reading from the database.
|
||||
async fn clean_up_server_templates() {
|
||||
let db = db_client();
|
||||
tokio::join!(
|
||||
async {
|
||||
db.permissions
|
||||
.delete_many(doc! {
|
||||
"resource_target.type": "ServerTemplate",
|
||||
})
|
||||
.await
|
||||
.expect(
|
||||
"Failed to clean up server template permissions on db",
|
||||
);
|
||||
},
|
||||
async {
|
||||
db.updates
|
||||
.delete_many(doc! { "target.type": "ServerTemplate" })
|
||||
.await
|
||||
.expect("Failed to clean up server template updates on db");
|
||||
},
|
||||
async {
|
||||
db.users
|
||||
.update_many(
|
||||
Document::new(),
|
||||
doc! { "$unset": { "recents.ServerTemplate": 1, "all.ServerTemplate": 1 } }
|
||||
)
|
||||
.await
|
||||
.expect("Failed to clean up server template updates on db");
|
||||
},
|
||||
async {
|
||||
db.user_groups
|
||||
.update_many(
|
||||
Document::new(),
|
||||
doc! { "$unset": { "all.ServerTemplate": 1 } },
|
||||
)
|
||||
.await
|
||||
.expect("Failed to clean up server template updates on db");
|
||||
},
|
||||
);
|
||||
}
|
||||
@@ -270,9 +270,6 @@ pub fn extend_resources(
|
||||
resources
|
||||
.builders
|
||||
.extend(filter_by_tag(more.builders, match_tags));
|
||||
resources
|
||||
.server_templates
|
||||
.extend(filter_by_tag(more.server_templates, match_tags));
|
||||
resources
|
||||
.resource_syncs
|
||||
.extend(filter_by_tag(more.resource_syncs, match_tags));
|
||||
|
||||
@@ -11,7 +11,6 @@ use komodo_client::entities::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
tag::Tag,
|
||||
@@ -166,7 +165,6 @@ pub struct AllResourcesById {
|
||||
pub actions: HashMap<String, Action>,
|
||||
pub builders: HashMap<String, Builder>,
|
||||
pub alerters: HashMap<String, Alerter>,
|
||||
pub templates: HashMap<String, ServerTemplate>,
|
||||
pub syncs: HashMap<String, ResourceSync>,
|
||||
}
|
||||
|
||||
@@ -210,10 +208,6 @@ impl AllResourcesById {
|
||||
id_to_tags, match_tags,
|
||||
)
|
||||
.await?,
|
||||
templates: crate::resource::get_id_to_resource_map::<
|
||||
ServerTemplate,
|
||||
>(id_to_tags, match_tags)
|
||||
.await?,
|
||||
syncs: crate::resource::get_id_to_resource_map::<ResourceSync>(
|
||||
id_to_tags, match_tags,
|
||||
)
|
||||
|
||||
@@ -13,7 +13,6 @@ use komodo_client::{
|
||||
procedure::Procedure,
|
||||
repo::Repo,
|
||||
server::Server,
|
||||
server_template::ServerTemplate,
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
tag::Tag,
|
||||
@@ -191,18 +190,6 @@ impl ResourceSyncTrait for Builder {
|
||||
|
||||
impl ExecuteResourceSync for Builder {}
|
||||
|
||||
impl ResourceSyncTrait for ServerTemplate {
|
||||
fn get_diff(
|
||||
original: Self::Config,
|
||||
update: Self::PartialConfig,
|
||||
_resources: &AllResourcesById,
|
||||
) -> anyhow::Result<Self::ConfigDiff> {
|
||||
Ok(original.partial_diff(update))
|
||||
}
|
||||
}
|
||||
|
||||
impl ExecuteResourceSync for ServerTemplate {}
|
||||
|
||||
impl ResourceSyncTrait for Action {
|
||||
fn get_diff(
|
||||
original: Self::Config,
|
||||
@@ -614,6 +601,7 @@ impl ResourceSyncTrait for Procedure {
|
||||
.map(|s| s.name.clone())
|
||||
.unwrap_or_default();
|
||||
}
|
||||
Execution::BatchPullStack(_config) => {}
|
||||
Execution::StartStack(config) => {
|
||||
config.stack = resources
|
||||
.stacks
|
||||
|
||||
@@ -13,7 +13,6 @@ use komodo_client::{
|
||||
repo::Repo,
|
||||
resource::Resource,
|
||||
server::Server,
|
||||
server_template::{PartialServerTemplateConfig, ServerTemplate},
|
||||
stack::Stack,
|
||||
sync::ResourceSync,
|
||||
tag::Tag,
|
||||
@@ -349,25 +348,6 @@ impl ToToml for Repo {
|
||||
}
|
||||
}
|
||||
|
||||
impl ToToml for ServerTemplate {
|
||||
fn push_additional(
|
||||
resource: ResourceToml<Self::PartialConfig>,
|
||||
toml: &mut String,
|
||||
) {
|
||||
let empty_params = match resource.config {
|
||||
PartialServerTemplateConfig::Aws(config) => config.is_none(),
|
||||
PartialServerTemplateConfig::Hetzner(config) => {
|
||||
config.is_none()
|
||||
}
|
||||
};
|
||||
if empty_params {
|
||||
// toml_pretty will remove empty map
|
||||
// but in this case it's needed to deserialize the enums.
|
||||
toml.push_str("\nparams = {}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ToToml for Builder {
|
||||
fn replace_ids(
|
||||
resource: &mut Resource<Self::Config, Self::Info>,
|
||||
@@ -747,6 +727,7 @@ impl ToToml for Procedure {
|
||||
.map(|r| &r.name)
|
||||
.unwrap_or(&String::new()),
|
||||
),
|
||||
Execution::BatchPullStack(_exec) => {}
|
||||
Execution::StartStack(exec) => exec.stack.clone_from(
|
||||
all
|
||||
.stacks
|
||||
|
||||
@@ -285,13 +285,6 @@ pub async fn get_updates_for_execution(
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
*id = all_resources
|
||||
.templates
|
||||
.get(id)
|
||||
.map(|b| b.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
*id = all_resources
|
||||
.syncs
|
||||
@@ -737,19 +730,6 @@ async fn expand_user_group_permissions(
|
||||
});
|
||||
expanded.extend(permissions);
|
||||
}
|
||||
ResourceTargetVariant::ServerTemplate => {
|
||||
let permissions = all_resources
|
||||
.templates
|
||||
.values()
|
||||
.filter(|resource| regex.is_match(&resource.name))
|
||||
.map(|resource| PermissionToml {
|
||||
target: ResourceTarget::ServerTemplate(
|
||||
resource.name.clone(),
|
||||
),
|
||||
level: permission.level,
|
||||
});
|
||||
expanded.extend(permissions);
|
||||
}
|
||||
ResourceTargetVariant::ResourceSync => {
|
||||
let permissions = all_resources
|
||||
.syncs
|
||||
@@ -903,13 +883,6 @@ pub async fn convert_user_groups(
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::ServerTemplate(id) => {
|
||||
*id = all
|
||||
.templates
|
||||
.get(id)
|
||||
.map(|r| r.name.clone())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
ResourceTarget::ResourceSync(id) => {
|
||||
*id = all
|
||||
.syncs
|
||||
|
||||
bin/core/src/ws/container.rs (Normal file, 81 additions)
@@ -0,0 +1,81 @@
|
||||
use axum::{
|
||||
extract::{Query, WebSocketUpgrade, ws::Message},
|
||||
response::IntoResponse,
|
||||
};
|
||||
use futures::SinkExt;
|
||||
use komodo_client::{
|
||||
api::terminal::ConnectContainerExecQuery,
|
||||
entities::{permission::PermissionLevel, server::Server},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
helpers::periphery_client, resource, ws::core_periphery_forward_ws,
|
||||
};
|
||||
|
||||
#[instrument(name = "ConnectContainerExec", skip(ws))]
|
||||
pub async fn handler(
|
||||
Query(ConnectContainerExecQuery {
|
||||
server,
|
||||
container,
|
||||
shell,
|
||||
}): Query<ConnectContainerExecQuery>,
|
||||
ws: WebSocketUpgrade,
|
||||
) -> impl IntoResponse {
|
||||
ws.on_upgrade(|socket| async move {
|
||||
let Some((mut client_socket, user)) = super::ws_login(socket).await
|
||||
else {
|
||||
return;
|
||||
};
|
||||
|
||||
let server = match resource::get_check_permissions::<Server>(
|
||||
&server,
|
||||
&user,
|
||||
PermissionLevel::Write,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
debug!("could not get server | {e:#}");
|
||||
let _ =
|
||||
client_socket.send(Message::text(format!("ERROR: {e:#}"))).await;
|
||||
let _ = client_socket.close().await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let periphery = match periphery_client(&server) {
|
||||
Ok(periphery) => periphery,
|
||||
Err(e) => {
|
||||
debug!("couldn't get periphery | {e:#}");
|
||||
let _ =
|
||||
client_socket.send(Message::text(format!("ERROR: {e:#}"))).await;
|
||||
let _ = client_socket.close().await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
trace!("connecting to periphery container exec websocket");
|
||||
|
||||
let periphery_socket = match periphery
|
||||
.connect_container_exec(
|
||||
container,
|
||||
shell
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(ws) => ws,
|
||||
Err(e) => {
|
||||
debug!("Failed connect to periphery container exec websocket | {e:#}");
|
||||
let _ =
|
||||
client_socket.send(Message::text(format!("ERROR: {e:#}"))).await;
|
||||
let _ = client_socket.close().await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
trace!("connected to periphery container exec websocket");
|
||||
|
||||
core_periphery_forward_ws(client_socket, periphery_socket).await
|
||||
})
|
||||
}
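For reference, this handler is reached over the new `/container` websocket route registered in `router()` below. A minimal sketch of opening it from a client with `tokio-tungstenite` follows; the host, the `/ws` mount prefix, and the login handshake details are assumptions, while the `server` / `container` / `shell` query parameters come from `ConnectContainerExecQuery` in this diff.

```rust
// Hedged sketch: open the Core container-exec websocket with tokio-tungstenite.
// Host and `/ws` prefix are placeholders; query values are illustrative and
// should be URL-encoded in real use.
use tokio_tungstenite::connect_async;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
  let url = format!(
    "wss://komodo.example.com/ws/container?server={}&container={}&shell={}",
    "my-server", "my-container", "sh"
  );
  let (ws, _response) = connect_async(url).await?;
  // The handler runs `ws_login` first, so the client is expected to send a
  // login message (see `WsLoginMessage`) before any terminal I/O flows.
  drop(ws);
  Ok(())
}
```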
|
||||
@@ -5,12 +5,18 @@ use crate::{
|
||||
use anyhow::anyhow;
|
||||
use axum::{
|
||||
Router,
|
||||
extract::ws::{Message, WebSocket},
|
||||
extract::ws::{CloseFrame, Message, Utf8Bytes, WebSocket},
|
||||
routing::get,
|
||||
};
|
||||
use futures::SinkExt;
|
||||
use futures::{SinkExt, StreamExt};
|
||||
use komodo_client::{entities::user::User, ws::WsLoginMessage};
|
||||
use tokio::net::TcpStream;
|
||||
use tokio_tungstenite::{
|
||||
MaybeTlsStream, WebSocketStream, tungstenite,
|
||||
};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
mod container;
|
||||
mod terminal;
|
||||
mod update;
|
||||
|
||||
@@ -18,6 +24,7 @@ pub fn router() -> Router {
|
||||
Router::new()
|
||||
.route("/update", get(update::handler))
|
||||
.route("/terminal", get(terminal::handler))
|
||||
.route("/container", get(container::handler))
|
||||
}
|
||||
|
||||
#[instrument(level = "debug")]
|
||||
@@ -110,3 +117,127 @@ async fn check_user_valid(user_id: &str) -> anyhow::Result<User> {
|
||||
}
|
||||
Ok(user)
|
||||
}
|
||||
|
||||
async fn core_periphery_forward_ws(
|
||||
client_socket: axum::extract::ws::WebSocket,
|
||||
periphery_socket: WebSocketStream<MaybeTlsStream<TcpStream>>,
|
||||
) {
|
||||
let (mut periphery_send, mut periphery_receive) =
|
||||
periphery_socket.split();
|
||||
let (mut core_send, mut core_receive) = client_socket.split();
|
||||
let cancel = CancellationToken::new();
|
||||
|
||||
trace!("starting ws exchange");
|
||||
|
||||
let core_to_periphery = async {
|
||||
loop {
|
||||
let res = tokio::select! {
|
||||
res = core_receive.next() => res,
|
||||
_ = cancel.cancelled() => {
|
||||
trace!("core to periphery read: cancelled from inside");
|
||||
break;
|
||||
}
|
||||
};
|
||||
match res {
|
||||
Some(Ok(msg)) => {
|
||||
if let Err(e) =
|
||||
periphery_send.send(axum_to_tungstenite(msg)).await
|
||||
{
|
||||
debug!(
|
||||
"Failed to send terminal message | {e:?}",
|
||||
);
|
||||
cancel.cancel();
|
||||
break;
|
||||
};
|
||||
}
|
||||
Some(Err(_e)) => {
|
||||
cancel.cancel();
|
||||
break;
|
||||
}
|
||||
None => {
|
||||
cancel.cancel();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let periphery_to_core = async {
|
||||
loop {
|
||||
let res = tokio::select! {
|
||||
res = periphery_receive.next() => res,
|
||||
_ = cancel.cancelled() => {
|
||||
trace!("periphery to core read: cancelled from inside");
|
||||
break;
|
||||
}
|
||||
};
|
||||
match res {
|
||||
Some(Ok(msg)) => {
|
||||
if let Err(e) =
|
||||
core_send.send(tungstenite_to_axum(msg)).await
|
||||
{
|
||||
debug!("{e:?}");
|
||||
cancel.cancel();
|
||||
break;
|
||||
};
|
||||
}
|
||||
Some(Err(e)) => {
|
||||
let _ = core_send
|
||||
.send(Message::text(format!(
|
||||
"ERROR: Failed to receive message from periphery | {e:?}"
|
||||
)))
|
||||
.await;
|
||||
cancel.cancel();
|
||||
break;
|
||||
}
|
||||
None => {
|
||||
let _ = core_send.send(Message::text("STREAM EOF")).await;
|
||||
cancel.cancel();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
tokio::join!(core_to_periphery, periphery_to_core);
|
||||
}
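The forwarding loop above hinges on a shared `CancellationToken`: whichever direction errors or hits EOF first cancels the token, which unblocks the other side's `select!` so both halves wind down before the `join!` returns. A stripped-down sketch of that same pattern in isolation; the mpsc channel below stands in for the two websocket halves and is an assumption, not part of this diff.

```rust
// Stand-alone sketch of the cancel-on-either-side pattern used above.
// The mpsc channel is a placeholder for one direction of the websocket pair.
use tokio_util::sync::CancellationToken;

#[tokio::main]
async fn main() {
  let (tx, mut rx) = tokio::sync::mpsc::channel::<String>(8);
  let cancel = CancellationToken::new();

  let reader = async {
    loop {
      tokio::select! {
        msg = rx.recv() => match msg {
          Some(msg) => println!("forward: {msg}"),
          // EOF on this side cancels the peer task.
          None => { cancel.cancel(); break; }
        },
        _ = cancel.cancelled() => break,
      }
    }
  };

  let writer = async {
    for i in 0..3 {
      if cancel.is_cancelled() { break; }
      let _ = tx.send(format!("message {i}")).await;
    }
    drop(tx); // closing the channel ends the reader
  };

  // Both halves run to completion, mirroring the tokio::join! above.
  tokio::join!(reader, writer);
}
```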
|
||||
|
||||
fn axum_to_tungstenite(msg: Message) -> tungstenite::Message {
|
||||
match msg {
|
||||
Message::Text(text) => tungstenite::Message::Text(
|
||||
// TODO: improve this conversion cost from axum ws library
|
||||
tungstenite::Utf8Bytes::from(text.to_string()),
|
||||
),
|
||||
Message::Binary(bytes) => tungstenite::Message::Binary(bytes),
|
||||
Message::Ping(bytes) => tungstenite::Message::Ping(bytes),
|
||||
Message::Pong(bytes) => tungstenite::Message::Pong(bytes),
|
||||
Message::Close(close_frame) => {
|
||||
tungstenite::Message::Close(close_frame.map(|cf| {
|
||||
tungstenite::protocol::CloseFrame {
|
||||
code: cf.code.into(),
|
||||
reason: tungstenite::Utf8Bytes::from(cf.reason.to_string()),
|
||||
}
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn tungstenite_to_axum(msg: tungstenite::Message) -> Message {
|
||||
match msg {
|
||||
tungstenite::Message::Text(text) => {
|
||||
Message::Text(Utf8Bytes::from(text.to_string()))
|
||||
}
|
||||
tungstenite::Message::Binary(bytes) => Message::Binary(bytes),
|
||||
tungstenite::Message::Ping(bytes) => Message::Ping(bytes),
|
||||
tungstenite::Message::Pong(bytes) => Message::Pong(bytes),
|
||||
tungstenite::Message::Close(close_frame) => {
|
||||
Message::Close(close_frame.map(|cf| CloseFrame {
|
||||
code: cf.code.into(),
|
||||
reason: Utf8Bytes::from(cf.reason.to_string()),
|
||||
}))
|
||||
}
|
||||
tungstenite::Message::Frame(_) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
}
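A quick sanity check for the two converters above, written as a test module intended to sit alongside them in the same file. This is a sketch: only the `Text` arm is exercised, and the module name is ours.

```rust
// Sketch: round-trip check for axum_to_tungstenite / tungstenite_to_axum above.
#[cfg(test)]
mod convert_tests {
  use super::*;
  use axum::extract::ws::Message;

  #[test]
  fn text_round_trips() {
    let out = axum_to_tungstenite(Message::text("hello"));
    let back = tungstenite_to_axum(out);
    match back {
      Message::Text(text) => assert_eq!(text.to_string(), "hello"),
      other => panic!("expected Text, got {other:?}"),
    }
  }
}
```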
|
||||
|
||||
@@ -1,31 +1,27 @@
|
||||
use axum::{
|
||||
extract::{
|
||||
Query, WebSocketUpgrade,
|
||||
ws::{CloseFrame, Message, Utf8Bytes},
|
||||
},
|
||||
extract::{Query, WebSocketUpgrade, ws::Message},
|
||||
response::IntoResponse,
|
||||
};
|
||||
use futures::{SinkExt, StreamExt};
|
||||
use futures::SinkExt;
|
||||
use komodo_client::{
|
||||
api::terminal::ConnectTerminalQuery,
|
||||
entities::{permission::PermissionLevel, server::Server},
|
||||
};
|
||||
use tokio_tungstenite::tungstenite;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::{helpers::periphery_client, resource};
|
||||
use crate::{
|
||||
helpers::periphery_client, resource, ws::core_periphery_forward_ws,
|
||||
};
|
||||
|
||||
#[instrument(name = "ConnectTerminal", skip(ws))]
|
||||
pub async fn handler(
|
||||
Query(ConnectTerminalQuery {
|
||||
server,
|
||||
terminal,
|
||||
init,
|
||||
}): Query<ConnectTerminalQuery>,
|
||||
Query(ConnectTerminalQuery { server, terminal }): Query<
|
||||
ConnectTerminalQuery,
|
||||
>,
|
||||
ws: WebSocketUpgrade,
|
||||
) -> impl IntoResponse {
|
||||
ws.on_upgrade(|socket| async move {
|
||||
let Some((mut socket, user)) = super::ws_login(socket).await
|
||||
let Some((mut client_socket, user)) =
|
||||
super::ws_login(socket).await
|
||||
else {
|
||||
return;
|
||||
};
|
||||
@@ -40,9 +36,10 @@ pub async fn handler(
|
||||
Ok(server) => server,
|
||||
Err(e) => {
|
||||
debug!("could not get server | {e:#}");
|
||||
let _ =
|
||||
socket.send(Message::text(format!("ERROR: {e:#}"))).await;
|
||||
let _ = socket.close().await;
|
||||
let _ = client_socket
|
||||
.send(Message::text(format!("ERROR: {e:#}")))
|
||||
.await;
|
||||
let _ = client_socket.close().await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
@@ -51,150 +48,31 @@ pub async fn handler(
|
||||
Ok(periphery) => periphery,
|
||||
Err(e) => {
|
||||
debug!("couldn't get periphery | {e:#}");
|
||||
let _ =
|
||||
socket.send(Message::text(format!("ERROR: {e:#}"))).await;
|
||||
let _ = socket.close().await;
|
||||
let _ = client_socket
|
||||
.send(Message::text(format!("ERROR: {e:#}")))
|
||||
.await;
|
||||
let _ = client_socket.close().await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
trace!("connecting to periphery terminal");
|
||||
trace!("connecting to periphery terminal websocket");
|
||||
|
||||
let periphery_socket = match periphery
|
||||
.connect_terminal(
|
||||
terminal,
|
||||
init,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(ws) => ws,
|
||||
Err(e) => {
|
||||
debug!("Failed connect to periphery terminal | {e:#}");
|
||||
let _ =
|
||||
socket.send(Message::text(format!("ERROR: {e:#}"))).await;
|
||||
let _ = socket.close().await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
trace!("connected to periphery terminal socket");
|
||||
|
||||
let (mut periphery_send, mut periphery_receive) =
|
||||
periphery_socket.split();
|
||||
let (mut core_send, mut core_receive) = socket.split();
|
||||
let cancel = CancellationToken::new();
|
||||
|
||||
trace!("starting ws exchange");
|
||||
|
||||
let core_to_periphery = async {
|
||||
loop {
|
||||
let res = tokio::select! {
|
||||
res = core_receive.next() => res,
|
||||
_ = cancel.cancelled() => {
|
||||
trace!("core to periphery read: cancelled from inside");
|
||||
break;
|
||||
}
|
||||
};
|
||||
match res {
|
||||
Some(Ok(msg)) => {
|
||||
if let Err(e) =
|
||||
periphery_send.send(axum_to_tungstenite(msg)).await
|
||||
{
|
||||
debug!("Failed to send terminal message to {} | {e:?}", server.name);
|
||||
cancel.cancel();
|
||||
break;
|
||||
};
|
||||
}
|
||||
Some(Err(_e)) => {
|
||||
cancel.cancel();
|
||||
break;
|
||||
}
|
||||
None => {
|
||||
cancel.cancel();
|
||||
break;
|
||||
}
|
||||
let periphery_socket =
|
||||
match periphery.connect_terminal(terminal).await {
|
||||
Ok(ws) => ws,
|
||||
Err(e) => {
|
||||
debug!("Failed connect to periphery terminal | {e:#}");
|
||||
let _ = client_socket
|
||||
.send(Message::text(format!("ERROR: {e:#}")))
|
||||
.await;
|
||||
let _ = client_socket.close().await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
let periphery_to_core = async {
|
||||
loop {
|
||||
let res = tokio::select! {
|
||||
res = periphery_receive.next() => res,
|
||||
_ = cancel.cancelled() => {
|
||||
trace!("periphery to core read: cancelled from inside");
|
||||
break;
|
||||
}
|
||||
};
|
||||
match res {
|
||||
Some(Ok(msg)) => {
|
||||
if let Err(e) =
|
||||
core_send.send(tungstenite_to_axum(msg)).await
|
||||
{
|
||||
debug!("{e:?}");
|
||||
cancel.cancel();
|
||||
break;
|
||||
};
|
||||
}
|
||||
Some(Err(e)) => {
|
||||
let _ = core_send
|
||||
.send(Message::text(format!(
|
||||
"ERROR: Failed to receive message from periphery | {e:?}"
|
||||
)))
|
||||
.await;
|
||||
cancel.cancel();
|
||||
break;
|
||||
}
|
||||
None => {
|
||||
let _ = core_send
|
||||
.send(Message::text("STREAM EOF"))
|
||||
.await;
|
||||
cancel.cancel();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
trace!("connected to periphery terminal websocket");
|
||||
|
||||
tokio::join!(core_to_periphery, periphery_to_core);
|
||||
core_periphery_forward_ws(client_socket, periphery_socket).await
|
||||
})
|
||||
}
|
||||
|
||||
fn axum_to_tungstenite(msg: Message) -> tungstenite::Message {
|
||||
match msg {
|
||||
Message::Text(text) => tungstenite::Message::Text(
|
||||
tungstenite::Utf8Bytes::from(text.to_string()),
|
||||
),
|
||||
Message::Binary(bytes) => tungstenite::Message::Binary(bytes),
|
||||
Message::Ping(bytes) => tungstenite::Message::Ping(bytes),
|
||||
Message::Pong(bytes) => tungstenite::Message::Pong(bytes),
|
||||
Message::Close(close_frame) => {
|
||||
tungstenite::Message::Close(close_frame.map(|cf| {
|
||||
tungstenite::protocol::CloseFrame {
|
||||
code: cf.code.into(),
|
||||
reason: tungstenite::Utf8Bytes::from(cf.reason.to_string()),
|
||||
}
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn tungstenite_to_axum(msg: tungstenite::Message) -> Message {
|
||||
match msg {
|
||||
tungstenite::Message::Text(text) => {
|
||||
Message::Text(Utf8Bytes::from(text.to_string()))
|
||||
}
|
||||
tungstenite::Message::Binary(bytes) => Message::Binary(bytes),
|
||||
tungstenite::Message::Ping(bytes) => Message::Ping(bytes),
|
||||
tungstenite::Message::Pong(bytes) => Message::Pong(bytes),
|
||||
tungstenite::Message::Close(close_frame) => {
|
||||
Message::Close(close_frame.map(|cf| CloseFrame {
|
||||
code: cf.code.into(),
|
||||
reason: Utf8Bytes::from(cf.reason.to_string()),
|
||||
}))
|
||||
}
|
||||
tungstenite::Message::Frame(_) => {
|
||||
unreachable!()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -33,6 +33,8 @@ resolver_api.workspace = true
|
||||
run_command.workspace = true
|
||||
svi.workspace = true
|
||||
# external
|
||||
pin-project-lite.workspace = true
|
||||
tokio-stream.workspace = true
|
||||
portable-pty.workspace = true
|
||||
axum-server.workspace = true
|
||||
serde_json.workspace = true
|
||||
|
||||
@@ -24,7 +24,21 @@ pub fn router() -> Router {
|
||||
.route("/", post(handler))
|
||||
.layer(middleware::from_fn(guard_request_by_passkey)),
|
||||
)
|
||||
.route("/terminal", get(super::terminal::connect_terminal))
|
||||
.nest(
|
||||
"/terminal",
|
||||
Router::new()
|
||||
.route("/", get(super::terminal::connect_terminal))
|
||||
.route(
|
||||
"/container",
|
||||
get(super::terminal::connect_container_exec),
|
||||
)
|
||||
.nest(
|
||||
"/execute",
|
||||
Router::new()
|
||||
.route("/", post(super::terminal::execute_terminal))
|
||||
.layer(middleware::from_fn(guard_request_by_passkey)),
|
||||
),
|
||||
)
|
||||
.layer(middleware::from_fn(guard_request_by_ip))
|
||||
}
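After this change the periphery serves `/terminal` (websocket), `/terminal/container` (websocket), and `/terminal/execute` (POST, passkey-guarded). A hedged sketch of calling the execute route with `reqwest` and streaming the line output; the periphery address and the passkey header name are assumptions, and `reqwest` (with its `json` and `stream` features) is used purely for illustration rather than being a dependency shown in this diff.

```rust
// Hedged sketch: POST /terminal/execute on a periphery and stream the output lines.
use futures::StreamExt;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
  let body = serde_json::json!({
    "terminal": "my-terminal",
    "command": "ls -la",
  });
  let response = reqwest::Client::new()
    .post("https://periphery:8120/terminal/execute")
    .header("authorization", "<passkey>") // header name is an assumption
    .json(&body)
    .send()
    .await?;
  // The response body is a stream of output lines, ending with the exit code sentinel.
  let mut stream = response.bytes_stream();
  while let Some(chunk) = stream.next().await {
    print!("{}", String::from_utf8_lossy(&chunk?));
  }
  Ok(())
}
```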
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use std::{collections::HashMap, sync::OnceLock};
|
||||
use std::{collections::HashMap, sync::OnceLock, task::Poll};
|
||||
|
||||
use anyhow::{Context, anyhow};
|
||||
use axum::{
|
||||
@@ -10,28 +10,21 @@ use axum::{
|
||||
response::Response,
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use futures::{SinkExt, StreamExt};
|
||||
use komodo_client::entities::{
|
||||
NoData, komodo_timestamp, server::TerminalInfo,
|
||||
};
|
||||
use periphery_client::api::terminal::{
|
||||
ConnectTerminalQuery, CreateTerminal, CreateTerminalAuthToken,
|
||||
CreateTerminalAuthTokenResponse, DeleteAllTerminals,
|
||||
DeleteTerminal, ListTerminals,
|
||||
};
|
||||
use rand::Rng;
|
||||
use resolver_api::Resolve;
|
||||
use serror::AddStatusCodeError;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::{
|
||||
config::periphery_config,
|
||||
terminal::{
|
||||
ResizeDimensions, StdinMsg, clean_up_terminals, create_terminal,
|
||||
delete_all_terminals, delete_terminal, get_terminal,
|
||||
list_terminals,
|
||||
use futures::{SinkExt, Stream, StreamExt, TryStreamExt};
|
||||
use komodo_client::{
|
||||
api::write::TerminalRecreateMode,
|
||||
entities::{
|
||||
KOMODO_EXIT_CODE, NoData, komodo_timestamp, server::TerminalInfo,
|
||||
},
|
||||
};
|
||||
use periphery_client::api::terminal::*;
|
||||
use pin_project_lite::pin_project;
|
||||
use rand::Rng;
|
||||
use resolver_api::Resolve;
|
||||
use serror::{AddStatusCodeError, Json};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use crate::{config::periphery_config, terminal::*};
|
||||
|
||||
impl Resolve<super::Args> for ListTerminals {
|
||||
#[instrument(name = "ListTerminals", level = "debug")]
|
||||
@@ -39,12 +32,6 @@ impl Resolve<super::Args> for ListTerminals {
|
||||
self,
|
||||
_: &super::Args,
|
||||
) -> serror::Result<Vec<TerminalInfo>> {
|
||||
if periphery_config().disable_terminals {
|
||||
return Err(
|
||||
anyhow!("Terminals are disabled in the periphery config")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
clean_up_terminals().await;
|
||||
Ok(list_terminals().await)
|
||||
}
|
||||
@@ -69,12 +56,6 @@ impl Resolve<super::Args> for CreateTerminal {
|
||||
impl Resolve<super::Args> for DeleteTerminal {
|
||||
#[instrument(name = "DeleteTerminal", level = "debug")]
|
||||
async fn resolve(self, _: &super::Args) -> serror::Result<NoData> {
|
||||
if periphery_config().disable_terminals {
|
||||
return Err(
|
||||
anyhow!("Terminals are disabled in the periphery config")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
delete_terminal(&self.terminal).await;
|
||||
Ok(NoData {})
|
||||
}
|
||||
@@ -83,12 +64,6 @@ impl Resolve<super::Args> for DeleteTerminal {
|
||||
impl Resolve<super::Args> for DeleteAllTerminals {
|
||||
#[instrument(name = "DeleteAllTerminals", level = "debug")]
|
||||
async fn resolve(self, _: &super::Args) -> serror::Result<NoData> {
|
||||
if periphery_config().disable_terminals {
|
||||
return Err(
|
||||
anyhow!("Terminals are disabled in the periphery config")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
delete_all_terminals().await;
|
||||
Ok(NoData {})
|
||||
}
|
||||
@@ -100,12 +75,6 @@ impl Resolve<super::Args> for CreateTerminalAuthToken {
|
||||
self,
|
||||
_: &super::Args,
|
||||
) -> serror::Result<CreateTerminalAuthTokenResponse> {
|
||||
if periphery_config().disable_terminals {
|
||||
return Err(
|
||||
anyhow!("Terminals are disabled in the periphery config")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
Ok(CreateTerminalAuthTokenResponse {
|
||||
token: auth_tokens().create_auth_token(),
|
||||
})
|
||||
@@ -127,16 +96,16 @@ struct AuthTokens {
|
||||
|
||||
impl AuthTokens {
|
||||
pub fn create_auth_token(&self) -> String {
|
||||
let mut lock = self.map.lock().unwrap();
|
||||
// clear out any old tokens here (prevent unbounded growth)
|
||||
let ts = komodo_timestamp();
|
||||
lock.retain(|_, valid_until| *valid_until > ts);
|
||||
let token: String = rand::rng()
|
||||
.sample_iter(&rand::distr::Alphanumeric)
|
||||
.take(30)
|
||||
.map(char::from)
|
||||
.collect();
|
||||
self
|
||||
.map
|
||||
.lock()
|
||||
.unwrap()
|
||||
.insert(token.clone(), komodo_timestamp() + TOKEN_VALID_FOR_MS);
|
||||
lock.insert(token.clone(), ts + TOKEN_VALID_FOR_MS);
|
||||
token
|
||||
}
|
||||
|
||||
@@ -160,11 +129,7 @@ impl AuthTokens {
|
||||
}
|
||||
|
||||
pub async fn connect_terminal(
|
||||
Query(ConnectTerminalQuery {
|
||||
token,
|
||||
terminal,
|
||||
init,
|
||||
}): Query<ConnectTerminalQuery>,
|
||||
Query(query): Query<ConnectTerminalQuery>,
|
||||
ws: WebSocketUpgrade,
|
||||
) -> serror::Result<Response> {
|
||||
if periphery_config().disable_terminals {
|
||||
@@ -173,7 +138,54 @@ pub async fn connect_terminal(
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
handle_terminal_websocket(query, ws).await
|
||||
}
|
||||
|
||||
pub async fn connect_container_exec(
|
||||
Query(ConnectContainerExecQuery {
|
||||
token,
|
||||
container,
|
||||
shell,
|
||||
}): Query<ConnectContainerExecQuery>,
|
||||
ws: WebSocketUpgrade,
|
||||
) -> serror::Result<Response> {
|
||||
if periphery_config().disable_container_exec {
|
||||
return Err(
|
||||
anyhow!("Container exec is disabled in the periphery config")
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
if container.contains("&&") || shell.contains("&&") {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"The use of '&&' is forbidden in the container name or shell"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
// Create (recreate if shell changed)
|
||||
create_terminal(
|
||||
container.clone(),
|
||||
format!("docker exec -it {container} {shell}"),
|
||||
TerminalRecreateMode::DifferentCommand,
|
||||
)
|
||||
.await
|
||||
.context("Failed to create terminal for container exec")?;
|
||||
|
||||
handle_terminal_websocket(
|
||||
ConnectTerminalQuery {
|
||||
token,
|
||||
terminal: container,
|
||||
},
|
||||
ws,
|
||||
)
|
||||
.await
|
||||
}
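For clarity, the terminal backing a container exec session is just a named terminal whose command is the `docker exec` invocation built above, created (or recreated if the shell changed) under the container's name and then handled like any other terminal websocket. A tiny illustration of the resulting command string:

```rust
// Illustration of the command string `connect_container_exec` builds above.
fn main() {
  let container = "web";
  let shell = "bash";
  let command = format!("docker exec -it {container} {shell}");
  assert_eq!(command, "docker exec -it web bash");
}
```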
|
||||
|
||||
async fn handle_terminal_websocket(
|
||||
ConnectTerminalQuery { token, terminal }: ConnectTerminalQuery,
|
||||
ws: WebSocketUpgrade,
|
||||
) -> serror::Result<Response> {
|
||||
// Auth the connection with single use token
|
||||
auth_tokens().check_token(token)?;
|
||||
|
||||
@@ -189,14 +201,6 @@ pub async fn connect_terminal(
|
||||
if !b.is_empty() {
|
||||
socket.send(Message::Binary(b)).await.context("Failed to send history part b")?;
|
||||
}
|
||||
|
||||
if let Some(init) = init {
|
||||
terminal
|
||||
.stdin
|
||||
.send(StdinMsg::Bytes(Bytes::from(init + "\n")))
|
||||
.await
|
||||
.context("Failed to run init command")?
|
||||
}
|
||||
anyhow::Ok(())
|
||||
}.await;
|
||||
|
||||
@@ -331,3 +335,107 @@ pub async fn connect_terminal(
|
||||
clean_up_terminals().await;
|
||||
}))
|
||||
}
|
||||
|
||||
/// Sentinels
|
||||
const START_OF_OUTPUT: &str = "__KOMODO_START_OF_OUTPUT__";
|
||||
const END_OF_OUTPUT: &str = "__KOMODO_END_OF_OUTPUT__";
|
||||
|
||||
pub async fn execute_terminal(
|
||||
Json(ExecuteTerminalBody { terminal, command }): Json<
|
||||
ExecuteTerminalBody,
|
||||
>,
|
||||
) -> serror::Result<axum::body::Body> {
|
||||
if periphery_config().disable_terminals {
|
||||
return Err(
|
||||
anyhow!("Terminals are disabled in the periphery config")
|
||||
.status_code(StatusCode::FORBIDDEN),
|
||||
);
|
||||
}
|
||||
|
||||
let terminal = get_terminal(&terminal).await?;
|
||||
|
||||
// Read the bytes into lines
|
||||
// This is done to check the lines for the EOF sentinel
|
||||
let mut stdout = tokio_util::codec::FramedRead::new(
|
||||
tokio_util::io::StreamReader::new(
|
||||
tokio_stream::wrappers::BroadcastStream::new(
|
||||
terminal.stdout.resubscribe(),
|
||||
)
|
||||
.map(|res| res.map_err(std::io::Error::other)),
|
||||
),
|
||||
tokio_util::codec::LinesCodec::new(),
|
||||
);
|
||||
|
||||
let full_command = format!(
|
||||
"printf '\n{START_OF_OUTPUT}\n\n'; {command}; rc=$? printf '\n{KOMODO_EXIT_CODE}%d\n{END_OF_OUTPUT}\n' \"$rc\"\n"
|
||||
);
|
||||
|
||||
terminal
|
||||
.stdin
|
||||
.send(StdinMsg::Bytes(Bytes::from(full_command)))
|
||||
.await
|
||||
.context("Failed to send command to terminal stdin")?;
|
||||
|
||||
// Only start the response AFTER the start sentinel is printed
|
||||
loop {
|
||||
match stdout
|
||||
.try_next()
|
||||
.await
|
||||
.context("Failed to read stdout line")?
|
||||
{
|
||||
Some(line) if line == START_OF_OUTPUT => break,
|
||||
// Keep looping until the start sentinel received.
|
||||
Some(_) => {}
|
||||
None => {
|
||||
return Err(
|
||||
anyhow!(
|
||||
"Stdout stream terminated before start sentinel received"
|
||||
)
|
||||
.into(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(axum::body::Body::from_stream(TerminalStream { stdout }))
|
||||
}
|
||||
|
||||
pin_project! {
|
||||
struct TerminalStream<S> { #[pin] stdout: S }
|
||||
}
|
||||
|
||||
impl<S> Stream for TerminalStream<S>
|
||||
where
|
||||
S:
|
||||
Stream<Item = Result<String, tokio_util::codec::LinesCodecError>>,
|
||||
{
|
||||
// Axum expects a stream of results
|
||||
type Item = Result<String, String>;
|
||||
|
||||
fn poll_next(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Option<Self::Item>> {
|
||||
let this = self.project();
|
||||
match this.stdout.poll_next(cx) {
|
||||
Poll::Ready(None) => {
|
||||
// This is if a None comes in before END_OF_OUTPUT.
|
||||
// This probably means the terminal has exited early,
|
||||
// and needs to be cleaned up
|
||||
tokio::spawn(async move { clean_up_terminals().await });
|
||||
Poll::Ready(None)
|
||||
}
|
||||
Poll::Ready(Some(line)) => {
|
||||
match line {
|
||||
Ok(line) if line.as_str() == END_OF_OUTPUT => {
|
||||
// Stop the stream on end sentinel
|
||||
Poll::Ready(None)
|
||||
}
|
||||
Ok(line) => Poll::Ready(Some(Ok(line + "\n"))),
|
||||
Err(e) => Poll::Ready(Some(Err(format!("{e:?}")))),
|
||||
}
|
||||
}
|
||||
Poll::Pending => Poll::Pending,
|
||||
}
|
||||
}
|
||||
}
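On the consuming side, the body produced by `TerminalStream` is plain lines: the command output followed by a final `__KOMODO_EXIT_CODE:<code>` line before the stream ends at the end sentinel. A minimal sketch of splitting that apart; the hard-coded string below stands in for the streamed body.

```rust
// Sketch: pull the exit code out of an ExecuteTerminal line stream.
// KOMODO_EXIT_CODE mirrors the "__KOMODO_EXIT_CODE:" prefix defined in komodo_client.
const KOMODO_EXIT_CODE: &str = "__KOMODO_EXIT_CODE:";

fn main() {
  // Stand-in for the streamed response body.
  let body = "total 0\ndrwxr-xr-x 2 root root 40 .\n__KOMODO_EXIT_CODE:0\n";
  let mut exit_code = None;
  for line in body.lines() {
    if let Some(code) = line.strip_prefix(KOMODO_EXIT_CODE) {
      exit_code = code.trim().parse::<i32>().ok();
    } else {
      println!("{line}");
    }
  }
  assert_eq!(exit_code, Some(0));
}
```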
|
||||
|
||||
@@ -45,6 +45,9 @@ pub fn periphery_config() -> &'static PeripheryConfig {
|
||||
disable_terminals: env
|
||||
.periphery_disable_terminals
|
||||
.unwrap_or(config.disable_terminals),
|
||||
disable_container_exec: env
|
||||
.periphery_disable_container_exec
|
||||
.unwrap_or(config.disable_container_exec),
|
||||
stats_polling_rate: env
|
||||
.periphery_stats_polling_rate
|
||||
.unwrap_or(config.stats_polling_rate),
|
||||
|
||||
@@ -189,6 +189,7 @@ impl StatsClient {
|
||||
fn get_system_information(
|
||||
sys: &sysinfo::System,
|
||||
) -> SystemInformation {
|
||||
let config = periphery_config();
|
||||
SystemInformation {
|
||||
name: System::name(),
|
||||
os: System::long_os_version(),
|
||||
@@ -201,6 +202,7 @@ fn get_system_information(
|
||||
.next()
|
||||
.map(|cpu| cpu.brand().to_string())
|
||||
.unwrap_or_default(),
|
||||
terminals_disabled: periphery_config().disable_terminals,
|
||||
terminals_disabled: config.disable_terminals,
|
||||
container_exec_disabled: config.disable_container_exec,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,6 @@ mod deployment;
|
||||
mod procedure;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod stack;
|
||||
mod sync;
|
||||
|
||||
@@ -23,7 +22,6 @@ pub use deployment::*;
|
||||
pub use procedure::*;
|
||||
pub use repo::*;
|
||||
pub use server::*;
|
||||
pub use server_template::*;
|
||||
pub use stack::*;
|
||||
pub use sync::*;
|
||||
|
||||
@@ -128,6 +126,7 @@ pub enum Execution {
|
||||
DeployStackIfChanged(DeployStackIfChanged),
|
||||
BatchDeployStackIfChanged(BatchDeployStackIfChanged),
|
||||
PullStack(PullStack),
|
||||
BatchPullStack(BatchPullStack),
|
||||
StartStack(StartStack),
|
||||
RestartStack(RestartStack),
|
||||
PauseStack(PauseStack),
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
use derive_empty_traits::EmptyTraits;
|
||||
use resolver_api::Resolve;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use typeshare::typeshare;
|
||||
|
||||
use crate::entities::update::Update;
|
||||
|
||||
use super::KomodoExecuteRequest;
|
||||
|
||||
/// Launch an EC2 instance with the specified config.
|
||||
/// Response: [Update].
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
|
||||
)]
|
||||
#[empty_traits(KomodoExecuteRequest)]
|
||||
#[response(Update)]
|
||||
#[error(serror::Error)]
|
||||
pub struct LaunchServer {
|
||||
/// The name of the created server.
|
||||
pub name: String,
|
||||
/// The server template used to define the config.
|
||||
pub server_template: String,
|
||||
}
|
||||
@@ -152,6 +152,37 @@ pub struct PullStack {
|
||||
|
||||
//
|
||||
|
||||
/// Pulls multiple Stacks in parallel that match pattern. Response: [BatchExecutionResponse].
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize,
|
||||
Deserialize,
|
||||
Debug,
|
||||
Clone,
|
||||
PartialEq,
|
||||
Resolve,
|
||||
EmptyTraits,
|
||||
Parser,
|
||||
)]
|
||||
#[empty_traits(KomodoExecuteRequest)]
|
||||
#[response(BatchExecutionResponse)]
|
||||
#[error(serror::Error)]
|
||||
pub struct BatchPullStack {
|
||||
/// Id or name or wildcard pattern or regex.
|
||||
/// Supports multiline and comma delimited combinations of the above.
|
||||
///
|
||||
/// Example:
|
||||
/// ```
|
||||
/// # match all foo-* stacks
|
||||
/// foo-*
|
||||
/// # add some more
|
||||
/// extra-stack-1, extra-stack-2
|
||||
/// ```
|
||||
pub pattern: String,
|
||||
}
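A minimal sketch of building this request; sending it goes through the usual Komodo execute API, and the client wiring is omitted here. The pattern string reuses the syntax documented above.

```rust
// Sketch: construct a BatchPullStack request matching several stacks.
fn main() {
  let request = komodo_client::api::execute::BatchPullStack {
    pattern: "foo-*\nextra-stack-1, extra-stack-2".to_string(),
  };
  println!("{}", serde_json::to_string_pretty(&request).unwrap());
}
```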
|
||||
|
||||
//
|
||||
|
||||
/// Starts the target stack. `docker compose start`. Response: [Update]
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
|
||||
@@ -68,7 +68,7 @@
|
||||
|
||||
pub mod auth;
|
||||
pub mod execute;
|
||||
pub mod terminal;
|
||||
pub mod read;
|
||||
pub mod terminal;
|
||||
pub mod user;
|
||||
pub mod write;
|
||||
|
||||
@@ -14,7 +14,6 @@ mod procedure;
|
||||
mod provider;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod stack;
|
||||
mod sync;
|
||||
mod tag;
|
||||
@@ -35,7 +34,6 @@ pub use procedure::*;
|
||||
pub use provider::*;
|
||||
pub use repo::*;
|
||||
pub use server::*;
|
||||
pub use server_template::*;
|
||||
pub use stack::*;
|
||||
pub use sync::*;
|
||||
pub use tag::*;
|
||||
@@ -106,6 +104,8 @@ pub struct GetCoreInfoResponse {
|
||||
pub disable_confirm_dialog: bool,
|
||||
/// The repo owners for which github webhook management api is available
|
||||
pub github_webhook_owners: Vec<String>,
|
||||
/// Whether to disable websocket automatic reconnect.
|
||||
pub disable_websocket_reconnect: bool,
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
use derive_empty_traits::EmptyTraits;
|
||||
use resolver_api::Resolve;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use typeshare::typeshare;
|
||||
|
||||
use crate::entities::server_template::{
|
||||
ServerTemplate, ServerTemplateListItem, ServerTemplateQuery,
|
||||
};
|
||||
|
||||
use super::KomodoReadRequest;
|
||||
|
||||
//
|
||||
|
||||
/// Get a specific server template by id or name. Response: [ServerTemplate].
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
|
||||
)]
|
||||
#[empty_traits(KomodoReadRequest)]
|
||||
#[response(GetServerTemplateResponse)]
|
||||
#[error(serror::Error)]
|
||||
pub struct GetServerTemplate {
|
||||
/// Id or name
|
||||
#[serde(alias = "id", alias = "name")]
|
||||
pub server_template: String,
|
||||
}
|
||||
|
||||
#[typeshare]
|
||||
pub type GetServerTemplateResponse = ServerTemplate;
|
||||
|
||||
//
|
||||
|
||||
/// List server templates matching structured query. Response: [ListServerTemplatesResponse].
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Default, Resolve, EmptyTraits,
|
||||
)]
|
||||
#[empty_traits(KomodoReadRequest)]
|
||||
#[response(ListServerTemplatesResponse)]
|
||||
#[error(serror::Error)]
|
||||
pub struct ListServerTemplates {
|
||||
#[serde(default)]
|
||||
pub query: ServerTemplateQuery,
|
||||
}
|
||||
|
||||
#[typeshare]
|
||||
pub type ListServerTemplatesResponse = Vec<ServerTemplateListItem>;
|
||||
|
||||
//
|
||||
|
||||
/// List server templates matching structured query. Response: [ListFullServerTemplatesResponse].
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Default, Resolve, EmptyTraits,
|
||||
)]
|
||||
#[empty_traits(KomodoReadRequest)]
|
||||
#[response(ListFullServerTemplatesResponse)]
|
||||
#[error(serror::Error)]
|
||||
pub struct ListFullServerTemplates {
|
||||
#[serde(default)]
|
||||
pub query: ServerTemplateQuery,
|
||||
}
|
||||
|
||||
#[typeshare]
|
||||
pub type ListFullServerTemplatesResponse = Vec<ServerTemplate>;
|
||||
|
||||
//
|
||||
|
||||
/// Gets a summary of data relating to all server templates.
|
||||
/// Response: [GetServerTemplatesSummaryResponse].
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
|
||||
)]
|
||||
#[empty_traits(KomodoReadRequest)]
|
||||
#[response(GetServerTemplatesSummaryResponse)]
|
||||
#[error(serror::Error)]
|
||||
pub struct GetServerTemplatesSummary {}
|
||||
|
||||
/// Response for [GetServerTemplatesSummary].
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct GetServerTemplatesSummaryResponse {
|
||||
/// The total number of server templates.
|
||||
pub total: u32,
|
||||
}
|
||||
@@ -9,12 +9,37 @@ pub struct ConnectTerminalQuery {
|
||||
/// Server Id or name
|
||||
pub server: String,
|
||||
/// Each periphery can keep multiple terminals open.
|
||||
/// If a terminal with the specified name already exists,
|
||||
/// it will be attached to.
|
||||
/// Otherwise a new terminal will be created for the command,
|
||||
/// which will persist until it is deleted using
|
||||
/// [DeleteTerminal][crate::api::write::server::DeleteTerminal]
|
||||
/// If a terminal with the specified name does not exist,
|
||||
/// the call will fail.
|
||||
/// Create a terminal using [CreateTerminal][super::write::server::CreateTerminal]
|
||||
pub terminal: String,
|
||||
/// Optional. The initial command to execute on connection to the shell.
|
||||
pub init: Option<String>,
|
||||
}
|
||||
|
||||
/// Query to connect to a container exec session (interactive shell over websocket) on the given server.
|
||||
/// TODO: Document calling.
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct ConnectContainerExecQuery {
|
||||
/// Server Id or name
|
||||
pub server: String,
|
||||
/// The container name
|
||||
pub container: String,
|
||||
/// The shell to connect to
|
||||
pub shell: String,
|
||||
}
|
||||
|
||||
/// Execute a terminal command on the given server.
|
||||
/// TODO: Document calling.
|
||||
#[typeshare]
|
||||
#[derive(Serialize, Deserialize, Debug, Clone)]
|
||||
pub struct ExecuteTerminalBody {
|
||||
/// Server Id or name
|
||||
pub server: String,
|
||||
/// The name of the terminal on the server to use to execute.
|
||||
/// If the terminal at name exists, it will be used to execute the command.
|
||||
/// Otherwise, a new terminal will be created for this command, which will
|
||||
/// persist until it exits or is deleted.
|
||||
pub terminal: String,
|
||||
/// The command to execute.
|
||||
pub command: String,
|
||||
}
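A short sketch of what these payloads look like on the wire. The field names come from the structs above; the values are illustrative.

```rust
// Sketch: the JSON shape of the new terminal API payloads defined above.
fn main() {
  let exec_body = komodo_client::api::terminal::ExecuteTerminalBody {
    server: "my-server".to_string(),
    terminal: "name".to_string(),
    command: "docker ps".to_string(),
  };
  // => {"server":"my-server","terminal":"name","command":"docker ps"}
  println!("{}", serde_json::to_string(&exec_body).unwrap());

  let exec_query = komodo_client::api::terminal::ConnectContainerExecQuery {
    server: "my-server".to_string(),
    container: "my-container".to_string(),
    shell: "sh".to_string(),
  };
  // The query-string encoding of this struct is what the container exec websocket expects.
  println!("{}", serde_json::to_string(&exec_query).unwrap());
}
```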
|
||||
|
||||
@@ -81,7 +81,7 @@ pub struct DeleteBuild {
|
||||
#[response(Build)]
|
||||
#[error(serror::Error)]
|
||||
pub struct UpdateBuild {
|
||||
/// The id of the build to update.
|
||||
/// The id or name of the build to update.
|
||||
pub id: String,
|
||||
/// The partial config update to apply.
|
||||
pub config: _PartialBuildConfig,
|
||||
|
||||
@@ -10,7 +10,6 @@ mod procedure;
|
||||
mod provider;
|
||||
mod repo;
|
||||
mod server;
|
||||
mod server_template;
|
||||
mod stack;
|
||||
mod sync;
|
||||
mod tags;
|
||||
@@ -30,7 +29,6 @@ pub use procedure::*;
|
||||
pub use provider::*;
|
||||
pub use repo::*;
|
||||
pub use server::*;
|
||||
pub use server_template::*;
|
||||
pub use stack::*;
|
||||
pub use sync::*;
|
||||
pub use tags::*;
|
||||
|
||||
@@ -1,105 +0,0 @@
|
||||
use derive_empty_traits::EmptyTraits;
|
||||
use resolver_api::Resolve;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use typeshare::typeshare;
|
||||
|
||||
use crate::entities::{
|
||||
server_template::{PartialServerTemplateConfig, ServerTemplate},
|
||||
update::Update,
|
||||
};
|
||||
|
||||
use super::KomodoWriteRequest;
|
||||
|
||||
//
|
||||
|
||||
/// Create a server template. Response: [ServerTemplate].
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
|
||||
)]
|
||||
#[empty_traits(KomodoWriteRequest)]
|
||||
#[response(ServerTemplate)]
|
||||
#[error(serror::Error)]
|
||||
pub struct CreateServerTemplate {
|
||||
/// The name given to newly created server template.
|
||||
pub name: String,
|
||||
/// Optional partial config to initialize the server template with.
|
||||
#[serde(default)]
|
||||
pub config: PartialServerTemplateConfig,
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Creates a new server template with given `name` and the configuration
|
||||
/// of the server template at the given `id`. Response: [ServerTemplate]
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
|
||||
)]
|
||||
#[empty_traits(KomodoWriteRequest)]
|
||||
#[response(ServerTemplate)]
|
||||
#[error(serror::Error)]
|
||||
pub struct CopyServerTemplate {
|
||||
/// The name of the new server template.
|
||||
pub name: String,
|
||||
/// The id of the server template to copy.
|
||||
pub id: String,
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Deletes the server template at the given id, and returns the deleted server template.
|
||||
/// Response: [ServerTemplate]
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
|
||||
)]
|
||||
#[empty_traits(KomodoWriteRequest)]
|
||||
#[response(ServerTemplate)]
|
||||
#[error(serror::Error)]
|
||||
pub struct DeleteServerTemplate {
|
||||
/// The id or name of the server template to delete.
|
||||
pub id: String,
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Update the server template at the given id, and return the updated server template.
|
||||
/// Response: [ServerTemplate].
|
||||
///
|
||||
/// Note. This method updates only the fields which are set in the [PartialServerTemplateConfig],
|
||||
/// effectively merging diffs into the final document.
|
||||
/// This is helpful when multiple users are using
|
||||
/// the same resources concurrently by ensuring no unintentional
|
||||
/// field changes occur from out of date local state.
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
|
||||
)]
|
||||
#[empty_traits(KomodoWriteRequest)]
|
||||
#[response(ServerTemplate)]
|
||||
#[error(serror::Error)]
|
||||
pub struct UpdateServerTemplate {
|
||||
/// The id of the server template to update.
|
||||
pub id: String,
|
||||
/// The partial config update to apply.
|
||||
pub config: PartialServerTemplateConfig,
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
/// Rename the ServerTemplate at id to the given name.
|
||||
/// Response: [Update].
|
||||
#[typeshare]
|
||||
#[derive(
|
||||
Serialize, Deserialize, Debug, Clone, Resolve, EmptyTraits,
|
||||
)]
|
||||
#[empty_traits(KomodoWriteRequest)]
|
||||
#[response(Update)]
|
||||
#[error(serror::Error)]
|
||||
pub struct RenameServerTemplate {
|
||||
/// The id or name of the ServerTemplate to rename.
|
||||
pub id: String,
|
||||
/// The new name.
|
||||
pub name: String,
|
||||
}
|
||||
@@ -133,6 +133,12 @@ pub struct ActionConfig {
|
||||
#[builder(default)]
|
||||
pub webhook_secret: String,
|
||||
|
||||
/// Whether deno will be instructed to reload all dependencies.
|
||||
/// This can usually be kept false outside of development.
|
||||
#[serde(default)]
|
||||
#[builder(default)]
|
||||
pub reload_deno_deps: bool,
|
||||
|
||||
/// Typescript file contents using pre-initialized `komodo` client.
|
||||
/// Supports variable / secret interpolation.
|
||||
#[serde(default, deserialize_with = "file_contents_deserializer")]
|
||||
@@ -177,6 +183,7 @@ impl Default for ActionConfig {
|
||||
failure_alert: default_failure_alert(),
|
||||
webhook_enabled: default_webhook_enabled(),
|
||||
webhook_secret: Default::default(),
|
||||
reload_deno_deps: Default::default(),
|
||||
file_contents: Default::default(),
|
||||
}
|
||||
}
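How this toggle reaches the action runtime is not shown in this diff; deno's real `--reload` flag forces cached dependencies to be re-downloaded, so a plausible sketch of the argument assembly looks like the following. The `--allow-all` flag and the `action.ts` entrypoint name are assumptions for illustration only.

```rust
// Hedged sketch: how reload_deno_deps could map to deno CLI arguments.
fn deno_args(reload_deno_deps: bool) -> Vec<&'static str> {
  let mut args = vec!["run", "--allow-all"];
  if reload_deno_deps {
    // deno's flag to bypass the dependency cache
    args.push("--reload");
  }
  args.push("action.ts");
  args
}

fn main() {
  assert!(deno_args(true).contains(&"--reload"));
  assert!(!deno_args(false).contains(&"--reload"));
}
```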
|
||||
|
||||
@@ -106,6 +106,8 @@ pub struct Env {
|
||||
pub komodo_disable_confirm_dialog: Option<bool>,
|
||||
/// Override `disable_non_admin_create`
|
||||
pub komodo_disable_non_admin_create: Option<bool>,
|
||||
/// Override `disable_websocket_reconnect`
|
||||
pub komodo_disable_websocket_reconnect: Option<bool>,
|
||||
|
||||
/// Override `local_auth`
|
||||
pub komodo_local_auth: Option<bool>,
|
||||
@@ -209,11 +211,6 @@ pub struct Env {
|
||||
/// Override `aws.secret_access_key` with file
|
||||
pub komodo_aws_secret_access_key_file: Option<PathBuf>,
|
||||
|
||||
/// Override `hetzner.token`
|
||||
pub komodo_hetzner_token: Option<String>,
|
||||
/// Override `hetzner.token` with file
|
||||
pub komodo_hetzner_token_file: Option<PathBuf>,
|
||||
|
||||
/// Override `ssl_enabled`.
|
||||
pub komodo_ssl_enabled: Option<bool>,
|
||||
/// Override `ssl_key_file`
|
||||
@@ -277,6 +274,10 @@ pub struct CoreConfig {
|
||||
#[serde(default)]
|
||||
pub disable_confirm_dialog: bool,
|
||||
|
||||
/// Disable the UI websocket from automatically reconnecting.
|
||||
#[serde(default)]
|
||||
pub disable_websocket_reconnect: bool,
|
||||
|
||||
/// If defined, ensure an enabled first server exists at this address.
|
||||
/// Example: `http://periphery:8120`
|
||||
#[serde(default)]
|
||||
@@ -456,10 +457,6 @@ pub struct CoreConfig {
|
||||
#[serde(default)]
|
||||
pub aws: AwsCredentials,
|
||||
|
||||
/// Configure Hetzner credentials to use with Hetzner builds / server launches.
|
||||
#[serde(default)]
|
||||
pub hetzner: HetznerCredentials,
|
||||
|
||||
// =================
|
||||
// = Git Providers =
|
||||
// =================
|
||||
@@ -601,6 +598,7 @@ impl CoreConfig {
|
||||
transparent_mode: config.transparent_mode,
|
||||
ui_write_disabled: config.ui_write_disabled,
|
||||
disable_confirm_dialog: config.disable_confirm_dialog,
|
||||
disable_websocket_reconnect: config.disable_websocket_reconnect,
|
||||
enable_new_users: config.enable_new_users,
|
||||
disable_user_registration: config.disable_user_registration,
|
||||
disable_non_admin_create: config.disable_non_admin_create,
|
||||
@@ -646,9 +644,6 @@ impl CoreConfig {
|
||||
&config.aws.secret_access_key,
|
||||
),
|
||||
},
|
||||
hetzner: HetznerCredentials {
|
||||
token: empty_or_redacted(&config.hetzner.token),
|
||||
},
|
||||
secrets: config
|
||||
.secrets
|
||||
.into_iter()
|
||||
@@ -760,12 +755,6 @@ pub struct AwsCredentials {
|
||||
pub secret_access_key: String,
|
||||
}
|
||||
|
||||
/// Provide Hetzner credentials for Komodo to use.
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct HetznerCredentials {
|
||||
pub token: String,
|
||||
}
|
||||
|
||||
/// Provide configuration for a Github Webhook app.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GithubWebhookAppConfig {
|
||||
|
||||
@@ -126,6 +126,8 @@ pub struct Env {
|
||||
pub periphery_build_dir: Option<PathBuf>,
|
||||
/// Override `disable_terminals`
|
||||
pub periphery_disable_terminals: Option<bool>,
|
||||
/// Override `disable_container_exec`
|
||||
pub periphery_disable_container_exec: Option<bool>,
|
||||
/// Override `stats_polling_rate`
|
||||
pub periphery_stats_polling_rate: Option<Timelength>,
|
||||
/// Override `legacy_compose_cli`
|
||||
@@ -205,12 +207,18 @@ pub struct PeripheryConfig {
|
||||
/// Default: empty
|
||||
pub build_dir: Option<PathBuf>,
|
||||
|
||||
/// Whether to disable the terminal APIs
|
||||
/// and disallow remote shell access.
|
||||
/// Whether to disable the terminal creation APIs
|
||||
/// and disallow direct remote shell access.
|
||||
/// Default: false
|
||||
#[serde(default)]
|
||||
pub disable_terminals: bool,
|
||||
|
||||
/// Whether to disable the container exec api
|
||||
/// and disallow remote container shell access.
|
||||
/// Default: false
|
||||
#[serde(default)]
|
||||
pub disable_container_exec: bool,
|
||||
|
||||
/// The rate at which the system stats will be polled to update the cache.
|
||||
/// Default: `5-sec`
|
||||
#[serde(default = "default_stats_polling_rate")]
|
||||
@@ -264,7 +272,7 @@ pub struct PeripheryConfig {
|
||||
pub docker_registries: Vec<DockerRegistry>,
|
||||
|
||||
/// Whether to enable ssl.
|
||||
/// Default: false (will change in later release)
|
||||
/// Default: true
|
||||
#[serde(default = "default_ssl_enabled")]
|
||||
pub ssl_enabled: bool,
|
||||
|
||||
@@ -294,7 +302,7 @@ fn default_stats_polling_rate() -> Timelength {
|
||||
}
|
||||
|
||||
fn default_ssl_enabled() -> bool {
|
||||
false
|
||||
true
|
||||
}
|
||||
|
||||
impl Default for PeripheryConfig {
|
||||
@@ -307,6 +315,7 @@ impl Default for PeripheryConfig {
|
||||
stack_dir: None,
|
||||
build_dir: None,
|
||||
disable_terminals: Default::default(),
|
||||
disable_container_exec: Default::default(),
|
||||
stats_polling_rate: default_stats_polling_rate(),
|
||||
legacy_compose_cli: Default::default(),
|
||||
logging: Default::default(),
|
||||
@@ -334,6 +343,7 @@ impl PeripheryConfig {
|
||||
stack_dir: self.stack_dir.clone(),
|
||||
build_dir: self.build_dir.clone(),
|
||||
disable_terminals: self.disable_terminals,
|
||||
disable_container_exec: self.disable_container_exec,
|
||||
stats_polling_rate: self.stats_polling_rate,
|
||||
legacy_compose_cli: self.legacy_compose_cli,
|
||||
logging: self.logging.clone(),
|
||||
|
||||
@@ -54,8 +54,6 @@ pub mod repo;
|
||||
pub mod resource;
|
||||
/// Subtypes of [Server][server::Server].
|
||||
pub mod server;
|
||||
/// Subtypes of [ServerTemplate][server_template::ServerTemplate].
|
||||
pub mod server_template;
|
||||
/// Subtypes of [Stack][stack::Stack]
|
||||
pub mod stack;
|
||||
/// Subtypes for server stats reporting.
|
||||
@@ -759,13 +757,6 @@ pub enum Operation {
|
||||
DeleteAlerter,
|
||||
TestAlerter,
|
||||
|
||||
// server template
|
||||
CreateServerTemplate,
|
||||
UpdateServerTemplate,
|
||||
RenameServerTemplate,
|
||||
DeleteServerTemplate,
|
||||
LaunchServer,
|
||||
|
||||
// sync
|
||||
CreateResourceSync,
|
||||
UpdateResourceSync,
|
||||
@@ -878,7 +869,6 @@ pub enum ResourceTarget {
|
||||
Action(String),
|
||||
Builder(String),
|
||||
Alerter(String),
|
||||
ServerTemplate(String),
|
||||
ResourceSync(String),
|
||||
}
|
||||
|
||||
@@ -897,7 +887,6 @@ impl ResourceTarget {
|
||||
ResourceTarget::Alerter(id) => id,
|
||||
ResourceTarget::Procedure(id) => id,
|
||||
ResourceTarget::Action(id) => id,
|
||||
ResourceTarget::ServerTemplate(id) => id,
|
||||
ResourceTarget::ResourceSync(id) => id,
|
||||
};
|
||||
(self.extract_variant(), id)
|
||||
@@ -956,12 +945,6 @@ impl From<&procedure::Procedure> for ResourceTarget {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&server_template::ServerTemplate> for ResourceTarget {
|
||||
fn from(server_template: &server_template::ServerTemplate) -> Self {
|
||||
Self::ServerTemplate(server_template.id.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&sync::ResourceSync> for ResourceTarget {
|
||||
fn from(resource_sync: &sync::ResourceSync) -> Self {
|
||||
Self::ResourceSync(resource_sync.id.clone())
|
||||
@@ -992,7 +975,6 @@ impl ResourceTargetVariant {
|
||||
ResourceTargetVariant::Repo => "repo",
|
||||
ResourceTargetVariant::Alerter => "alerter",
|
||||
ResourceTargetVariant::Procedure => "procedure",
|
||||
ResourceTargetVariant::ServerTemplate => "server_template",
|
||||
ResourceTargetVariant::ResourceSync => "resource_sync",
|
||||
ResourceTargetVariant::Stack => "stack",
|
||||
ResourceTargetVariant::Action => "action",
|
||||
@@ -1010,4 +992,5 @@ pub enum ScheduleFormat {
|
||||
Cron,
|
||||
}
|
||||
|
||||
pub const KOMODO_EXIT_DATA: &str = "__KOMODO_EXIT_DATA:";
|
||||
/// Used with ExecuteTerminal to capture the exit code
|
||||
pub const KOMODO_EXIT_CODE: &str = "__KOMODO_EXIT_CODE:";
|
||||
|
||||
@@ -40,6 +40,8 @@ pub struct ServerListItemInfo {
|
||||
pub send_disk_alerts: bool,
|
||||
/// Whether terminals are disabled for this Server.
|
||||
pub terminals_disabled: bool,
|
||||
/// Whether container exec is disabled for this Server.
|
||||
pub container_exec_disabled: bool,
|
||||
}
|
||||
|
||||
#[typeshare(serialized_as = "Partial<ServerConfig>")]
|
||||
|
||||
@@ -1,216 +0,0 @@
|
||||
use derive_builder::Builder;
|
||||
use partial_derive2::Partial;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use strum::{AsRefStr, Display};
|
||||
use typeshare::typeshare;
|
||||
|
||||
use crate::{
|
||||
deserializers::{
|
||||
option_string_list_deserializer, string_list_deserializer,
|
||||
},
|
||||
entities::builder::AwsBuilderConfig,
|
||||
};
|
||||
|
||||
#[typeshare(serialized_as = "Partial<AwsServerTemplateConfig>")]
|
||||
pub type _PartialAwsServerTemplateConfig =
|
||||
PartialAwsServerTemplateConfig;
|
||||
|
||||
/// Aws EC2 instance config.
|
||||
#[typeshare]
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Builder, Partial)]
|
||||
#[partial_derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
#[partial(skip_serializing_none, from, diff)]
|
||||
pub struct AwsServerTemplateConfig {
|
||||
/// The aws region to launch the server in, eg. us-east-1
|
||||
#[serde(default = "default_region")]
|
||||
#[builder(default = "default_region()")]
|
||||
#[partial_default(default_region())]
|
||||
pub region: String,
|
||||
/// The instance type to launch, eg. c5.2xlarge
|
||||
#[serde(default = "default_instance_type")]
|
||||
#[builder(default = "default_instance_type()")]
|
||||
#[partial_default(default_instance_type())]
|
||||
pub instance_type: String,
|
||||
/// Specify the ami id to use. Must be set up to start the periphery binary on startup.
|
||||
pub ami_id: String,
|
||||
/// The subnet to assign to the instance.
|
||||
pub subnet_id: String,
|
||||
/// The key pair name to give to the instance in case SSH access required.
|
||||
pub key_pair_name: String,
|
||||
/// Assign a public ip to the instance. Depending on how your network is
|
||||
/// setup, this may be required for the instance to reach the public internet.
|
||||
#[serde(default = "default_assign_public_ip")]
|
||||
#[builder(default = "default_assign_public_ip()")]
|
||||
#[partial_default(default_assign_public_ip())]
|
||||
pub assign_public_ip: bool,
|
||||
/// Use the instances public ip as the address for the server.
|
||||
/// Could be used when build instances are created in another non-interconnected network to the core api.
|
||||
#[serde(default = "default_use_public_ip")]
|
||||
#[builder(default = "default_use_public_ip()")]
|
||||
#[partial_default(default_use_public_ip())]
|
||||
pub use_public_ip: bool,
|
||||
/// The port periphery will be running on in AMI.
|
||||
/// Default: `8120`
|
||||
#[serde(default = "default_port")]
|
||||
#[builder(default = "default_port()")]
|
||||
#[partial_default(default_port())]
|
||||
pub port: i32,
|
||||
/// Whether Periphery will be running on https
|
||||
#[serde(default = "default_use_https")]
|
||||
#[builder(default = "default_use_https()")]
|
||||
#[partial_default(default_use_https())]
|
||||
pub use_https: bool,
|
||||
/// The security groups to give to the instance.
|
||||
#[serde(default, deserialize_with = "string_list_deserializer")]
|
||||
#[partial_attr(serde(
|
||||
default,
|
||||
deserialize_with = "option_string_list_deserializer"
|
||||
))]
|
||||
#[builder(default)]
|
||||
pub security_group_ids: Vec<String>,
|
||||
/// Specify the EBS volumes to attach.
|
||||
#[serde(default = "default_volumes")]
|
||||
#[builder(default = "default_volumes()")]
|
||||
#[partial_default(default_volumes())]
|
||||
pub volumes: Vec<AwsVolume>,
|
||||
/// The user data to deploy the instance with.
|
||||
#[serde(default = "default_user_data")]
|
||||
#[builder(default = "default_user_data()")]
|
||||
#[partial_default(default_user_data())]
|
||||
pub user_data: String,
|
||||
}
|
||||
|
||||
impl AwsServerTemplateConfig {
|
||||
pub fn builder() -> AwsServerTemplateConfigBuilder {
|
||||
AwsServerTemplateConfigBuilder::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn default_region() -> String {
  String::from("us-east-1")
}

fn default_instance_type() -> String {
  String::from("t3.small")
}

fn default_assign_public_ip() -> bool {
  true
}

fn default_use_public_ip() -> bool {
  false
}

fn default_volumes() -> Vec<AwsVolume> {
  vec![AwsVolume {
    device_name: "/dev/sda1".to_string(),
    size_gb: 20,
    volume_type: AwsVolumeType::Gp2,
    iops: 0,
    throughput: 0,
  }]
}

fn default_port() -> i32 {
  8120
}

fn default_use_https() -> bool {
  true
}

fn default_user_data() -> String {
  String::from("#!/bin/bash
apt update
apt upgrade -y
curl -fsSL https://get.docker.com | sh
systemctl enable docker.service
systemctl enable containerd.service
curl -sSL https://raw.githubusercontent.com/moghtech/komodo/main/scripts/setup-periphery.py | HOME=/root python3
systemctl enable periphery.service")
}

impl Default for AwsServerTemplateConfig {
  fn default() -> Self {
    Self {
      region: default_region(),
      instance_type: default_instance_type(),
      assign_public_ip: default_assign_public_ip(),
      use_public_ip: default_use_public_ip(),
      port: default_port(),
      use_https: default_use_https(),
      volumes: default_volumes(),
      ami_id: Default::default(),
      subnet_id: Default::default(),
      key_pair_name: Default::default(),
      user_data: default_user_data(),
      security_group_ids: Default::default(),
    }
  }
}

/// For information on AWS volumes, see
/// `<https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html>`.
#[typeshare]
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct AwsVolume {
  /// The device name (for example, `/dev/sda1` or `xvdh`).
  pub device_name: String,
  /// The size of the volume in GB
  pub size_gb: i32,
  /// The type of volume. Options: gp2, gp3, io1, io2.
  pub volume_type: AwsVolumeType,
  /// The iops of the volume, or 0 for AWS default.
  pub iops: i32,
  /// The throughput of the volume, or 0 for AWS default.
  pub throughput: i32,
}

#[typeshare]
#[derive(
  Debug,
  Clone,
  Copy,
  Default,
  PartialEq,
  Eq,
  Serialize,
  Deserialize,
  Display,
  AsRefStr,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum AwsVolumeType {
  #[default]
  Gp2,
  Gp3,
  Io1,
  Io2,
}

impl AwsServerTemplateConfig {
  pub fn from_builder_config(value: &AwsBuilderConfig) -> Self {
    Self {
      region: value.region.clone(),
      instance_type: value.instance_type.clone(),
      volumes: vec![AwsVolume {
        device_name: "/dev/sda1".to_string(),
        size_gb: value.volume_gb,
        volume_type: AwsVolumeType::Gp2,
        iops: 0,
        throughput: 0,
      }],
      ami_id: value.ami_id.clone(),
      subnet_id: value.subnet_id.clone(),
      security_group_ids: value.security_group_ids.clone(),
      key_pair_name: value.key_pair_name.clone(),
      assign_public_ip: value.assign_public_ip,
      use_public_ip: value.use_public_ip,
      port: value.port,
      use_https: value.use_https,
      user_data: value.user_data.clone(),
    }
  }
}
@@ -1,255 +0,0 @@
use std::collections::HashMap;

use derive_builder::Builder;
use partial_derive2::Partial;
use serde::{Deserialize, Serialize};
use strum::AsRefStr;
use typeshare::typeshare;

use crate::{
  deserializers::{
    option_string_list_deserializer, string_list_deserializer,
  },
  entities::I64,
};

#[typeshare(serialized_as = "Partial<HetznerServerTemplateConfig>")]
pub type _PartialHetznerServerTemplateConfig =
  PartialHetznerServerTemplateConfig;

/// Hetzner server config.
#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize, Builder, Partial)]
#[partial_derive(Debug, Clone, Default, Serialize, Deserialize)]
#[partial(skip_serializing_none, from, diff)]
pub struct HetznerServerTemplateConfig {
  /// ID or name of the Image the Server is created from
  #[serde(default = "default_image")]
  #[builder(default = "default_image()")]
  #[partial_default(default_image())]
  pub image: String,
  /// ID or name of Datacenter to create Server in
  #[serde(default)]
  #[builder(default)]
  pub datacenter: HetznerDatacenter,
  /// ID of the Placement Group the server should be in,
  /// Or 0 to not use placement group.
  #[serde(default)]
  #[builder(default)]
  pub placement_group: I64,
  /// ID or name of the Server type this Server should be created with
  #[serde(default)]
  #[builder(default)]
  pub server_type: HetznerServerType,
  /// SSH key IDs ( integer ) or names ( string ) which should be injected into the Server at creation time
  #[serde(default, deserialize_with = "string_list_deserializer")]
  #[partial_attr(serde(
    default,
    deserialize_with = "option_string_list_deserializer"
  ))]
  #[builder(default)]
  pub ssh_keys: Vec<String>,
  /// Network IDs which should be attached to the Server private network interface at the creation time
  #[serde(default)]
  #[builder(default)]
  pub private_network_ids: Vec<I64>,
  /// Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached.
  #[serde(default)]
  #[builder(default)]
  pub enable_public_ipv4: bool,
  /// Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached.
  #[serde(default)]
  #[builder(default)]
  pub enable_public_ipv6: bool,
  /// Connect to the instance using it's public ip.
  #[serde(default)]
  #[builder(default)]
  pub use_public_ip: bool,
  /// The port periphery will be running on in AMI.
  /// Default: `8120`
  #[serde(default = "default_port")]
  #[builder(default = "default_port()")]
  #[partial_default(default_port())]
  pub port: i32,
  /// Whether Periphery will be running on https
  #[serde(default = "default_use_https")]
  #[builder(default = "default_use_https()")]
  #[partial_default(default_use_https())]
  pub use_https: bool,
  /// The firewalls to attach to the instance
  #[serde(default)]
  #[builder(default)]
  pub firewall_ids: Vec<I64>,
  /// Labels for the server
  #[serde(default)]
  #[builder(default)]
  pub labels: HashMap<String, String>,
  /// Specs for volumes to attach
  #[serde(default)]
  #[builder(default)]
  pub volumes: Vec<HetznerVolumeSpecs>,
  /// Cloud-Init user data to use during Server creation. This field is limited to 32KiB.
  #[serde(default = "default_user_data")]
  #[builder(default = "default_user_data()")]
  #[partial_default(default_user_data())]
  pub user_data: String,
}

impl HetznerServerTemplateConfig {
  pub fn builder() -> HetznerServerTemplateConfigBuilder {
    HetznerServerTemplateConfigBuilder::default()
  }
}

fn default_image() -> String {
  String::from("ubuntu-24.04")
}

fn default_port() -> i32 {
  8120
}

fn default_use_https() -> bool {
  true
}

fn default_user_data() -> String {
  String::from("#cloud-config
runcmd:
  - apt update
  - apt upgrade -y
  - curl -fsSL https://get.docker.com | sh
  - systemctl enable docker.service
  - systemctl enable containerd.service
  - curl -sSL 'https://raw.githubusercontent.com/moghtech/komodo/main/scripts/setup-periphery.py' | HOME=/root python3
  - systemctl enable periphery.service")
}

impl Default for HetznerServerTemplateConfig {
  fn default() -> Self {
    Self {
      port: default_port(),
      use_https: default_use_https(),
      image: default_image(),
      datacenter: Default::default(),
      private_network_ids: Default::default(),
      placement_group: Default::default(),
      enable_public_ipv4: Default::default(),
      enable_public_ipv6: Default::default(),
      firewall_ids: Default::default(),
      server_type: Default::default(),
      ssh_keys: Default::default(),
      user_data: default_user_data(),
      use_public_ip: Default::default(),
      labels: Default::default(),
      volumes: Default::default(),
    }
  }
}

#[typeshare]
#[derive(
  Debug, Clone, PartialEq, Serialize, Deserialize, Builder,
)]
pub struct HetznerVolumeSpecs {
  /// A name for the volume
  pub name: String,
  /// Size of the volume in GB
  pub size_gb: I64,
  /// The format for the volume
  #[serde(default)]
  #[builder(default)]
  pub format: HetznerVolumeFormat,
  /// Labels for the volume
  #[serde(default)]
  #[builder(default)]
  pub labels: HashMap<String, String>,
}

#[typeshare]
#[derive(
  Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, Deserialize,
)]
pub enum HetznerVolumeFormat {
  #[default]
  Xfs,
  Ext4,
}

#[typeshare]
#[derive(
  Debug,
  Clone,
  Copy,
  Default,
  PartialEq,
  Serialize,
  Deserialize,
  AsRefStr,
)]
#[allow(clippy::enum_variant_names)]
pub enum HetznerServerType {
  // Shared
  #[default]
  /// CPX11 - AMD 2 Cores, 2 Gb Ram, 40 Gb disk
  SharedAmd2Core2Ram40Disk,
  /// CAX11 - Arm 2 Cores, 4 Gb Ram, 40 Gb disk
  SharedArm2Core4Ram40Disk,
  /// CX22 - Intel 2 Cores, 4 Gb Ram, 40 Gb disk
  SharedIntel2Core4Ram40Disk,
  /// CPX21 - AMD 3 Cores, 4 Gb Ram, 80 Gb disk
  SharedAmd3Core4Ram80Disk,
  /// CAX21 - Arm 4 Cores, 8 Gb Ram, 80 Gb disk
  SharedArm4Core8Ram80Disk,
  /// CX32 - Intel 4 Cores, 8 Gb Ram, 80 Gb disk
  SharedIntel4Core8Ram80Disk,
  /// CPX31 - AMD 4 Cores, 8 Gb Ram, 160 Gb disk
  SharedAmd4Core8Ram160Disk,
  /// CAX31 - Arm 8 Cores, 16 Gb Ram, 160 Gb disk
  SharedArm8Core16Ram160Disk,
  /// CX42 - Intel 8 Cores, 16 Gb Ram, 160 Gb disk
  SharedIntel8Core16Ram160Disk,
  /// CPX41 - AMD 8 Cores, 16 Gb Ram, 240 Gb disk
  SharedAmd8Core16Ram240Disk,
  /// CAX41 - Arm 16 Cores, 32 Gb Ram, 320 Gb disk
  SharedArm16Core32Ram320Disk,
  /// CX52 - Intel 16 Cores, 32 Gb Ram, 320 Gb disk
  SharedIntel16Core32Ram320Disk,
  /// CPX51 - AMD 16 Cores, 32 Gb Ram, 360 Gb disk
  SharedAmd16Core32Ram360Disk,

  // Dedicated
  /// CCX13 - AMD 2 Cores, 8 Gb Ram, 80 Gb disk
  DedicatedAmd2Core8Ram80Disk,
  /// CCX23 - AMD 4 Cores, 16 Gb Ram, 160 Gb disk
  DedicatedAmd4Core16Ram160Disk,
  /// CCX33 - AMD 8 Cores, 32 Gb Ram, 240 Gb disk
  DedicatedAmd8Core32Ram240Disk,
  /// CCX43 - AMD 16 Cores, 64 Gb Ram, 360 Gb disk
  DedicatedAmd16Core64Ram360Disk,
  /// CCX53 - AMD 32 Cores, 128 Gb Ram, 600 Gb disk
  DedicatedAmd32Core128Ram600Disk,
  /// CCX63 - AMD 48 Cores, 192 Gb Ram, 960 Gb disk
  DedicatedAmd48Core192Ram960Disk,
}

#[typeshare]
#[derive(
  Debug,
  Clone,
  Copy,
  Default,
  PartialEq,
  Serialize,
  Deserialize,
  AsRefStr,
)]
pub enum HetznerDatacenter {
  #[default]
  Nuremberg1Dc3,
  Helsinki1Dc2,
  Falkenstein1Dc14,
  AshburnDc1,
  HillsboroDc1,
  SingaporeDc1,
}
@@ -1,312 +0,0 @@
use bson::{Document, doc};
use derive_default_builder::DefaultBuilder;
use derive_variants::EnumVariants;
use partial_derive2::{Diff, MaybeNone, PartialDiff};
use serde::{Deserialize, Serialize};
use strum::{AsRefStr, Display, EnumString};
use typeshare::typeshare;

use self::{
  aws::AwsServerTemplateConfig, hetzner::HetznerServerTemplateConfig,
};

use super::{
  MergePartial,
  resource::{AddFilters, Resource, ResourceListItem, ResourceQuery},
};

pub mod aws;
pub mod hetzner;

#[typeshare]
pub type ServerTemplate = Resource<ServerTemplateConfig, ()>;

#[typeshare]
pub type ServerTemplateListItem =
  ResourceListItem<ServerTemplateListItemInfo>;

#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ServerTemplateListItemInfo {
  /// The cloud provider
  pub provider: String,
  /// The instance type, eg c5.2xlarge on for Aws templates
  pub instance_type: Option<String>,
}

#[typeshare]
#[derive(Debug, Clone, Serialize, Deserialize, EnumVariants)]
#[variant_derive(
  Serialize,
  Deserialize,
  Debug,
  Clone,
  Copy,
  Display,
  EnumString,
  AsRefStr
)]
#[serde(tag = "type", content = "params")]
pub enum ServerTemplateConfig {
  /// Template to launch an AWS EC2 instance
  Aws(aws::AwsServerTemplateConfig),
  /// Template to launch a Hetzner server
  Hetzner(hetzner::HetznerServerTemplateConfig),
}

impl Default for ServerTemplateConfig {
  fn default() -> Self {
    Self::Aws(Default::default())
  }
}

#[typeshare]
#[derive(Serialize, Deserialize, Debug, Clone, EnumVariants)]
#[variant_derive(
  Serialize,
  Deserialize,
  Debug,
  Clone,
  Copy,
  Display,
  EnumString,
  AsRefStr
)]
#[serde(tag = "type", content = "params")]
pub enum PartialServerTemplateConfig {
  Aws(#[serde(default)] aws::_PartialAwsServerTemplateConfig),
  Hetzner(
    #[serde(default)] hetzner::_PartialHetznerServerTemplateConfig,
  ),
}

impl Default for PartialServerTemplateConfig {
  fn default() -> Self {
    Self::Aws(Default::default())
  }
}

impl MaybeNone for PartialServerTemplateConfig {
  fn is_none(&self) -> bool {
    match self {
      PartialServerTemplateConfig::Aws(config) => config.is_none(),
      PartialServerTemplateConfig::Hetzner(config) => {
        config.is_none()
      }
    }
  }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ServerTemplateConfigDiff {
  Aws(aws::AwsServerTemplateConfigDiff),
  Hetzner(hetzner::HetznerServerTemplateConfigDiff),
}

impl From<ServerTemplateConfigDiff> for PartialServerTemplateConfig {
  fn from(value: ServerTemplateConfigDiff) -> Self {
    match value {
      ServerTemplateConfigDiff::Aws(diff) => {
        PartialServerTemplateConfig::Aws(diff.into())
      }
      ServerTemplateConfigDiff::Hetzner(diff) => {
        PartialServerTemplateConfig::Hetzner(diff.into())
      }
    }
  }
}

impl Diff for ServerTemplateConfigDiff {
  fn iter_field_diffs(
    &self,
  ) -> impl Iterator<Item = partial_derive2::FieldDiff> {
    match self {
      ServerTemplateConfigDiff::Aws(diff) => {
        diff.iter_field_diffs().collect::<Vec<_>>().into_iter()
      }
      ServerTemplateConfigDiff::Hetzner(diff) => {
        diff.iter_field_diffs().collect::<Vec<_>>().into_iter()
      }
    }
  }
}

impl
  PartialDiff<PartialServerTemplateConfig, ServerTemplateConfigDiff>
  for ServerTemplateConfig
{
  fn partial_diff(
    &self,
    partial: PartialServerTemplateConfig,
  ) -> ServerTemplateConfigDiff {
    match self {
      ServerTemplateConfig::Aws(original) => match partial {
        PartialServerTemplateConfig::Aws(partial) => {
          ServerTemplateConfigDiff::Aws(
            original.partial_diff(partial),
          )
        }
        PartialServerTemplateConfig::Hetzner(partial) => {
          let default = HetznerServerTemplateConfig::default();
          ServerTemplateConfigDiff::Hetzner(
            default.partial_diff(partial),
          )
        }
      },
      ServerTemplateConfig::Hetzner(original) => match partial {
        PartialServerTemplateConfig::Hetzner(partial) => {
          ServerTemplateConfigDiff::Hetzner(
            original.partial_diff(partial),
          )
        }
        PartialServerTemplateConfig::Aws(partial) => {
          let default = AwsServerTemplateConfig::default();
          ServerTemplateConfigDiff::Aws(default.partial_diff(partial))
        }
      },
    }
  }
}

impl MaybeNone for ServerTemplateConfigDiff {
  fn is_none(&self) -> bool {
    match self {
      ServerTemplateConfigDiff::Aws(config) => config.is_none(),
      ServerTemplateConfigDiff::Hetzner(config) => config.is_none(),
    }
  }
}

impl From<PartialServerTemplateConfig> for ServerTemplateConfig {
  fn from(
    value: PartialServerTemplateConfig,
  ) -> ServerTemplateConfig {
    match value {
      PartialServerTemplateConfig::Aws(config) => {
        ServerTemplateConfig::Aws(config.into())
      }
      PartialServerTemplateConfig::Hetzner(config) => {
        ServerTemplateConfig::Hetzner(config.into())
      }
    }
  }
}

impl From<ServerTemplateConfig> for PartialServerTemplateConfig {
  fn from(value: ServerTemplateConfig) -> Self {
    match value {
      ServerTemplateConfig::Aws(config) => {
        PartialServerTemplateConfig::Aws(config.into())
      }
      ServerTemplateConfig::Hetzner(config) => {
        PartialServerTemplateConfig::Hetzner(config.into())
      }
    }
  }
}

impl MergePartial for ServerTemplateConfig {
  type Partial = PartialServerTemplateConfig;
  fn merge_partial(
    self,
    partial: PartialServerTemplateConfig,
  ) -> ServerTemplateConfig {
    match partial {
      PartialServerTemplateConfig::Aws(partial) => match self {
        ServerTemplateConfig::Aws(config) => {
          let config = aws::AwsServerTemplateConfig {
            region: partial.region.unwrap_or(config.region),
            instance_type: partial
              .instance_type
              .unwrap_or(config.instance_type),
            volumes: partial.volumes.unwrap_or(config.volumes),
            ami_id: partial.ami_id.unwrap_or(config.ami_id),
            subnet_id: partial.subnet_id.unwrap_or(config.subnet_id),
            security_group_ids: partial
              .security_group_ids
              .unwrap_or(config.security_group_ids),
            key_pair_name: partial
              .key_pair_name
              .unwrap_or(config.key_pair_name),
            assign_public_ip: partial
              .assign_public_ip
              .unwrap_or(config.assign_public_ip),
            use_public_ip: partial
              .use_public_ip
              .unwrap_or(config.use_public_ip),
            port: partial.port.unwrap_or(config.port),
            use_https: partial.use_https.unwrap_or(config.use_https),
            user_data: partial.user_data.unwrap_or(config.user_data),
          };
          ServerTemplateConfig::Aws(config)
        }
        ServerTemplateConfig::Hetzner(_) => {
          ServerTemplateConfig::Aws(partial.into())
        }
      },
      PartialServerTemplateConfig::Hetzner(partial) => match self {
        ServerTemplateConfig::Hetzner(config) => {
          let config = hetzner::HetznerServerTemplateConfig {
            image: partial.image.unwrap_or(config.image),
            datacenter: partial
              .datacenter
              .unwrap_or(config.datacenter),
            private_network_ids: partial
              .private_network_ids
              .unwrap_or(config.private_network_ids),
            placement_group: partial
              .placement_group
              .unwrap_or(config.placement_group),
            enable_public_ipv4: partial
              .enable_public_ipv4
              .unwrap_or(config.enable_public_ipv4),
            enable_public_ipv6: partial
              .enable_public_ipv6
              .unwrap_or(config.enable_public_ipv6),
            firewall_ids: partial
              .firewall_ids
              .unwrap_or(config.firewall_ids),
            server_type: partial
              .server_type
              .unwrap_or(config.server_type),
            ssh_keys: partial.ssh_keys.unwrap_or(config.ssh_keys),
            user_data: partial.user_data.unwrap_or(config.user_data),
            use_public_ip: partial
              .use_public_ip
              .unwrap_or(config.use_public_ip),
            labels: partial.labels.unwrap_or(config.labels),
            volumes: partial.volumes.unwrap_or(config.volumes),
            port: partial.port.unwrap_or(config.port),
            use_https: partial.use_https.unwrap_or(config.use_https),
          };
          ServerTemplateConfig::Hetzner(config)
        }
        ServerTemplateConfig::Aws(_) => {
          ServerTemplateConfig::Hetzner(partial.into())
        }
      },
    }
  }
}

#[typeshare]
pub type ServerTemplateQuery =
  ResourceQuery<ServerTemplateQuerySpecifics>;

#[typeshare]
#[derive(
  Serialize, Deserialize, Debug, Clone, Default, DefaultBuilder,
)]
pub struct ServerTemplateQuerySpecifics {
  pub types: Vec<ServerTemplateConfigVariant>,
}

impl AddFilters for ServerTemplateQuerySpecifics {
  fn add_filters(&self, filters: &mut Document) {
    let types =
      self.types.iter().map(|t| t.as_ref()).collect::<Vec<_>>();
    if !self.types.is_empty() {
      filters.insert("config.type", doc! { "$in": types });
    }
  }
}
@@ -21,8 +21,10 @@ pub struct SystemInformation {
  pub host_name: Option<String>,
  /// The CPU's brand
  pub cpu_brand: String,
  /// Whether terminals are disabled on this Periphery
  /// Whether terminals are disabled on this Periphery server
  pub terminals_disabled: bool,
  /// Whether container exec is disabled on this Periphery server
  pub container_exec_disabled: bool,
}

/// System stats stored on the database.

@@ -10,7 +10,6 @@ use super::{
  deployment::_PartialDeploymentConfig, permission::PermissionLevel,
  procedure::_PartialProcedureConfig, repo::_PartialRepoConfig,
  server::_PartialServerConfig,
  server_template::PartialServerTemplateConfig,
  stack::_PartialStackConfig, sync::_PartialResourceSyncConfig,
  variable::Variable,
};
@@ -82,14 +81,6 @@ pub struct ResourcesToml {
  )]
  pub builders: Vec<ResourceToml<_PartialBuilderConfig>>,

  #[serde(
    default,
    alias = "server_template",
    skip_serializing_if = "Vec::is_empty"
  )]
  pub server_templates:
    Vec<ResourceToml<PartialServerTemplateConfig>>,

  #[serde(
    default,
    alias = "resource_sync",

@@ -39,6 +39,7 @@ pub mod busy;
pub mod deserializers;
pub mod entities;
pub mod parsers;
pub mod terminal;
pub mod ws;

mod request;

18 client/core/rs/src/terminal.rs Normal file
@@ -0,0 +1,18 @@
use futures::{Stream, StreamExt, TryStreamExt};

pub struct TerminalStreamResponse(pub reqwest::Response);

impl TerminalStreamResponse {
  pub fn into_line_stream(
    self,
  ) -> impl Stream<Item = Result<String, tokio_util::codec::LinesCodecError>>
  {
    tokio_util::codec::FramedRead::new(
      tokio_util::io::StreamReader::new(
        self.0.bytes_stream().map_err(std::io::Error::other),
      ),
      tokio_util::codec::LinesCodec::new(),
    )
    .map(|line| line.map(|line| line + "\n"))
  }
}
@@ -1,6 +1,6 @@
{
  "name": "komodo_client",
  "version": "1.17.4",
  "version": "1.17.5",
  "description": "Komodo client package",
  "homepage": "https://komo.do",
  "main": "dist/lib.js",

@@ -8,8 +8,10 @@ import {
import {
  AuthRequest,
  BatchExecutionResponse,
  ConnectContainerExecQuery,
  ConnectTerminalQuery,
  ExecuteRequest,
  ExecuteTerminalBody,
  ReadRequest,
  Update,
  UpdateListItem,
@@ -43,15 +45,16 @@ export function KomodoClient(url: string, options: InitOptions) {
    secret: options.type === "api-key" ? options.params.secret : undefined,
  };

  const request = <Req, Res>(
  const request = <Params, Res>(
    path: "/auth" | "/user" | "/read" | "/execute" | "/write",
    request: Req
    type: string,
    params: Params
  ): Promise<Res> =>
    new Promise(async (res, rej) => {
      try {
        let response = await fetch(url + path, {
        let response = await fetch(`${url}${path}/${type}`, {
          method: "POST",
          body: JSON.stringify(request),
          body: JSON.stringify(params),
          headers: {
            ...(state.jwt
              ? {
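A minimal sketch (not part of this commit) of what the reworked `request` helper produces: with the new signature, a call like `read("GetVersion", {})` is POSTed to `{url}/read/GetVersion` with the params object as the JSON body, so the request variant shows up directly in browser network logs. The URL and credentials below are placeholders.

```ts
import { KomodoClient } from "komodo_client";

// Sketch only: assumes a reachable Komodo Core at the placeholder URL.
const komodo = KomodoClient("https://komodo.example.com", {
  type: "api-key",
  params: { key: "your-key", secret: "your-secret" },
});

// Sent as POST https://komodo.example.com/read/GetVersion with body {}
const { version } = await komodo.read("GetVersion", {});
console.log(version);
```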
@@ -103,13 +106,11 @@ export function KomodoClient(url: string, options: InitOptions) {
    type: T,
    params: Req["params"]
  ) =>
    await request<
      { type: T; params: Req["params"] },
      AuthResponses[Req["type"]]
    >("/auth", {
    await request<Req["params"], AuthResponses[Req["type"]]>(
      "/auth",
      type,
      params,
    });
      params
    );

  const user = async <
    T extends UserRequest["type"],
@@ -118,10 +119,11 @@ export function KomodoClient(url: string, options: InitOptions) {
    type: T,
    params: Req["params"]
  ) =>
    await request<
      { type: T; params: Req["params"] },
      UserResponses[Req["type"]]
    >("/user", { type, params });
    await request<Req["params"], UserResponses[Req["type"]]>(
      "/user",
      type,
      params
    );

  const read = async <
    T extends ReadRequest["type"],
@@ -130,10 +132,11 @@ export function KomodoClient(url: string, options: InitOptions) {
    type: T,
    params: Req["params"]
  ) =>
    await request<
      { type: T; params: Req["params"] },
      ReadResponses[Req["type"]]
    >("/read", { type, params });
    await request<Req["params"], ReadResponses[Req["type"]]>(
      "/read",
      type,
      params
    );

  const write = async <
    T extends WriteRequest["type"],
@@ -142,10 +145,11 @@ export function KomodoClient(url: string, options: InitOptions) {
    type: T,
    params: Req["params"]
  ) =>
    await request<
      { type: T; params: Req["params"] },
      WriteResponses[Req["type"]]
    >("/write", { type, params });
    await request<Req["params"], WriteResponses[Req["type"]]>(
      "/write",
      type,
      params
    );

  const execute = async <
    T extends ExecuteRequest["type"],
@@ -154,10 +158,11 @@ export function KomodoClient(url: string, options: InitOptions) {
    type: T,
    params: Req["params"]
  ) =>
    await request<
      { type: T; params: Req["params"] },
      ExecuteResponses[Req["type"]]
    >("/execute", { type, params });
    await request<Req["params"], ExecuteResponses[Req["type"]]>(
      "/execute",
      type,
      params
    );

  const execute_and_poll = async <
    T extends ExecuteRequest["type"],
@@ -197,10 +202,58 @@ export function KomodoClient(url: string, options: InitOptions) {

  const core_version = () => read("GetVersion", {}).then((res) => res.version);

  const subscribe_to_update_websocket = async ({
  const get_update_websocket = ({
    on_update,
    on_login,
    on_open,
    on_close,
  }: {
    on_update: (update: UpdateListItem) => void;
    on_login?: () => void;
    on_open?: () => void;
    on_close?: () => void;
  }) => {
    const ws = new WebSocket(url.replace("http", "ws") + "/ws/update");

    // Handle login on websocket open
    ws.addEventListener("open", () => {
      on_open?.();
      const login_msg: WsLoginMessage =
        options.type === "jwt"
          ? {
              type: "Jwt",
              params: {
                jwt: options.params.jwt,
              },
            }
          : {
              type: "ApiKeys",
              params: {
                key: options.params.key,
                secret: options.params.secret,
              },
            };
      ws.send(JSON.stringify(login_msg));
    });

    ws.addEventListener("message", ({ data }: MessageEvent) => {
      if (data == "LOGGED_IN") return on_login?.();
      on_update(JSON.parse(data));
    });

    if (on_close) {
      ws.addEventListener("close", on_close);
    }

    return ws;
  };

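A brief usage sketch (illustrative, not part of the diff) for the new `get_update_websocket` helper above, which opens the update socket once without the reconnect loop. `komodo` is assumed to be a client instance created as in the earlier sketch.

```ts
// Sketch: subscribe to live updates, then close the socket manually.
const ws = komodo.get_update_websocket({
  on_update: (update) => console.log("update:", update),
  on_login: () => console.log("logged in to /ws/update"),
  on_close: () => console.log("update socket closed"),
});

// Later, when the consumer is done:
ws.close();
```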
  const subscribe_to_update_websocket = async ({
    on_update,
    on_open,
    on_login,
    on_close,
    retry = true,
    retry_timeout_ms = 5_000,
    cancel = new CancelToken(),
    on_cancel,
@@ -209,6 +262,7 @@ export function KomodoClient(url: string, options: InitOptions) {
    on_login?: () => void;
    on_open?: () => void;
    on_close?: () => void;
    retry?: boolean;
    retry_timeout_ms?: number;
    cancel?: CancelToken;
    on_cancel?: () => void;
@@ -220,37 +274,13 @@ export function KomodoClient(url: string, options: InitOptions) {
      }

      try {
        const ws = new WebSocket(url.replace("http", "ws") + "/ws/update");

        // Handle login on websocket open
        ws.addEventListener("open", () => {
          const login_msg: WsLoginMessage =
            options.type === "jwt"
              ? {
                  type: "Jwt",
                  params: {
                    jwt: options.params.jwt,
                  },
                }
              : {
                  type: "ApiKeys",
                  params: {
                    key: options.params.key,
                    secret: options.params.secret,
                  },
                };
          ws.send(JSON.stringify(login_msg));
        const ws = get_update_websocket({
          on_open,
          on_login,
          on_update,
          on_close,
        });

        ws.addEventListener("message", ({ data }: MessageEvent) => {
          if (data == "LOGGED_IN") return on_login?.();
          on_update(JSON.parse(data));
        });

        if (on_close) {
          ws.addEventListener("close", on_close);
        }

        // This while loop will end when the socket is closed
        while (
          ws.readyState !== WebSocket.CLOSING &&
@@ -261,12 +291,20 @@ export function KomodoClient(url: string, options: InitOptions) {
          await new Promise((resolve) => setTimeout(resolve, 500));
        }

        // Sleep for a bit before retrying connection to avoid spam.
        await new Promise((resolve) => setTimeout(resolve, retry_timeout_ms));
        if (retry) {
          // Sleep for a bit before retrying connection to avoid spam.
          await new Promise((resolve) => setTimeout(resolve, retry_timeout_ms));
        } else {
          return;
        }
      } catch (error) {
        console.error(error);
        // Sleep for a bit before retrying, maybe Komodo Core is down temporarily.
        await new Promise((resolve) => setTimeout(resolve, retry_timeout_ms));
        if (retry) {
          // Sleep for a bit before retrying, maybe Komodo Core is down temporarily.
          await new Promise((resolve) => setTimeout(resolve, retry_timeout_ms));
        } else {
          return;
        }
      }
    }
  };
@@ -327,6 +365,156 @@ export function KomodoClient(url: string, options: InitOptions) {
    return ws;
  };

  const connect_container_exec = ({
    query,
    on_message,
    on_login,
    on_open,
    on_close,
  }: {
    query: ConnectContainerExecQuery;
    on_message?: (e: MessageEvent<any>) => void;
    on_login?: () => void;
    on_open?: () => void;
    on_close?: () => void;
  }) => {
    const url_query = new URLSearchParams(
      query as any as Record<string, string>
    ).toString();
    const ws = new WebSocket(
      url.replace("http", "ws") + "/ws/container?" + url_query
    );
    // Handle login on websocket open
    ws.onopen = () => {
      const login_msg: WsLoginMessage =
        options.type === "jwt"
          ? {
              type: "Jwt",
              params: {
                jwt: options.params.jwt,
              },
            }
          : {
              type: "ApiKeys",
              params: {
                key: options.params.key,
                secret: options.params.secret,
              },
            };
      ws.send(JSON.stringify(login_msg));
      on_open?.();
    };

    ws.onmessage = (e) => {
      if (e.data == "LOGGED_IN") {
        ws.binaryType = "arraybuffer";
        ws.onmessage = (e) => on_message?.(e);
        on_login?.();
        return;
      } else {
        on_message?.(e);
      }
    };

    ws.onclose = () => on_close?.();

    return ws;
  };

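An illustrative sketch (not part of this commit) of wiring `connect_container_exec` to an xterm.js terminal. The server and container names are placeholders, and the exact write/encoding handling will depend on the consuming app.

```ts
import { Terminal } from "@xterm/xterm";

// Sketch: attach a container exec session to an xterm.js Terminal.
const term = new Terminal();
const ws = komodo.connect_container_exec({
  query: { server: "my-server", container: "my-container", shell: "sh" },
  // After login the socket switches to binary frames (arraybuffer).
  on_message: (e) => term.write(new Uint8Array(e.data)),
  on_login: () => console.log("exec session ready"),
  on_close: () => term.write("\r\n[session closed]\r\n"),
});

// Forward keystrokes from the terminal to the exec session.
term.onData((data) => ws.send(data));
```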
  const execute_terminal_stream = (request: ExecuteTerminalBody) =>
    new Promise<AsyncIterable<string>>(async (res, rej) => {
      try {
        let response = await fetch(url + "/terminal/execute", {
          method: "POST",
          body: JSON.stringify(request),
          headers: {
            ...(state.jwt
              ? {
                  authorization: state.jwt,
                }
              : state.key && state.secret
                ? {
                    "x-api-key": state.key,
                    "x-api-secret": state.secret,
                  }
                : {}),
            "content-type": "application/json",
          },
        });
        if (response.status === 200) {
          if (response.body) {
            const stream = response.body
              .pipeThrough(new TextDecoderStream("utf-8"))
              .pipeThrough(
                new TransformStream<string, string>({
                  start(_controller) {
                    this.tail = "";
                  },
                  transform(chunk, controller) {
                    const data = this.tail + chunk; // prepend any carry-over
                    const parts = data.split(/\r?\n/); // split on CRLF or LF
                    this.tail = parts.pop()!; // last item may be incomplete
                    for (const line of parts) controller.enqueue(line);
                  },
                  flush(controller) {
                    if (this.tail) controller.enqueue(this.tail); // final unterminated line
                  },
                } as Transformer<string, string> & { tail: string })
              );
            res(stream);
          } else {
            rej({
              status: response.status,
              result: { error: "No response body", trace: [] },
            });
          }
        } else {
          try {
            const result = await response.json();
            rej({ status: response.status, result });
          } catch (error) {
            rej({
              status: response.status,
              result: {
                error: "Failed to get response body",
                trace: [JSON.stringify(error)],
              },
              error,
            });
          }
        }
      } catch (error) {
        rej({
          status: 1,
          result: {
            error: "Request failed with error",
            trace: [JSON.stringify(error)],
          },
          error,
        });
      }
    });

  const execute_terminal = async (
    request: ExecuteTerminalBody,
    callbacks?: {
      onLine?: (line: string) => void | Promise<void>;
      onFinish?: (code: string) => void | Promise<void>;
    }
  ) => {
    const stream = await execute_terminal_stream(request);
    for await (const line of stream) {
      if (line.startsWith("__KOMODO_EXIT_CODE")) {
        await callbacks?.onFinish?.(line.split(":")[1]);
        return;
      } else {
        await callbacks?.onLine?.(line);
      }
    }
    // This is hit if no __KOMODO_EXIT_CODE is sent, ie early exit
    await callbacks?.onFinish?.("Early exit without code");
  };

  return {
    /**
     * Call the `/auth` api.
@@ -411,6 +599,11 @@ export function KomodoClient(url: string, options: InitOptions) {
    poll_update_until_complete,
    /** Returns the version of Komodo Core the client is calling to. */
    core_version,
    /**
     * Connects to update websocket, performs login and attaches handlers,
     * and returns the WebSocket handle.
     */
    get_update_websocket,
    /**
     * Subscribes to the update websocket with automatic reconnect loop.
     *
@@ -422,5 +615,53 @@ export function KomodoClient(url: string, options: InitOptions) {
     * for use with xtermjs.
     */
    connect_terminal,
    /**
     * Subscribes to container exec io over websocket message,
     * for use with xtermjs.
     */
    connect_container_exec,
    /**
     * Executes a command on a given Server / terminal,
     * and returns a stream to process the output as it comes in.
     *
     * Note. The final line of the stream will usually be
     * `__KOMODO_EXIT_CODE__:0`. The number
     * is the exit code of the command.
     *
     * If this line is NOT present, it means the stream
     * was terminated early, ie like running `exit`.
     *
     * ```ts
     * const stream = await komodo.execute_terminal_stream({
     *   server: "my-server",
     *   terminal: "name",
     *   command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
     * });
     *
     * for await (const line of stream) {
     *   console.log(line);
     * }
     * ```
     */
    execute_terminal_stream,
    /**
     * Executes a command on a given Server / terminal,
     * and gives a callback to handle the output as it comes in.
     *
     * ```ts
     * const stream = await komodo.execute_terminal(
     *   {
     *     server: "my-server",
     *     terminal: "name",
     *     command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
     *   },
     *   {
     *     onLine: (line) => console.log(line),
     *     onFinish: (code) => console.log("Finished:", code),
     *   }
     * );
     * ```
     */
    execute_terminal,
  };
}

@@ -50,12 +50,6 @@ export type ReadResponses = {
  ListActions: Types.ListActionsResponse;
  ListFullActions: Types.ListFullActionsResponse;

  // ==== SERVER TEMPLATE ====
  GetServerTemplate: Types.GetServerTemplateResponse;
  GetServerTemplatesSummary: Types.GetServerTemplatesSummaryResponse;
  ListServerTemplates: Types.ListServerTemplatesResponse;
  ListFullServerTemplates: Types.ListFullServerTemplatesResponse;

  // ==== SERVER ====
  GetServersSummary: Types.GetServersSummaryResponse;
  GetServer: Types.GetServerResponse;
@@ -208,7 +202,6 @@ export type WriteResponses = {
  UpdateDescription: Types.UpdateDescriptionResponse;

  // ==== SERVER ====
  LaunchServer: Types.Update;
  CreateServer: Types.Server;
  DeleteServer: Types.Server;
  UpdateServer: Types.Server;
@@ -244,13 +237,6 @@ export type WriteResponses = {
  UpdateBuilder: Types.Builder;
  RenameBuilder: Types.Update;

  // ==== SERVER TEMPLATE ====
  CreateServerTemplate: Types.ServerTemplate;
  CopyServerTemplate: Types.ServerTemplate;
  DeleteServerTemplate: Types.ServerTemplate;
  UpdateServerTemplate: Types.ServerTemplate;
  RenameServerTemplate: Types.Update;

  // ==== REPO ====
  CreateRepo: Types.Repo;
  CopyRepo: Types.Repo;
@@ -386,9 +372,6 @@ export type ExecuteResponses = {
  RunAction: Types.Update;
  BatchRunAction: Types.BatchExecutionResponse;

  // ==== SERVER TEMPLATE ====
  LaunchServer: Types.Update;

  // ==== SYNC ====
  RunSync: Types.Update;

@@ -398,6 +381,7 @@ export type ExecuteResponses = {
  DeployStackIfChanged: Types.Update;
  BatchDeployStackIfChanged: Types.BatchExecutionResponse;
  PullStack: Types.Update;
  BatchPullStack: Types.BatchExecutionResponse;
  StartStack: Types.Update;
  RestartStack: Types.Update;
  StopStack: Types.Update;

@@ -99,6 +99,11 @@ export interface ActionConfig {
   * If its an empty string, use the default secret from the config.
   */
  webhook_secret?: string;
  /**
   * Whether deno will be instructed to reload all dependencies,
   * this can usually be kept false outside of development.
   */
  reload_deno_deps?: boolean;
  /**
   * Typescript file contents using pre-initialized `komodo` client.
   * Supports variable / secret interpolation.
@@ -201,7 +206,6 @@ export type ResourceTarget =
  | { type: "Action", id: string }
  | { type: "Builder", id: string }
  | { type: "Alerter", id: string }
  | { type: "ServerTemplate", id: string }
  | { type: "ResourceSync", id: string };

export interface AlerterConfig {
@@ -564,6 +568,7 @@ export type Execution =
  | { type: "DeployStackIfChanged", params: DeployStackIfChanged }
  | { type: "BatchDeployStackIfChanged", params: BatchDeployStackIfChanged }
  | { type: "PullStack", params: PullStack }
  | { type: "BatchPullStack", params: BatchPullStack }
  | { type: "StartStack", params: StartStack }
  | { type: "RestartStack", params: RestartStack }
  | { type: "PauseStack", params: PauseStack }
@@ -1719,16 +1724,6 @@ export type Server = Resource<ServerConfig, undefined>;

export type GetServerResponse = Server;

export type ServerTemplateConfig =
  /** Template to launch an AWS EC2 instance */
  | { type: "Aws", params: AwsServerTemplateConfig }
  /** Template to launch a Hetzner server */
  | { type: "Hetzner", params: HetznerServerTemplateConfig };

export type ServerTemplate = Resource<ServerTemplateConfig, undefined>;

export type GetServerTemplateResponse = ServerTemplate;

export interface StackActionState {
  pulling: boolean;
  deploying: boolean;
@@ -1999,8 +1994,10 @@ export interface SystemInformation {
  host_name?: string;
  /** The CPU's brand */
  cpu_brand: string;
  /** Whether terminals are disabled on this Periphery */
  /** Whether terminals are disabled on this Periphery server */
  terminals_disabled: boolean;
  /** Whether container exec is disabled on this Periphery server */
  container_exec_disabled: boolean;
}

export type GetSystemInformationResponse = SystemInformation;
@@ -2240,11 +2237,6 @@ export enum Operation {
  RenameAlerter = "RenameAlerter",
  DeleteAlerter = "DeleteAlerter",
  TestAlerter = "TestAlerter",
  CreateServerTemplate = "CreateServerTemplate",
  UpdateServerTemplate = "UpdateServerTemplate",
  RenameServerTemplate = "RenameServerTemplate",
  DeleteServerTemplate = "DeleteServerTemplate",
  LaunchServer = "LaunchServer",
  CreateResourceSync = "CreateResourceSync",
  UpdateResourceSync = "UpdateResourceSync",
  RenameResourceSync = "RenameResourceSync",
@@ -3352,8 +3344,6 @@ export type ListFullReposResponse = Repo[];

export type ListFullResourceSyncsResponse = ResourceSync[];

export type ListFullServerTemplatesResponse = ServerTemplate[];

export type ListFullServersResponse = Server[];

export type ListFullStacksResponse = Stack[];
@@ -3513,17 +3503,6 @@ export type ListResourceSyncsResponse = ResourceSyncListItem[];

export type ListSecretsResponse = string[];

export interface ServerTemplateListItemInfo {
  /** The cloud provider */
  provider: string;
  /** The instance type, eg c5.2xlarge on for Aws templates */
  instance_type?: string;
}

export type ServerTemplateListItem = ResourceListItem<ServerTemplateListItemInfo>;

export type ListServerTemplatesResponse = ServerTemplateListItem[];

export enum ServerState {
  /** Server is unreachable. */
  NotOk = "NotOk",
@@ -3550,6 +3529,8 @@ export interface ServerListItemInfo {
  send_disk_alerts: boolean;
  /** Whether terminals are disabled for this Server. */
  terminals_disabled: boolean;
  /** Whether container exec is disabled for this Server. */
  container_exec_disabled: boolean;
}

export type ServerListItem = ResourceListItem<ServerListItemInfo>;
@@ -3735,12 +3716,6 @@ export interface ServerQuerySpecifics {
/** Server-specific query */
export type ServerQuery = ResourceQuery<ServerQuerySpecifics>;

export interface ServerTemplateQuerySpecifics {
  types: ServerTemplateConfig["type"][];
}

export type ServerTemplateQuery = ResourceQuery<ServerTemplateQuerySpecifics>;

export type SetLastSeenUpdateResponse = NoData;

export interface StackQuerySpecifics {
@@ -3794,8 +3769,6 @@ export type _PartialAlerterConfig = Partial<AlerterConfig>;

export type _PartialAwsBuilderConfig = Partial<AwsBuilderConfig>;

export type _PartialAwsServerTemplateConfig = Partial<AwsServerTemplateConfig>;

export type _PartialBuildConfig = Partial<BuildConfig>;

export type _PartialBuilderConfig = Partial<BuilderConfig>;
@@ -3806,8 +3779,6 @@ export type _PartialDockerRegistryAccount = Partial<DockerRegistryAccount>;

export type _PartialGitProviderAccount = Partial<GitProviderAccount>;

export type _PartialHetznerServerTemplateConfig = Partial<HetznerServerTemplateConfig>;

export type _PartialProcedureConfig = Partial<ProcedureConfig>;

export type _PartialRepoConfig = Partial<RepoConfig>;
@@ -3888,67 +3859,6 @@ export interface AwsBuilderConfig {
  secrets?: string[];
}

export enum AwsVolumeType {
  Gp2 = "gp2",
  Gp3 = "gp3",
  Io1 = "io1",
  Io2 = "io2",
}

/**
 * For information on AWS volumes, see
 * `<https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html>`.
 */
export interface AwsVolume {
  /** The device name (for example, `/dev/sda1` or `xvdh`). */
  device_name: string;
  /** The size of the volume in GB */
  size_gb: number;
  /** The type of volume. Options: gp2, gp3, io1, io2. */
  volume_type: AwsVolumeType;
  /** The iops of the volume, or 0 for AWS default. */
  iops: number;
  /** The throughput of the volume, or 0 for AWS default. */
  throughput: number;
}

/** Aws EC2 instance config. */
export interface AwsServerTemplateConfig {
  /** The aws region to launch the server in, eg. us-east-1 */
  region: string;
  /** The instance type to launch, eg. c5.2xlarge */
  instance_type: string;
  /** Specify the ami id to use. Must be set up to start the periphery binary on startup. */
  ami_id: string;
  /** The subnet to assign to the instance. */
  subnet_id: string;
  /** The key pair name to give to the instance in case SSH access required. */
  key_pair_name: string;
  /**
   * Assign a public ip to the instance. Depending on how your network is
   * setup, this may be required for the instance to reach the public internet.
   */
  assign_public_ip: boolean;
  /**
   * Use the instances public ip as the address for the server.
   * Could be used when build instances are created in another non-interconnected network to the core api.
   */
  use_public_ip: boolean;
  /**
   * The port periphery will be running on in AMI.
   * Default: `8120`
   */
  port: number;
  /** Whether Periphery will be running on https */
  use_https: boolean;
  /** The security groups to give to the instance. */
  security_group_ids?: string[];
  /** Specify the EBS volumes to attach. */
  volumes: AwsVolume[];
  /** The user data to deploy the instance with. */
  user_data: string;
}

/** Builds multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchBuildRepo {
  /**
@@ -4090,6 +4000,23 @@ export interface BatchPullRepo {
  pattern: string;
}

/** Pulls multiple Stacks in parallel that match pattern. Response: [BatchExecutionResponse]. */
export interface BatchPullStack {
  /**
   * Id or name or wildcard pattern or regex.
   * Supports multiline and comma delineated combinations of the above.
   *
   * Example:
   * ```
   * # match all foo-* stacks
   * foo-*
   * # add some more
   * extra-stack-1, extra-stack-2
   * ```
   */
  pattern: string;
}

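A short sketch (not part of the diff) of invoking the new BatchPullStack execution through the client; the pattern is a placeholder.

```ts
// Sketch: pull every stack matching the pattern in one batch call.
const results = await komodo.execute("BatchPullStack", {
  pattern: "foo-*",
});
console.log(results);
```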
/** Runs multiple Actions in parallel that match pattern. Response: [BatchExecutionResponse] */
export interface BatchRunAction {
  /**
@@ -4236,6 +4163,19 @@ export interface CommitSync {
  sync: string;
}

/**
 * Query to connect to a container exec session (interactive shell over websocket) on the given server.
 * TODO: Document calling.
 */
export interface ConnectContainerExecQuery {
  /** Server Id or name */
  server: string;
  /** The container name */
  container: string;
  /** The shell to connect to */
  shell: string;
}

/**
 * Query to connect to a terminal (interactive shell over websocket) on the given server.
 * TODO: Document calling.
@@ -4245,15 +4185,11 @@ export interface ConnectTerminalQuery {
  server: string;
  /**
   * Each periphery can keep multiple terminals open.
   * If a terminals with the specified name already exists,
   * it will be attached to.
   * Otherwise a new terminal will be created for the command,
   * which will persist until it is deleted using
   * [DeleteTerminal][crate::api::write::server::DeleteTerminal]
   * If a terminals with the specified name does not exist,
   * the call will fail.
   * Create a terminal using [CreateTerminal][super::write::server::CreateTerminal]
   */
  terminal: string;
  /** Optional. The initial command to execute on connection to the shell. */
  init?: string;
}

export interface Conversion {
@@ -4351,17 +4287,6 @@ export interface CopyResourceSync {
  id: string;
}

/**
 * Creates a new server template with given `name` and the configuration
 * of the server template at the given `id`. Response: [ServerTemplate]
 */
export interface CopyServerTemplate {
  /** The name of the new server template. */
  name: string;
  /** The id of the server template to copy. */
  id: string;
}

/**
 * Creates a new stack with given `name` and the configuration
 * of the stack at the given `id`. Response: [Stack].
@@ -4577,18 +4502,6 @@ export interface CreateServer {
  config?: _PartialServerConfig;
}

export type PartialServerTemplateConfig =
  | { type: "Aws", params: _PartialAwsServerTemplateConfig }
  | { type: "Hetzner", params: _PartialHetznerServerTemplateConfig };

/** Create a server template. Response: [ServerTemplate]. */
export interface CreateServerTemplate {
  /** The name given to newly created server template. */
  name: string;
  /** Optional partial config to initialize the server template with. */
  config?: PartialServerTemplateConfig;
}

/**
 * **Admin only.** Create a service user.
 * Response: [User].
@@ -4887,15 +4800,6 @@ export interface DeleteServer {
  id: string;
}

/**
 * Deletes the server template at the given id, and returns the deleted server template.
 * Response: [ServerTemplate]
 */
export interface DeleteServerTemplate {
  /** The id or name of the server template to delete. */
  id: string;
}

/**
 * Deletes the stack at the given id, and returns the deleted stack.
 * Response: [Stack]
@@ -5106,6 +5010,24 @@ export interface ExchangeForJwt {
  token: string;
}

/**
 * Execute a terminal command on the given server.
 * TODO: Document calling.
 */
export interface ExecuteTerminalBody {
  /** Server Id or name */
  server: string;
  /**
   * The name of the terminal on the server to use to execute.
   * If the terminal at name exists, it will be used to execute the command.
   * Otherwise, a new terminal will be created for this command, which will
   * persist until it exits or is deleted.
   */
  terminal: string;
  /** The command to execute. */
  command: string;
}

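Since the calling pattern above is still marked TODO, here is a hedged sketch of sending an ExecuteTerminalBody through the client's `execute_terminal` helper; the server, terminal, and command values are placeholders.

```ts
// Sketch: run a command on a named terminal and stream the output.
await komodo.execute_terminal(
  {
    server: "my-server",
    terminal: "default",
    command: "docker ps",
  },
  {
    onLine: (line) => console.log(line),
    onFinish: (code) => console.log("exit code:", code),
  }
);
```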
/**
 * Get pretty formatted monrun sync toml for all resources
 * which the user has permissions to view.
@@ -5351,6 +5273,8 @@ export interface GetCoreInfoResponse {
  disable_confirm_dialog: boolean;
  /** The repo owners for which github webhook management api is available */
  github_webhook_owners: string[];
  /** Whether to disable websocket automatic reconnect. */
  disable_websocket_reconnect: boolean;
}

/** Get a specific deployment by name or id. Response: [Deployment]. */
@@ -5735,25 +5659,6 @@ export interface GetServerStateResponse {
  status: ServerState;
}

/** Get a specific server template by id or name. Response: [ServerTemplate]. */
export interface GetServerTemplate {
  /** Id or name */
  server_template: string;
}

/**
 * Gets a summary of data relating to all server templates.
 * Response: [GetServerTemplatesSummaryResponse].
 */
export interface GetServerTemplatesSummary {
}

/** Response for [GetServerTemplatesSummary]. */
export interface GetServerTemplatesSummaryResponse {
  /** The total number of server templates. */
  total: number;
}

/**
 * Gets a summary of data relating to all servers.
 * Response: [GetServersSummaryResponse].
@@ -5963,112 +5868,6 @@ export interface GetVersionResponse {
  version: string;
}

export enum HetznerDatacenter {
  Nuremberg1Dc3 = "Nuremberg1Dc3",
  Helsinki1Dc2 = "Helsinki1Dc2",
  Falkenstein1Dc14 = "Falkenstein1Dc14",
  AshburnDc1 = "AshburnDc1",
  HillsboroDc1 = "HillsboroDc1",
  SingaporeDc1 = "SingaporeDc1",
}

export enum HetznerServerType {
  /** CPX11 - AMD 2 Cores, 2 Gb Ram, 40 Gb disk */
  SharedAmd2Core2Ram40Disk = "SharedAmd2Core2Ram40Disk",
  /** CAX11 - Arm 2 Cores, 4 Gb Ram, 40 Gb disk */
  SharedArm2Core4Ram40Disk = "SharedArm2Core4Ram40Disk",
  /** CX22 - Intel 2 Cores, 4 Gb Ram, 40 Gb disk */
  SharedIntel2Core4Ram40Disk = "SharedIntel2Core4Ram40Disk",
  /** CPX21 - AMD 3 Cores, 4 Gb Ram, 80 Gb disk */
  SharedAmd3Core4Ram80Disk = "SharedAmd3Core4Ram80Disk",
  /** CAX21 - Arm 4 Cores, 8 Gb Ram, 80 Gb disk */
  SharedArm4Core8Ram80Disk = "SharedArm4Core8Ram80Disk",
  /** CX32 - Intel 4 Cores, 8 Gb Ram, 80 Gb disk */
  SharedIntel4Core8Ram80Disk = "SharedIntel4Core8Ram80Disk",
  /** CPX31 - AMD 4 Cores, 8 Gb Ram, 160 Gb disk */
  SharedAmd4Core8Ram160Disk = "SharedAmd4Core8Ram160Disk",
  /** CAX31 - Arm 8 Cores, 16 Gb Ram, 160 Gb disk */
  SharedArm8Core16Ram160Disk = "SharedArm8Core16Ram160Disk",
  /** CX42 - Intel 8 Cores, 16 Gb Ram, 160 Gb disk */
  SharedIntel8Core16Ram160Disk = "SharedIntel8Core16Ram160Disk",
  /** CPX41 - AMD 8 Cores, 16 Gb Ram, 240 Gb disk */
  SharedAmd8Core16Ram240Disk = "SharedAmd8Core16Ram240Disk",
  /** CAX41 - Arm 16 Cores, 32 Gb Ram, 320 Gb disk */
  SharedArm16Core32Ram320Disk = "SharedArm16Core32Ram320Disk",
  /** CX52 - Intel 16 Cores, 32 Gb Ram, 320 Gb disk */
  SharedIntel16Core32Ram320Disk = "SharedIntel16Core32Ram320Disk",
  /** CPX51 - AMD 16 Cores, 32 Gb Ram, 360 Gb disk */
  SharedAmd16Core32Ram360Disk = "SharedAmd16Core32Ram360Disk",
  /** CCX13 - AMD 2 Cores, 8 Gb Ram, 80 Gb disk */
  DedicatedAmd2Core8Ram80Disk = "DedicatedAmd2Core8Ram80Disk",
  /** CCX23 - AMD 4 Cores, 16 Gb Ram, 160 Gb disk */
  DedicatedAmd4Core16Ram160Disk = "DedicatedAmd4Core16Ram160Disk",
  /** CCX33 - AMD 8 Cores, 32 Gb Ram, 240 Gb disk */
  DedicatedAmd8Core32Ram240Disk = "DedicatedAmd8Core32Ram240Disk",
  /** CCX43 - AMD 16 Cores, 64 Gb Ram, 360 Gb disk */
  DedicatedAmd16Core64Ram360Disk = "DedicatedAmd16Core64Ram360Disk",
  /** CCX53 - AMD 32 Cores, 128 Gb Ram, 600 Gb disk */
  DedicatedAmd32Core128Ram600Disk = "DedicatedAmd32Core128Ram600Disk",
  /** CCX63 - AMD 48 Cores, 192 Gb Ram, 960 Gb disk */
  DedicatedAmd48Core192Ram960Disk = "DedicatedAmd48Core192Ram960Disk",
}

export enum HetznerVolumeFormat {
  Xfs = "Xfs",
  Ext4 = "Ext4",
}

export interface HetznerVolumeSpecs {
  /** A name for the volume */
  name: string;
  /** Size of the volume in GB */
  size_gb: I64;
  /** The format for the volume */
  format?: HetznerVolumeFormat;
  /** Labels for the volume */
  labels?: Record<string, string>;
}

/** Hetzner server config. */
export interface HetznerServerTemplateConfig {
  /** ID or name of the Image the Server is created from */
  image: string;
  /** ID or name of Datacenter to create Server in */
  datacenter?: HetznerDatacenter;
  /**
   * ID of the Placement Group the server should be in,
   * Or 0 to not use placement group.
   */
  placement_group?: I64;
  /** ID or name of the Server type this Server should be created with */
  server_type?: HetznerServerType;
  /** SSH key IDs ( integer ) or names ( string ) which should be injected into the Server at creation time */
  ssh_keys?: string[];
  /** Network IDs which should be attached to the Server private network interface at the creation time */
  private_network_ids?: I64[];
  /** Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached. */
  enable_public_ipv4?: boolean;
  /** Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached. */
  enable_public_ipv6?: boolean;
  /** Connect to the instance using it's public ip. */
  use_public_ip?: boolean;
  /**
   * The port periphery will be running on in AMI.
   * Default: `8120`
   */
  port: number;
  /** Whether Periphery will be running on https */
  use_https: boolean;
  /** The firewalls to attach to the instance */
  firewall_ids?: I64[];
  /** Labels for the server */
  labels?: Record<string, string>;
  /** Specs for volumes to attach */
  volumes?: HetznerVolumeSpecs[];
  /** Cloud-Init user data to use during Server creation. This field is limited to 32KiB. */
  user_data: string;
}

/** Inspect a docker container on the server. Response: [Container]. */
export interface InspectDockerContainer {
  /** Id or name */
@@ -6106,17 +5905,6 @@ export interface LatestCommit {
  message: string;
}

/**
 * Launch an EC2 instance with the specified config.
 * Response: [Update].
 */
export interface LaunchServer {
  /** The name of the created server. */
  name: string;
  /** The server template used to define the config. */
  server_template: string;
}

/** List actions matching optional query. Response: [ListActionsResponse]. */
export interface ListActions {
  /** optional structured query to filter actions. */
@@ -6407,11 +6195,6 @@ export interface ListFullResourceSyncs {
  query?: ResourceSyncQuery;
}

/** List server templates matching structured query. Response: [ListFullServerTemplatesResponse]. */
export interface ListFullServerTemplates {
  query?: ServerTemplateQuery;
}

/** List servers matching optional query. Response: [ListFullServersResponse]. */
export interface ListFullServers {
  /** optional structured query to filter servers. */
@@ -6490,11 +6273,6 @@ export interface ListSecrets {
  target?: ResourceTarget;
}

/** List server templates matching structured query. Response: [ListServerTemplatesResponse]. */
export interface ListServerTemplates {
  query?: ServerTemplateQuery;
}

/** List servers matching optional query. Response: [ListServersResponse]. */
export interface ListServers {
  /** optional structured query to filter servers. */
@@ -6993,17 +6771,6 @@ export interface RenameServer {
  name: string;
}

/**
 * Rename the ServerTemplate at id to the given name.
 * Response: [Update].
 */
export interface RenameServerTemplate {
  /** The id or name of the ServerTemplate to rename. */
  id: string;
  /** The new name. */
  name: string;
}

/** Rename the stack at id to the given name. Response: [Update]. */
export interface RenameStack {
  /** The id of the stack to rename. */
@@ -7076,7 +6843,6 @@ export interface ResourcesToml {
  actions?: ResourceToml<_PartialActionConfig>[];
  alerters?: ResourceToml<_PartialAlerterConfig>[];
  builders?: ResourceToml<_PartialBuilderConfig>[];
  server_templates?: ResourceToml<PartialServerTemplateConfig>[];
  resource_syncs?: ResourceToml<_PartialResourceSyncConfig>[];
  user_groups?: UserGroupToml[];
  variables?: Variable[];
@@ -7503,7 +7269,7 @@ export interface UpdateAlerter {
 * field changes occur from out of date local state.
 */
export interface UpdateBuild {
  /** The id of the build to update. */
  /** The id or name of the build to update. */
  id: string;
  /** The partial config update to apply. */
  config: _PartialBuildConfig;
@@ -7676,23 +7442,6 @@ export interface UpdateServer {
  config: _PartialServerConfig;
}

/**
 * Update the server template at the given id, and return the updated server template.
 * Response: [ServerTemplate].
 *
 * Note. This method updates only the fields which are set in the [PartialServerTemplateConfig],
 * effectively merging diffs into the final document.
 * This is helpful when multiple users are using
 * the same resources concurrently by ensuring no unintentional
 * field changes occur from out of date local state.
 */
||||
export interface UpdateServerTemplate {
|
||||
/** The id of the server template to update. */
|
||||
id: string;
|
||||
/** The partial config update to apply. */
|
||||
config: PartialServerTemplateConfig;
|
||||
}
|
||||
|
||||
/**
|
||||
* **Admin only.** Update a service user's description.
|
||||
* Response: [User].
|
||||
@@ -7896,6 +7645,7 @@ export type ExecuteRequest =
|
||||
| { type: "DeployStackIfChanged", params: DeployStackIfChanged }
|
||||
| { type: "BatchDeployStackIfChanged", params: BatchDeployStackIfChanged }
|
||||
| { type: "PullStack", params: PullStack }
|
||||
| { type: "BatchPullStack", params: BatchPullStack }
|
||||
| { type: "StartStack", params: StartStack }
|
||||
| { type: "RestartStack", params: RestartStack }
|
||||
| { type: "StopStack", params: StopStack }
|
||||
@@ -7917,7 +7667,6 @@ export type ExecuteRequest =
|
||||
| { type: "BatchRunProcedure", params: BatchRunProcedure }
|
||||
| { type: "RunAction", params: RunAction }
|
||||
| { type: "BatchRunAction", params: BatchRunAction }
|
||||
| { type: "LaunchServer", params: LaunchServer }
|
||||
| { type: "TestAlerter", params: TestAlerter }
|
||||
| { type: "RunSync", params: RunSync };
|
||||
|
||||
@@ -7954,10 +7703,6 @@ export type ReadRequest =
|
||||
| { type: "GetActionActionState", params: GetActionActionState }
|
||||
| { type: "ListActions", params: ListActions }
|
||||
| { type: "ListFullActions", params: ListFullActions }
|
||||
| { type: "GetServerTemplate", params: GetServerTemplate }
|
||||
| { type: "GetServerTemplatesSummary", params: GetServerTemplatesSummary }
|
||||
| { type: "ListServerTemplates", params: ListServerTemplates }
|
||||
| { type: "ListFullServerTemplates", params: ListFullServerTemplates }
|
||||
| { type: "GetServersSummary", params: GetServersSummary }
|
||||
| { type: "GetServer", params: GetServer }
|
||||
| { type: "GetServerState", params: GetServerState }
|
||||
@@ -8103,11 +7848,6 @@ export type WriteRequest =
|
||||
| { type: "DeleteBuilder", params: DeleteBuilder }
|
||||
| { type: "UpdateBuilder", params: UpdateBuilder }
|
||||
| { type: "RenameBuilder", params: RenameBuilder }
|
||||
| { type: "CreateServerTemplate", params: CreateServerTemplate }
|
||||
| { type: "CopyServerTemplate", params: CopyServerTemplate }
|
||||
| { type: "DeleteServerTemplate", params: DeleteServerTemplate }
|
||||
| { type: "UpdateServerTemplate", params: UpdateServerTemplate }
|
||||
| { type: "RenameServerTemplate", params: RenameServerTemplate }
|
||||
| { type: "CreateRepo", params: CreateRepo }
|
||||
| { type: "CopyRepo", params: CopyRepo }
|
||||
| { type: "DeleteRepo", params: DeleteRepo }
|
||||
|
||||
@@ -70,11 +70,37 @@ pub struct ConnectTerminalQuery {
pub token: String,
/// Each periphery can keep multiple terminals open.
/// If a terminal with the specified name already exists,
/// it will be attached to.
/// Otherwise a new terminal will be created,
/// which will persist until it is either exited via command (ie `exit`),
/// or deleted using [DeleteTerminal]
/// it will be attached to. Otherwise, it will fail.
pub terminal: String,
/// Optional. The initial command to execute on connection to the shell.
pub init: Option<String>,
}

//

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ExecuteTerminalBody {
/// Specify the terminal to execute the command on.
pub terminal: String,
/// The command to execute.
pub command: String,
}
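For reference, the same body shape can be posted to Periphery's `/terminal/execute` endpoint directly; a minimal TypeScript sketch, where the Periphery address and passkey are placeholders:

```ts
// Minimal sketch: POST the ExecuteTerminalBody JSON to Periphery.
// PERIPHERY_URL and PERIPHERY_PASSKEY are placeholder values.
const PERIPHERY_URL = "https://localhost:8120";
const PERIPHERY_PASSKEY = "your-passkey";

async function executeTerminal(terminal: string, command: string): Promise<Response> {
  return fetch(`${PERIPHERY_URL}/terminal/execute`, {
    method: "POST",
    headers: {
      authorization: PERIPHERY_PASSKEY,
      "content-type": "application/json",
    },
    // Matches the ExecuteTerminalBody struct above: { terminal, command }.
    body: JSON.stringify({ terminal, command }),
  });
}
```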

//

//

#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ConnectContainerExecQuery {
/// Use [CreateTerminalAuthToken] to create a single-use
/// token to send in the query.
pub token: String,
/// The name of the container to connect to.
pub container: String,
/// The shell to start inside container.
/// Default: `sh`
#[serde(default = "default_container_shell")]
pub shell: String,
}

fn default_container_shell() -> String {
String::from("sh")
}
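The query built from this struct boils down to three fields; a TypeScript sketch of the equivalent URL construction (the Periphery websocket address is a placeholder, and the token comes from CreateTerminalAuthToken):

```ts
// Sketch of the query string the Rust client below serializes with serde_qs.
const PERIPHERY_WS = "wss://localhost:8120"; // placeholder address

function containerExecUrl(token: string, container: string, shell = "sh"): string {
  const query = new URLSearchParams({ token, container, shell }).toString();
  return `${PERIPHERY_WS}/terminal/container?${query}`;
}

// e.g. new WebSocket(containerExecUrl(token, "my-container"));
```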

@@ -1,13 +1,17 @@
use std::sync::Arc;

use anyhow::Context;
use komodo_client::terminal::TerminalStreamResponse;
use rustls::{ClientConfig, client::danger::ServerCertVerifier};
use tokio::net::TcpStream;
use tokio_tungstenite::{Connector, MaybeTlsStream, WebSocketStream};

use crate::{
PeripheryClient,
api::terminal::{ConnectTerminalQuery, CreateTerminalAuthToken},
api::terminal::{
ConnectContainerExecQuery, ConnectTerminalQuery,
CreateTerminalAuthToken, ExecuteTerminalBody,
},
};

impl PeripheryClient {
@@ -16,10 +20,9 @@ impl PeripheryClient {
pub async fn connect_terminal(
&self,
terminal: String,
init: Option<String>,
) -> anyhow::Result<WebSocketStream<MaybeTlsStream<TcpStream>>> {
tracing::trace!(
"request | type: ConnectTerminal | terminal name: {terminal} | init command: {init:?}",
"request | type: ConnectTerminal | terminal name: {terminal}",
);

let token = self
@@ -30,7 +33,6 @@ impl PeripheryClient {
let query_str = serde_qs::to_string(&ConnectTerminalQuery {
token: token.token,
terminal,
init,
})
.context("Failed to serialize query string")?;

@@ -39,30 +41,117 @@
self.address.replacen("http", "ws", 1)
);

let (stream, _) = if url.starts_with("wss") {
tokio_tungstenite::connect_async_tls_with_config(
url,
None,
false,
Some(Connector::Rustls(Arc::new(
ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(Arc::new(
InsecureVerifier,
))
.with_no_client_auth(),
))),
)
connect_websocket(&url).await
}

/// Handles ws connect and login.
/// Does not handle reconnect.
pub async fn connect_container_exec(
&self,
container: String,
shell: String,
) -> anyhow::Result<WebSocketStream<MaybeTlsStream<TcpStream>>> {
tracing::trace!(
"request | type: ConnectContainerExec | container name: {container} | shell: {shell}",
);

let token = self
.request(CreateTerminalAuthToken {})
.await
.context("Failed to create terminal auth token")?;

let query_str = serde_qs::to_string(&ConnectContainerExecQuery {
token: token.token,
container,
shell,
})
.context("Failed to serialize query string")?;

let url = format!(
"{}/terminal/container?{query_str}",
self.address.replacen("http", "ws", 1)
);

connect_websocket(&url).await
}

/// Executes command on specified terminal,
/// and streams the response ending in [KOMODO_EXIT_CODE][komodo_client::entities::KOMODO_EXIT_CODE]
/// sentinel value as the expected final line of the stream.
///
/// Example final line:
/// ```
/// __KOMODO_EXIT_CODE:0
/// ```
///
/// This means the command exited with code 0 (success).
///
/// If this value is NOT the final item before stream closes, it means
/// the terminal exited mid-command, before giving status. Example: running `exit`.
#[tracing::instrument(level = "debug", skip(self))]
pub async fn execute_terminal(
&self,
terminal: String,
command: String,
) -> anyhow::Result<TerminalStreamResponse> {
tracing::trace!(
"sending request | type: ExecuteTerminal | terminal name: {terminal} | command: {command}",
);
let req = crate::periphery_http_client()
.post(format!("{}/terminal/execute", self.address))
.json(&ExecuteTerminalBody { terminal, command })
.header("authorization", &self.passkey);
let res =
req.send().await.context("Failed at request to periphery")?;
let status = res.status();
tracing::debug!(
"got response | type: ExecuteTerminal | {status} | response: {res:?}",
);
if status.is_success() {
Ok(TerminalStreamResponse(res))
} else {
tracing::debug!("response is non-200");

let text = res
.text()
.await
.context("Failed to convert response to text")?;

tracing::debug!("got response text, deserializing error");

let error = serror::deserialize_error(text).context(status);

Err(error)
}
}
}
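The sentinel contract described above is simple to consume on the client side; a small TypeScript sketch of the check, mirroring the parsing the JS client performs further down:

```ts
// Sketch: detect the `__KOMODO_EXIT_CODE:<code>` sentinel line at the end of
// the streamed output. Returns null for ordinary output lines.
function parseExitCode(line: string): number | null {
  if (!line.startsWith("__KOMODO_EXIT_CODE")) return null;
  const code = Number(line.split(":")[1]);
  // A missing or non-numeric code means the terminal exited early (eg `exit`).
  return Number.isFinite(code) ? code : null;
}
```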

async fn connect_websocket(
url: &str,
) -> anyhow::Result<WebSocketStream<MaybeTlsStream<TcpStream>>> {
let (stream, _) = if url.starts_with("wss") {
tokio_tungstenite::connect_async_tls_with_config(
url,
None,
false,
Some(Connector::Rustls(Arc::new(
ClientConfig::builder()
.dangerous()
.with_custom_certificate_verifier(Arc::new(
InsecureVerifier,
))
.with_no_client_auth(),
))),
)
.await
.context("failed to connect to websocket")?
} else {
tokio_tungstenite::connect_async(url)
.await
.context("failed to connect to websocket")?
} else {
tokio_tungstenite::connect_async(url)
.await
.context("failed to connect to websocket")?
};
};

Ok(stream)
}
Ok(stream)
}

#[derive(Debug)]

@@ -105,14 +105,10 @@ KOMODO_GOOGLE_OAUTH_ENABLED=false
# KOMODO_GOOGLE_OAUTH_ID= # Alt: KOMODO_GOOGLE_OAUTH_ID_FILE
# KOMODO_GOOGLE_OAUTH_SECRET= # Alt: KOMODO_GOOGLE_OAUTH_SECRET_FILE

## Aws - Used to launch Builder instances and ServerTemplate instances.
## Aws - Used to launch Builder instances.
KOMODO_AWS_ACCESS_KEY_ID= # Alt: KOMODO_AWS_ACCESS_KEY_ID_FILE
KOMODO_AWS_SECRET_ACCESS_KEY= # Alt: KOMODO_AWS_SECRET_ACCESS_KEY_FILE

## Hetzner - Used to launch ServerTemplate instances
## Hetzner Builder not supported due to Hetzner pay-by-the-hour pricing model
KOMODO_HETZNER_TOKEN= # Alt: KOMODO_HETZNER_TOKEN_FILE

#=------------------------------=#
#= Komodo Periphery Environment =#
#=------------------------------=#

@@ -65,6 +65,12 @@ ui_write_disabled = false
## Default: false
disable_confirm_dialog = false

## Disables UI websocket automatic reconnection.
## Users will still be able to trigger reconnect by clicking the connection indicator.
## Env: KOMODO_DISABLE_WEBSOCKET_RECONNECT
## Default: false
disable_websocket_reconnect = false

## Configure the directory for sync files (inside the container).
## There shouldn't be a need to change this, just mount a volume.
## Env: KOMODO_SYNC_DIRECTORY
@@ -404,21 +410,12 @@ resource_poll_interval = "5-min"
## Komodo can build images by deploying AWS EC2 instances,
## running the build, and afterwards destroying the instance.

## Additionally, Komodo can deploy cloud VPS on AWS EC2 and Hetzner.
## Use the Template resource to configure launch preferences.
## Hetzner is not supported for builds as their pricing model is by the hour,
## while AWS is by the minute. This is very important for builds.

## Provide AWS api keys for ephemeral builders / server launch
## Provide AWS api keys for ephemeral builders
## Env: KOMODO_AWS_ACCESS_KEY_ID or KOMODO_AWS_ACCESS_KEY_ID_FILE
aws.access_key_id = ""
## Env: KOMODO_AWS_SECRET_ACCESS_KEY or KOMODO_AWS_SECRET_ACCESS_KEY_FILE
aws.secret_access_key = ""

## Provide Hetzner api token for server launch
## Env: KOMODO_HETZNER_TOKEN or KOMODO_HETZNER_TOKEN_FILE
hetzner.token = ""

#################
# GIT PROVIDERS #
#################

@@ -53,6 +53,12 @@ root_directory = "/etc/komodo"
## Default: false
disable_terminals = false

## Disable the container exec APIs and disallow remote container shell access through Periphery.
## This can be left enabled while general terminal access is disabled.
## Env: PERIPHERY_DISABLE_CONTAINER_EXEC
## Default: false
disable_container_exec = false

## How often Periphery polls the host for system stats,
## like CPU / memory usage. To effectively disable polling,
## set this to something like 1-hr.
@@ -103,7 +109,7 @@ passkeys = []
## If true and a key / cert at the given paths are not found,
## self signed keys will be generated using openssl.
## Env: PERIPHERY_SSL_ENABLED
## Default: false (will change to `true` in later release)
## Default: true
ssl_enabled = true

## Path to the ssl key.

78
frontend/public/client/lib.d.ts
vendored
@@ -1,5 +1,5 @@
import { AuthResponses, ExecuteResponses, ReadResponses, UserResponses, WriteResponses } from "./responses.js";
import { AuthRequest, ConnectTerminalQuery, ExecuteRequest, ReadRequest, Update, UpdateListItem, UserRequest, WriteRequest } from "./types.js";
import { AuthRequest, ConnectContainerExecQuery, ConnectTerminalQuery, ExecuteRequest, ExecuteTerminalBody, ReadRequest, Update, UpdateListItem, UserRequest, WriteRequest } from "./types.js";
export * as Types from "./types.js";
type InitOptions = {
type: "jwt";
@@ -119,15 +119,26 @@ export declare function KomodoClient(url: string, options: InitOptions): {
/** Returns the version of Komodo Core the client is calling to. */
core_version: () => Promise<string>;
/**
* Subscribes to the update websocket with automatic reconnect loop.
*
* Note. Awaiting this method will never finish.
* Connects to update websocket, performs login and attaches handlers,
* and returns the WebSocket handle.
*/
subscribe_to_update_websocket: ({ on_update, on_login, on_close, retry_timeout_ms, cancel, on_cancel, }: {
get_update_websocket: ({ on_update, on_login, on_open, on_close, }: {
on_update: (update: UpdateListItem) => void;
on_login?: () => void;
on_open?: () => void;
on_close?: () => void;
}) => WebSocket;
/**
* Subscribes to the update websocket with automatic reconnect loop.
*
* Note. Awaiting this method will never finish.
*/
subscribe_to_update_websocket: ({ on_update, on_open, on_login, on_close, retry, retry_timeout_ms, cancel, on_cancel, }: {
on_update: (update: UpdateListItem) => void;
on_login?: () => void;
on_open?: () => void;
on_close?: () => void;
retry?: boolean;
retry_timeout_ms?: number;
cancel?: CancelToken;
on_cancel?: () => void;
@@ -143,4 +154,61 @@ export declare function KomodoClient(url: string, options: InitOptions): {
on_open?: () => void;
on_close?: () => void;
}) => WebSocket;
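A usage sketch for the two declarations above (the Core url, api key values, and the `./lib.js` import path are placeholders / assumptions):

```ts
// Minimal sketch: open the update websocket with the reconnect loop.
// KomodoClient is the exported factory declared above; url / keys are placeholders.
import { KomodoClient } from "./lib.js";

const komodo = KomodoClient("https://komodo.example.com", {
  type: "api-key",
  params: { key: "K-EXAMPLE", secret: "S-EXAMPLE" },
});

komodo.subscribe_to_update_websocket({
  on_update: (update) => console.log("update:", update),
  on_open: () => console.log("socket open"),
  on_login: () => console.log("logged in"),
  on_close: () => console.log("socket closed"),
  retry: true, // set false to connect once without the reconnect loop
  retry_timeout_ms: 5_000,
});
```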
/**
* Subscribes to container exec io over websocket message,
* for use with xtermjs.
*/
connect_container_exec: ({ query, on_message, on_login, on_open, on_close, }: {
query: ConnectContainerExecQuery;
on_message?: (e: MessageEvent<any>) => void;
on_login?: () => void;
on_open?: () => void;
on_close?: () => void;
}) => WebSocket;
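A sketch of driving this without xterm.js, assuming the `komodo` instance from the previous example and assuming input is forwarded to the socket as plain text frames (as an xterm.js integration would do):

```ts
// Minimal sketch: attach to a container shell and print its output.
// After login the socket switches to ArrayBuffer frames, so decode before printing.
const decoder = new TextDecoder();

const exec = komodo.connect_container_exec({
  query: { server: "my-server", container: "my-container", shell: "sh" },
  on_login: () => exec.send("ls -la\n"), // assumption: input is sent as raw text
  on_message: (e) =>
    console.log(typeof e.data === "string" ? e.data : decoder.decode(e.data)),
  on_close: () => console.log("exec session closed"),
});
```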
/**
* Executes a command on a given Server / terminal,
* and returns a stream to process the output as it comes in.
*
* Note. The final line of the stream will usually be
* `__KOMODO_EXIT_CODE__:0`. The number
* is the exit code of the command.
*
* If this line is NOT present, it means the stream
* was terminated early, ie like running `exit`.
*
* ```ts
* const stream = await komodo.execute_terminal_stream({
* server: "my-server",
* terminal: "name",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* });
*
* for await (const line of stream) {
* console.log(line);
* }
* ```
*/
execute_terminal_stream: (request: ExecuteTerminalBody) => Promise<AsyncIterable<string>>;
/**
* Executes a command on a given Server / terminal,
* and gives a callback to handle the output as it comes in.
*
* ```ts
* const stream = await komodo.execute_terminal(
* {
* server: "my-server",
* terminal: "name",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* },
* {
* onLine: (line) => console.log(line),
* onFinish: (code) => console.log("Finished:", code),
* }
* );
* ```
*/
execute_terminal: (request: ExecuteTerminalBody, callbacks?: {
onLine?: (line: string) => void | Promise<void>;
onFinish?: (code: string) => void | Promise<void>;
}) => Promise<void>;
};

@@ -16,11 +16,11 @@ export function KomodoClient(url, options) {
key: options.type === "api-key" ? options.params.key : undefined,
secret: options.type === "api-key" ? options.params.secret : undefined,
};
const request = (path, request) => new Promise(async (res, rej) => {
const request = (path, type, params) => new Promise(async (res, rej) => {
try {
let response = await fetch(url + path, {
let response = await fetch(`${url}${path}/${type}`, {
method: "POST",
body: JSON.stringify(request),
body: JSON.stringify(params),
headers: {
...(state.jwt
? {
@@ -67,14 +67,11 @@ export function KomodoClient(url, options) {
});
}
});
const auth = async (type, params) => await request("/auth", {
type,
params,
});
const user = async (type, params) => await request("/user", { type, params });
const read = async (type, params) => await request("/read", { type, params });
const write = async (type, params) => await request("/write", { type, params });
const execute = async (type, params) => await request("/execute", { type, params });
const auth = async (type, params) => await request("/auth", type, params);
const user = async (type, params) => await request("/user", type, params);
const read = async (type, params) => await request("/read", type, params);
const write = async (type, params) => await request("/write", type, params);
const execute = async (type, params) => await request("/execute", type, params);
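The practical effect of this refactor is that the request variant now rides on the URL path instead of the JSON envelope, so individual calls are identifiable in browser network logs. A sketch of the request a single read now produces (endpoint and params are illustrative):

```ts
// Sketch of the new calling syntax: POST {url}/read/ListServers with only
// the params in the body. Previously the body carried { type, params }.
async function listServers(url: string, jwt: string) {
  const response = await fetch(`${url}/read/ListServers`, {
    method: "POST",
    headers: { authorization: jwt, "content-type": "application/json" },
    body: JSON.stringify({}), // ListServers params (the query field is optional)
  });
  return response.json();
}
```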
const execute_and_poll = async (type, params) => {
const res = await execute(type, params);
// Check if its a batch of updates or a single update;
@@ -103,40 +100,50 @@ export function KomodoClient(url, options) {
}
};
const core_version = () => read("GetVersion", {}).then((res) => res.version);
const subscribe_to_update_websocket = async ({ on_update, on_login, on_close, retry_timeout_ms = 5_000, cancel = new CancelToken(), on_cancel, }) => {
const get_update_websocket = ({ on_update, on_login, on_open, on_close, }) => {
const ws = new WebSocket(url.replace("http", "ws") + "/ws/update");
// Handle login on websocket open
ws.addEventListener("open", () => {
on_open?.();
const login_msg = options.type === "jwt"
? {
type: "Jwt",
params: {
jwt: options.params.jwt,
},
}
: {
type: "ApiKeys",
params: {
key: options.params.key,
secret: options.params.secret,
},
};
ws.send(JSON.stringify(login_msg));
});
ws.addEventListener("message", ({ data }) => {
if (data == "LOGGED_IN")
return on_login?.();
on_update(JSON.parse(data));
});
if (on_close) {
ws.addEventListener("close", on_close);
}
return ws;
};
const subscribe_to_update_websocket = async ({ on_update, on_open, on_login, on_close, retry = true, retry_timeout_ms = 5_000, cancel = new CancelToken(), on_cancel, }) => {
while (true) {
if (cancel.cancelled) {
on_cancel?.();
return;
}
try {
const ws = new WebSocket(url.replace("http", "ws") + "/ws/update");
// Handle login on websocket open
ws.addEventListener("open", () => {
const login_msg = options.type === "jwt"
? {
type: "Jwt",
params: {
jwt: options.params.jwt,
},
}
: {
type: "ApiKeys",
params: {
key: options.params.key,
secret: options.params.secret,
},
};
ws.send(JSON.stringify(login_msg));
const ws = get_update_websocket({
on_open,
on_login,
on_update,
on_close,
});
ws.addEventListener("message", ({ data }) => {
if (data == "LOGGED_IN")
return on_login?.();
on_update(JSON.parse(data));
});
if (on_close) {
ws.addEventListener("close", on_close);
}
// This while loop will end when the socket is closed
while (ws.readyState !== WebSocket.CLOSING &&
ws.readyState !== WebSocket.CLOSED) {
@@ -145,13 +152,23 @@ export function KomodoClient(url, options) {
// Sleep for a bit before checking for websocket closed
await new Promise((resolve) => setTimeout(resolve, 500));
}
// Sleep for a bit before retrying connection to avoid spam.
await new Promise((resolve) => setTimeout(resolve, retry_timeout_ms));
if (retry) {
// Sleep for a bit before retrying connection to avoid spam.
await new Promise((resolve) => setTimeout(resolve, retry_timeout_ms));
}
else {
return;
}
}
catch (error) {
console.error(error);
// Sleep for a bit before retrying, maybe Komodo Core is down temporarily.
await new Promise((resolve) => setTimeout(resolve, retry_timeout_ms));
if (retry) {
// Sleep for a bit before retrying, maybe Komodo Core is down temporarily.
await new Promise((resolve) => setTimeout(resolve, retry_timeout_ms));
}
else {
return;
}
}
}
};
@@ -191,6 +208,132 @@ export function KomodoClient(url, options) {
ws.onclose = () => on_close?.();
return ws;
};
const connect_container_exec = ({ query, on_message, on_login, on_open, on_close, }) => {
const url_query = new URLSearchParams(query).toString();
const ws = new WebSocket(url.replace("http", "ws") + "/ws/container?" + url_query);
// Handle login on websocket open
ws.onopen = () => {
const login_msg = options.type === "jwt"
? {
type: "Jwt",
params: {
jwt: options.params.jwt,
},
}
: {
type: "ApiKeys",
params: {
key: options.params.key,
secret: options.params.secret,
},
};
ws.send(JSON.stringify(login_msg));
on_open?.();
};
ws.onmessage = (e) => {
if (e.data == "LOGGED_IN") {
ws.binaryType = "arraybuffer";
ws.onmessage = (e) => on_message?.(e);
on_login?.();
return;
}
else {
on_message?.(e);
}
};
ws.onclose = () => on_close?.();
return ws;
};
const execute_terminal_stream = (request) => new Promise(async (res, rej) => {
try {
let response = await fetch(url + "/terminal/execute", {
method: "POST",
body: JSON.stringify(request),
headers: {
...(state.jwt
? {
authorization: state.jwt,
}
: state.key && state.secret
? {
"x-api-key": state.key,
"x-api-secret": state.secret,
}
: {}),
"content-type": "application/json",
},
});
if (response.status === 200) {
if (response.body) {
const stream = response.body
.pipeThrough(new TextDecoderStream("utf-8"))
.pipeThrough(new TransformStream({
start(_controller) {
this.tail = "";
},
transform(chunk, controller) {
const data = this.tail + chunk; // prepend any carry-over
const parts = data.split(/\r?\n/); // split on CRLF or LF
this.tail = parts.pop(); // last item may be incomplete
for (const line of parts)
controller.enqueue(line);
},
flush(controller) {
if (this.tail)
controller.enqueue(this.tail); // final unterminated line
},
}));
res(stream);
}
else {
rej({
status: response.status,
result: { error: "No response body", trace: [] },
});
}
}
else {
try {
const result = await response.json();
rej({ status: response.status, result });
}
catch (error) {
rej({
status: response.status,
result: {
error: "Failed to get response body",
trace: [JSON.stringify(error)],
},
error,
});
}
}
}
catch (error) {
rej({
status: 1,
result: {
error: "Request failed with error",
trace: [JSON.stringify(error)],
},
error,
});
}
});
const execute_terminal = async (request, callbacks) => {
const stream = await execute_terminal_stream(request);
for await (const line of stream) {
if (line.startsWith("__KOMODO_EXIT_CODE")) {
await callbacks?.onFinish?.(line.split(":")[1]);
return;
}
else {
await callbacks?.onLine?.(line);
}
}
// This is hit if no __KOMODO_EXIT_CODE is sent, ie early exit
await callbacks?.onFinish?.("Early exit without code");
};
return {
/**
* Call the `/auth` api.
@@ -275,6 +418,11 @@ export function KomodoClient(url, options) {
poll_update_until_complete,
/** Returns the version of Komodo Core the client is calling to. */
core_version,
/**
* Connects to update websocket, performs login and attaches handlers,
* and returns the WebSocket handle.
*/
get_update_websocket,
/**
* Subscribes to the update websocket with automatic reconnect loop.
*
@@ -286,5 +434,53 @@ export function KomodoClient(url, options) {
* for use with xtermjs.
*/
connect_terminal,
/**
* Subscribes to container exec io over websocket message,
* for use with xtermjs.
*/
connect_container_exec,
/**
* Executes a command on a given Server / terminal,
* and returns a stream to process the output as it comes in.
*
* Note. The final line of the stream will usually be
* `__KOMODO_EXIT_CODE__:0`. The number
* is the exit code of the command.
*
* If this line is NOT present, it means the stream
* was terminated early, ie like running `exit`.
*
* ```ts
* const stream = await komodo.execute_terminal_stream({
* server: "my-server",
* terminal: "name",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* });
*
* for await (const line of stream) {
* console.log(line);
* }
* ```
*/
execute_terminal_stream,
/**
* Executes a command on a given Server / terminal,
* and gives a callback to handle the output as it comes in.
*
* ```ts
* const stream = await komodo.execute_terminal(
* {
* server: "my-server",
* terminal: "name",
* command: 'for i in {1..3}; do echo "$i"; sleep 1; done',
* },
* {
* onLine: (line) => console.log(line),
* onFinish: (code) => console.log("Finished:", code),
* }
* );
* ```
*/
execute_terminal,
};
}

12
frontend/public/client/responses.d.ts
vendored
@@ -38,10 +38,6 @@ export type ReadResponses = {
GetActionActionState: Types.GetActionActionStateResponse;
ListActions: Types.ListActionsResponse;
ListFullActions: Types.ListFullActionsResponse;
GetServerTemplate: Types.GetServerTemplateResponse;
GetServerTemplatesSummary: Types.GetServerTemplatesSummaryResponse;
ListServerTemplates: Types.ListServerTemplatesResponse;
ListFullServerTemplates: Types.ListFullServerTemplatesResponse;
GetServersSummary: Types.GetServersSummaryResponse;
GetServer: Types.GetServerResponse;
GetServerState: Types.GetServerStateResponse;
@@ -153,7 +149,6 @@ export type WriteResponses = {
UpdatePermissionOnResourceType: Types.UpdatePermissionOnResourceTypeResponse;
UpdatePermissionOnTarget: Types.UpdatePermissionOnTargetResponse;
UpdateDescription: Types.UpdateDescriptionResponse;
LaunchServer: Types.Update;
CreateServer: Types.Server;
DeleteServer: Types.Server;
UpdateServer: Types.Server;
@@ -182,11 +177,6 @@ export type WriteResponses = {
DeleteBuilder: Types.Builder;
UpdateBuilder: Types.Builder;
RenameBuilder: Types.Update;
CreateServerTemplate: Types.ServerTemplate;
CopyServerTemplate: Types.ServerTemplate;
DeleteServerTemplate: Types.ServerTemplate;
UpdateServerTemplate: Types.ServerTemplate;
RenameServerTemplate: Types.Update;
CreateRepo: Types.Repo;
CopyRepo: Types.Repo;
DeleteRepo: Types.Repo;
@@ -292,13 +282,13 @@ export type ExecuteResponses = {
BatchRunProcedure: Types.BatchExecutionResponse;
RunAction: Types.Update;
BatchRunAction: Types.BatchExecutionResponse;
LaunchServer: Types.Update;
RunSync: Types.Update;
DeployStack: Types.Update;
BatchDeployStack: Types.BatchExecutionResponse;
DeployStackIfChanged: Types.Update;
BatchDeployStackIfChanged: Types.BatchExecutionResponse;
PullStack: Types.Update;
BatchPullStack: Types.BatchExecutionResponse;
StartStack: Types.Update;
RestartStack: Types.Update;
StopStack: Types.Update;

394
frontend/public/client/types.d.ts
vendored
@@ -89,6 +89,11 @@ export interface ActionConfig {
|
||||
* If its an empty string, use the default secret from the config.
|
||||
*/
|
||||
webhook_secret?: string;
|
||||
/**
|
||||
* Whether deno will be instructed to reload all dependencies,
|
||||
* this can usually be kept false outside of development.
|
||||
*/
|
||||
reload_deno_deps?: boolean;
|
||||
/**
|
||||
* Typescript file contents using pre-initialized `komodo` client.
|
||||
* Supports variable / secret interpolation.
|
||||
@@ -213,9 +218,6 @@ export type ResourceTarget = {
|
||||
} | {
|
||||
type: "Alerter";
|
||||
id: string;
|
||||
} | {
|
||||
type: "ServerTemplate";
|
||||
id: string;
|
||||
} | {
|
||||
type: "ResourceSync";
|
||||
id: string;
|
||||
@@ -674,6 +676,9 @@ export type Execution =
|
||||
} | {
|
||||
type: "PullStack";
|
||||
params: PullStack;
|
||||
} | {
|
||||
type: "BatchPullStack";
|
||||
params: BatchPullStack;
|
||||
} | {
|
||||
type: "StartStack";
|
||||
params: StartStack;
|
||||
@@ -1832,19 +1837,6 @@ export interface ServerConfig {
|
||||
}
|
||||
export type Server = Resource<ServerConfig, undefined>;
|
||||
export type GetServerResponse = Server;
|
||||
export type ServerTemplateConfig =
|
||||
/** Template to launch an AWS EC2 instance */
|
||||
{
|
||||
type: "Aws";
|
||||
params: AwsServerTemplateConfig;
|
||||
}
|
||||
/** Template to launch a Hetzner server */
|
||||
| {
|
||||
type: "Hetzner";
|
||||
params: HetznerServerTemplateConfig;
|
||||
};
|
||||
export type ServerTemplate = Resource<ServerTemplateConfig, undefined>;
|
||||
export type GetServerTemplateResponse = ServerTemplate;
|
||||
export interface StackActionState {
|
||||
pulling: boolean;
|
||||
deploying: boolean;
|
||||
@@ -2106,8 +2098,10 @@ export interface SystemInformation {
|
||||
host_name?: string;
|
||||
/** The CPU's brand */
|
||||
cpu_brand: string;
|
||||
/** Whether terminals are disabled on this Periphery */
|
||||
/** Whether terminals are disabled on this Periphery server */
|
||||
terminals_disabled: boolean;
|
||||
/** Whether container exec is disabled on this Periphery server */
|
||||
container_exec_disabled: boolean;
|
||||
}
|
||||
export type GetSystemInformationResponse = SystemInformation;
|
||||
/** Info for a single disk mounted on the system. */
|
||||
@@ -2338,11 +2332,6 @@ export declare enum Operation {
|
||||
RenameAlerter = "RenameAlerter",
|
||||
DeleteAlerter = "DeleteAlerter",
|
||||
TestAlerter = "TestAlerter",
|
||||
CreateServerTemplate = "CreateServerTemplate",
|
||||
UpdateServerTemplate = "UpdateServerTemplate",
|
||||
RenameServerTemplate = "RenameServerTemplate",
|
||||
DeleteServerTemplate = "DeleteServerTemplate",
|
||||
LaunchServer = "LaunchServer",
|
||||
CreateResourceSync = "CreateResourceSync",
|
||||
UpdateResourceSync = "UpdateResourceSync",
|
||||
RenameResourceSync = "RenameResourceSync",
|
||||
@@ -3340,7 +3329,6 @@ export type ListFullDeploymentsResponse = Deployment[];
|
||||
export type ListFullProceduresResponse = Procedure[];
|
||||
export type ListFullReposResponse = Repo[];
|
||||
export type ListFullResourceSyncsResponse = ResourceSync[];
|
||||
export type ListFullServerTemplatesResponse = ServerTemplate[];
|
||||
export type ListFullServersResponse = Server[];
|
||||
export type ListFullStacksResponse = Stack[];
|
||||
export type ListGitProviderAccountsResponse = GitProviderAccount[];
|
||||
@@ -3485,14 +3473,6 @@ export interface ResourceSyncListItemInfo {
|
||||
export type ResourceSyncListItem = ResourceListItem<ResourceSyncListItemInfo>;
|
||||
export type ListResourceSyncsResponse = ResourceSyncListItem[];
|
||||
export type ListSecretsResponse = string[];
|
||||
export interface ServerTemplateListItemInfo {
|
||||
/** The cloud provider */
|
||||
provider: string;
|
||||
/** The instance type, eg c5.2xlarge for Aws templates */
|
||||
instance_type?: string;
|
||||
}
|
||||
export type ServerTemplateListItem = ResourceListItem<ServerTemplateListItemInfo>;
|
||||
export type ListServerTemplatesResponse = ServerTemplateListItem[];
|
||||
export declare enum ServerState {
|
||||
/** Server is unreachable. */
|
||||
NotOk = "NotOk",
|
||||
@@ -3518,6 +3498,8 @@ export interface ServerListItemInfo {
|
||||
send_disk_alerts: boolean;
|
||||
/** Whether terminals are disabled for this Server. */
|
||||
terminals_disabled: boolean;
|
||||
/** Whether container exec is disabled for this Server. */
|
||||
container_exec_disabled: boolean;
|
||||
}
|
||||
export type ServerListItem = ResourceListItem<ServerListItemInfo>;
|
||||
export type ListServersResponse = ServerListItem[];
|
||||
@@ -3670,10 +3652,6 @@ export interface ServerQuerySpecifics {
|
||||
}
|
||||
/** Server-specific query */
|
||||
export type ServerQuery = ResourceQuery<ServerQuerySpecifics>;
|
||||
export interface ServerTemplateQuerySpecifics {
|
||||
types: ServerTemplateConfig["type"][];
|
||||
}
|
||||
export type ServerTemplateQuery = ResourceQuery<ServerTemplateQuerySpecifics>;
|
||||
export type SetLastSeenUpdateResponse = NoData;
|
||||
export interface StackQuerySpecifics {
|
||||
/**
|
||||
@@ -3706,13 +3684,11 @@ export type UpdateVariableValueResponse = Variable;
|
||||
export type _PartialActionConfig = Partial<ActionConfig>;
|
||||
export type _PartialAlerterConfig = Partial<AlerterConfig>;
|
||||
export type _PartialAwsBuilderConfig = Partial<AwsBuilderConfig>;
|
||||
export type _PartialAwsServerTemplateConfig = Partial<AwsServerTemplateConfig>;
|
||||
export type _PartialBuildConfig = Partial<BuildConfig>;
|
||||
export type _PartialBuilderConfig = Partial<BuilderConfig>;
|
||||
export type _PartialDeploymentConfig = Partial<DeploymentConfig>;
|
||||
export type _PartialDockerRegistryAccount = Partial<DockerRegistryAccount>;
|
||||
export type _PartialGitProviderAccount = Partial<GitProviderAccount>;
|
||||
export type _PartialHetznerServerTemplateConfig = Partial<HetznerServerTemplateConfig>;
|
||||
export type _PartialProcedureConfig = Partial<ProcedureConfig>;
|
||||
export type _PartialRepoConfig = Partial<RepoConfig>;
|
||||
export type _PartialResourceSyncConfig = Partial<ResourceSyncConfig>;
|
||||
@@ -3781,64 +3757,6 @@ export interface AwsBuilderConfig {
|
||||
/** Which secrets are available on the AMI. */
|
||||
secrets?: string[];
|
||||
}
|
||||
export declare enum AwsVolumeType {
|
||||
Gp2 = "gp2",
|
||||
Gp3 = "gp3",
|
||||
Io1 = "io1",
|
||||
Io2 = "io2"
|
||||
}
|
||||
/**
|
||||
* For information on AWS volumes, see
|
||||
* `<https://docs.aws.amazon.com/ebs/latest/userguide/ebs-volume-types.html>`.
|
||||
*/
|
||||
export interface AwsVolume {
|
||||
/** The device name (for example, `/dev/sda1` or `xvdh`). */
|
||||
device_name: string;
|
||||
/** The size of the volume in GB */
|
||||
size_gb: number;
|
||||
/** The type of volume. Options: gp2, gp3, io1, io2. */
|
||||
volume_type: AwsVolumeType;
|
||||
/** The iops of the volume, or 0 for AWS default. */
|
||||
iops: number;
|
||||
/** The throughput of the volume, or 0 for AWS default. */
|
||||
throughput: number;
|
||||
}
|
||||
/** Aws EC2 instance config. */
|
||||
export interface AwsServerTemplateConfig {
|
||||
/** The aws region to launch the server in, eg. us-east-1 */
|
||||
region: string;
|
||||
/** The instance type to launch, eg. c5.2xlarge */
|
||||
instance_type: string;
|
||||
/** Specify the ami id to use. Must be set up to start the periphery binary on startup. */
|
||||
ami_id: string;
|
||||
/** The subnet to assign to the instance. */
|
||||
subnet_id: string;
|
||||
/** The key pair name to give to the instance in case SSH access required. */
|
||||
key_pair_name: string;
|
||||
/**
|
||||
* Assign a public ip to the instance. Depending on how your network is
|
||||
* setup, this may be required for the instance to reach the public internet.
|
||||
*/
|
||||
assign_public_ip: boolean;
|
||||
/**
|
||||
* Use the instance's public ip as the address for the server.
* Could be used when build instances are created in a network that is not interconnected with the core api.
|
||||
*/
|
||||
use_public_ip: boolean;
|
||||
/**
|
||||
* The port periphery will be running on in AMI.
|
||||
* Default: `8120`
|
||||
*/
|
||||
port: number;
|
||||
/** Whether Periphery will be running on https */
|
||||
use_https: boolean;
|
||||
/** The security groups to give to the instance. */
|
||||
security_group_ids?: string[];
|
||||
/** Specify the EBS volumes to attach. */
|
||||
volumes: AwsVolume[];
|
||||
/** The user data to deploy the instance with. */
|
||||
user_data: string;
|
||||
}
|
||||
/** Builds multiple Repos in parallel that match pattern. Response: [BatchExecutionResponse]. */
|
||||
export interface BatchBuildRepo {
|
||||
/**
|
||||
@@ -3971,6 +3889,22 @@ export interface BatchPullRepo {
|
||||
*/
|
||||
pattern: string;
|
||||
}
|
||||
/** Pulls multiple Stacks in parallel that match pattern. Response: [BatchExecutionResponse]. */
|
||||
export interface BatchPullStack {
|
||||
/**
|
||||
* Id or name or wildcard pattern or regex.
|
||||
* Supports multiline and comma delineated combinations of the above.
|
||||
*
|
||||
* Example:
|
||||
* ```
|
||||
* # match all foo-* stacks
|
||||
* foo-*
|
||||
* # add some more
|
||||
* extra-stack-1, extra-stack-2
|
||||
* ```
|
||||
*/
|
||||
pattern: string;
|
||||
}
|
||||
/** Runs multiple Actions in parallel that match pattern. Response: [BatchExecutionResponse] */
|
||||
export interface BatchRunAction {
|
||||
/**
|
||||
@@ -4107,6 +4041,18 @@ export interface CommitSync {
|
||||
/** Id or name */
|
||||
sync: string;
|
||||
}
|
||||
/**
|
||||
* Query to connect to a container exec session (interactive shell over websocket) on the given server.
|
||||
* TODO: Document calling.
|
||||
*/
|
||||
export interface ConnectContainerExecQuery {
|
||||
/** Server Id or name */
|
||||
server: string;
|
||||
/** The container name */
|
||||
container: string;
|
||||
/** The shell to connect to */
|
||||
shell: string;
|
||||
}
|
||||
/**
|
||||
* Query to connect to a terminal (interactive shell over websocket) on the given server.
|
||||
* TODO: Document calling.
|
||||
@@ -4116,15 +4062,11 @@ export interface ConnectTerminalQuery {
|
||||
server: string;
|
||||
/**
|
||||
* Each periphery can keep multiple terminals open.
|
||||
* If a terminal with the specified name already exists,
|
||||
* it will be attached to.
|
||||
* Otherwise a new terminal will be created for the command,
|
||||
* which will persist until it is deleted using
|
||||
* [DeleteTerminal][crate::api::write::server::DeleteTerminal]
|
||||
* If a terminal with the specified name does not exist,
|
||||
* the call will fail.
|
||||
* Create a terminal using [CreateTerminal][super::write::server::CreateTerminal]
|
||||
*/
|
||||
terminal: string;
|
||||
/** Optional. The initial command to execute on connection to the shell. */
|
||||
init?: string;
|
||||
}
|
||||
export interface Conversion {
|
||||
/** reference on the server. */
|
||||
@@ -4212,16 +4154,6 @@ export interface CopyResourceSync {
|
||||
/** The id of the sync to copy. */
|
||||
id: string;
|
||||
}
|
||||
/**
|
||||
* Creates a new server template with given `name` and the configuration
|
||||
* of the server template at the given `id`. Response: [ServerTemplate]
|
||||
*/
|
||||
export interface CopyServerTemplate {
|
||||
/** The name of the new server template. */
|
||||
name: string;
|
||||
/** The id of the server template to copy. */
|
||||
id: string;
|
||||
}
|
||||
/**
|
||||
* Creates a new stack with given `name` and the configuration
|
||||
* of the stack at the given `id`. Response: [Stack].
|
||||
@@ -4421,20 +4353,6 @@ export interface CreateServer {
|
||||
/** Optional partial config to initialize the server with. */
|
||||
config?: _PartialServerConfig;
|
||||
}
|
||||
export type PartialServerTemplateConfig = {
|
||||
type: "Aws";
|
||||
params: _PartialAwsServerTemplateConfig;
|
||||
} | {
|
||||
type: "Hetzner";
|
||||
params: _PartialHetznerServerTemplateConfig;
|
||||
};
|
||||
/** Create a server template. Response: [ServerTemplate]. */
|
||||
export interface CreateServerTemplate {
|
||||
/** The name given to newly created server template. */
|
||||
name: string;
|
||||
/** Optional partial config to initialize the server template with. */
|
||||
config?: PartialServerTemplateConfig;
|
||||
}
|
||||
/**
|
||||
* **Admin only.** Create a service user.
|
||||
* Response: [User].
|
||||
@@ -4702,14 +4620,6 @@ export interface DeleteServer {
|
||||
/** The id or name of the server to delete. */
|
||||
id: string;
|
||||
}
|
||||
/**
|
||||
* Deletes the server template at the given id, and returns the deleted server template.
|
||||
* Response: [ServerTemplate]
|
||||
*/
|
||||
export interface DeleteServerTemplate {
|
||||
/** The id or name of the server template to delete. */
|
||||
id: string;
|
||||
}
|
||||
/**
|
||||
* Deletes the stack at the given id, and returns the deleted stack.
|
||||
* Response: [Stack]
|
||||
@@ -4902,6 +4812,23 @@ export interface ExchangeForJwt {
|
||||
/** The 'exchange token' */
|
||||
token: string;
|
||||
}
|
||||
/**
|
||||
* Execute a terminal command on the given server.
|
||||
* TODO: Document calling.
|
||||
*/
|
||||
export interface ExecuteTerminalBody {
|
||||
/** Server Id or name */
|
||||
server: string;
|
||||
/**
|
||||
* The name of the terminal on the server to use to execute.
|
||||
* If the terminal at name exists, it will be used to execute the command.
|
||||
* Otherwise, a new terminal will be created for this command, which will
|
||||
* persist until it exits or is deleted.
|
||||
*/
|
||||
terminal: string;
|
||||
/** The command to execute. */
|
||||
command: string;
|
||||
}
|
||||
/**
|
||||
* Get pretty formatted monrun sync toml for all resources
|
||||
* which the user has permissions to view.
|
||||
@@ -5123,6 +5050,8 @@ export interface GetCoreInfoResponse {
|
||||
disable_confirm_dialog: boolean;
|
||||
/** The repo owners for which github webhook management api is available */
|
||||
github_webhook_owners: string[];
|
||||
/** Whether to disable websocket automatic reconnect. */
|
||||
disable_websocket_reconnect: boolean;
|
||||
}
|
||||
/** Get a specific deployment by name or id. Response: [Deployment]. */
|
||||
export interface GetDeployment {
|
||||
@@ -5466,22 +5395,6 @@ export interface GetServerStateResponse {
|
||||
/** The server status. */
|
||||
status: ServerState;
|
||||
}
|
||||
/** Get a specific server template by id or name. Response: [ServerTemplate]. */
|
||||
export interface GetServerTemplate {
|
||||
/** Id or name */
|
||||
server_template: string;
|
||||
}
|
||||
/**
|
||||
* Gets a summary of data relating to all server templates.
|
||||
* Response: [GetServerTemplatesSummaryResponse].
|
||||
*/
|
||||
export interface GetServerTemplatesSummary {
|
||||
}
|
||||
/** Response for [GetServerTemplatesSummary]. */
|
||||
export interface GetServerTemplatesSummaryResponse {
|
||||
/** The total number of server templates. */
|
||||
total: number;
|
||||
}
|
||||
/**
|
||||
* Gets a summary of data relating to all servers.
|
||||
* Response: [GetServersSummaryResponse].
|
||||
@@ -5669,107 +5582,6 @@ export interface GetVersionResponse {
|
||||
/** The version of the core api. */
|
||||
version: string;
|
||||
}
|
||||
export declare enum HetznerDatacenter {
|
||||
Nuremberg1Dc3 = "Nuremberg1Dc3",
|
||||
Helsinki1Dc2 = "Helsinki1Dc2",
|
||||
Falkenstein1Dc14 = "Falkenstein1Dc14",
|
||||
AshburnDc1 = "AshburnDc1",
|
||||
HillsboroDc1 = "HillsboroDc1",
|
||||
SingaporeDc1 = "SingaporeDc1"
|
||||
}
|
||||
export declare enum HetznerServerType {
|
||||
/** CPX11 - AMD 2 Cores, 2 Gb Ram, 40 Gb disk */
|
||||
SharedAmd2Core2Ram40Disk = "SharedAmd2Core2Ram40Disk",
|
||||
/** CAX11 - Arm 2 Cores, 4 Gb Ram, 40 Gb disk */
|
||||
SharedArm2Core4Ram40Disk = "SharedArm2Core4Ram40Disk",
|
||||
/** CX22 - Intel 2 Cores, 4 Gb Ram, 40 Gb disk */
|
||||
SharedIntel2Core4Ram40Disk = "SharedIntel2Core4Ram40Disk",
|
||||
/** CPX21 - AMD 3 Cores, 4 Gb Ram, 80 Gb disk */
|
||||
SharedAmd3Core4Ram80Disk = "SharedAmd3Core4Ram80Disk",
|
||||
/** CAX21 - Arm 4 Cores, 8 Gb Ram, 80 Gb disk */
|
||||
SharedArm4Core8Ram80Disk = "SharedArm4Core8Ram80Disk",
|
||||
/** CX32 - Intel 4 Cores, 8 Gb Ram, 80 Gb disk */
|
||||
SharedIntel4Core8Ram80Disk = "SharedIntel4Core8Ram80Disk",
|
||||
/** CPX31 - AMD 4 Cores, 8 Gb Ram, 160 Gb disk */
|
||||
SharedAmd4Core8Ram160Disk = "SharedAmd4Core8Ram160Disk",
|
||||
/** CAX31 - Arm 8 Cores, 16 Gb Ram, 160 Gb disk */
|
||||
SharedArm8Core16Ram160Disk = "SharedArm8Core16Ram160Disk",
|
||||
/** CX42 - Intel 8 Cores, 16 Gb Ram, 160 Gb disk */
|
||||
SharedIntel8Core16Ram160Disk = "SharedIntel8Core16Ram160Disk",
|
||||
/** CPX41 - AMD 8 Cores, 16 Gb Ram, 240 Gb disk */
|
||||
SharedAmd8Core16Ram240Disk = "SharedAmd8Core16Ram240Disk",
|
||||
/** CAX41 - Arm 16 Cores, 32 Gb Ram, 320 Gb disk */
|
||||
SharedArm16Core32Ram320Disk = "SharedArm16Core32Ram320Disk",
|
||||
/** CX52 - Intel 16 Cores, 32 Gb Ram, 320 Gb disk */
|
||||
SharedIntel16Core32Ram320Disk = "SharedIntel16Core32Ram320Disk",
|
||||
/** CPX51 - AMD 16 Cores, 32 Gb Ram, 360 Gb disk */
|
||||
SharedAmd16Core32Ram360Disk = "SharedAmd16Core32Ram360Disk",
|
||||
/** CCX13 - AMD 2 Cores, 8 Gb Ram, 80 Gb disk */
|
||||
DedicatedAmd2Core8Ram80Disk = "DedicatedAmd2Core8Ram80Disk",
|
||||
/** CCX23 - AMD 4 Cores, 16 Gb Ram, 160 Gb disk */
|
||||
DedicatedAmd4Core16Ram160Disk = "DedicatedAmd4Core16Ram160Disk",
|
||||
/** CCX33 - AMD 8 Cores, 32 Gb Ram, 240 Gb disk */
|
||||
DedicatedAmd8Core32Ram240Disk = "DedicatedAmd8Core32Ram240Disk",
|
||||
/** CCX43 - AMD 16 Cores, 64 Gb Ram, 360 Gb disk */
|
||||
DedicatedAmd16Core64Ram360Disk = "DedicatedAmd16Core64Ram360Disk",
|
||||
/** CCX53 - AMD 32 Cores, 128 Gb Ram, 600 Gb disk */
|
||||
DedicatedAmd32Core128Ram600Disk = "DedicatedAmd32Core128Ram600Disk",
|
||||
/** CCX63 - AMD 48 Cores, 192 Gb Ram, 960 Gb disk */
|
||||
DedicatedAmd48Core192Ram960Disk = "DedicatedAmd48Core192Ram960Disk"
|
||||
}
|
||||
export declare enum HetznerVolumeFormat {
|
||||
Xfs = "Xfs",
|
||||
Ext4 = "Ext4"
|
||||
}
|
||||
export interface HetznerVolumeSpecs {
|
||||
/** A name for the volume */
|
||||
name: string;
|
||||
/** Size of the volume in GB */
|
||||
size_gb: I64;
|
||||
/** The format for the volume */
|
||||
format?: HetznerVolumeFormat;
|
||||
/** Labels for the volume */
|
||||
labels?: Record<string, string>;
|
||||
}
|
||||
/** Hetzner server config. */
|
||||
export interface HetznerServerTemplateConfig {
|
||||
/** ID or name of the Image the Server is created from */
|
||||
image: string;
|
||||
/** ID or name of Datacenter to create Server in */
|
||||
datacenter?: HetznerDatacenter;
|
||||
/**
|
||||
* ID of the Placement Group the server should be in,
|
||||
* Or 0 to not use placement group.
|
||||
*/
|
||||
placement_group?: I64;
|
||||
/** ID or name of the Server type this Server should be created with */
|
||||
server_type?: HetznerServerType;
|
||||
/** SSH key IDs ( integer ) or names ( string ) which should be injected into the Server at creation time */
|
||||
ssh_keys?: string[];
|
||||
/** Network IDs which should be attached to the Server private network interface at the creation time */
|
||||
private_network_ids?: I64[];
|
||||
/** Attach an IPv4 on the public NIC. If false, no IPv4 address will be attached. */
|
||||
enable_public_ipv4?: boolean;
|
||||
/** Attach an IPv6 on the public NIC. If false, no IPv6 address will be attached. */
|
||||
enable_public_ipv6?: boolean;
|
||||
/** Connect to the instance using its public ip. */
|
||||
use_public_ip?: boolean;
|
||||
/**
|
||||
* The port periphery will be running on in AMI.
|
||||
* Default: `8120`
|
||||
*/
|
||||
port: number;
|
||||
/** Whether Periphery will be running on https */
|
||||
use_https: boolean;
|
||||
/** The firewalls to attach to the instance */
|
||||
firewall_ids?: I64[];
|
||||
/** Labels for the server */
|
||||
labels?: Record<string, string>;
|
||||
/** Specs for volumes to attach */
|
||||
volumes?: HetznerVolumeSpecs[];
|
||||
/** Cloud-Init user data to use during Server creation. This field is limited to 32KiB. */
|
||||
user_data: string;
|
||||
}
|
/** Inspect a docker container on the server. Response: [Container]. */
export interface InspectDockerContainer {
/** Id or name */
@@ -5802,16 +5614,6 @@ export interface LatestCommit {
hash: string;
message: string;
}
/**
* Launch an EC2 instance with the specified config.
* Response: [Update].
*/
export interface LaunchServer {
/** The name of the created server. */
name: string;
/** The server template used to define the config. */
server_template: string;
}
/** List actions matching optional query. Response: [ListActionsResponse]. */
export interface ListActions {
/** optional structured query to filter actions. */
@@ -6071,10 +5873,6 @@ export interface ListFullResourceSyncs {
/** optional structured query to filter syncs. */
query?: ResourceSyncQuery;
}
/** List server templates matching structured query. Response: [ListFullServerTemplatesResponse]. */
export interface ListFullServerTemplates {
query?: ServerTemplateQuery;
}
/** List servers matching optional query. Response: [ListFullServersResponse]. */
export interface ListFullServers {
/** optional structured query to filter servers. */
@@ -6144,10 +5942,6 @@ export interface ListSecrets {
*/
target?: ResourceTarget;
}
/** List server templates matching structured query. Response: [ListServerTemplatesResponse]. */
export interface ListServerTemplates {
query?: ServerTemplateQuery;
}
/** List servers matching optional query. Response: [ListServersResponse]. */
export interface ListServers {
/** optional structured query to filter servers. */
@@ -6597,16 +6391,6 @@ export interface RenameServer {
/** The new name. */
name: string;
}
/**
* Rename the ServerTemplate at id to the given name.
* Response: [Update].
*/
export interface RenameServerTemplate {
/** The id or name of the ServerTemplate to rename. */
id: string;
/** The new name. */
name: string;
}
/** Rename the stack at id to the given name. Response: [Update]. */
export interface RenameStack {
/** The id of the stack to rename. */
@@ -6674,7 +6458,6 @@ export interface ResourcesToml {
actions?: ResourceToml<_PartialActionConfig>[];
alerters?: ResourceToml<_PartialAlerterConfig>[];
builders?: ResourceToml<_PartialBuilderConfig>[];
server_templates?: ResourceToml<PartialServerTemplateConfig>[];
resource_syncs?: ResourceToml<_PartialResourceSyncConfig>[];
user_groups?: UserGroupToml[];
variables?: Variable[];
@@ -7063,7 +6846,7 @@ export interface UpdateAlerter {
* field changes occur from out of date local state.
*/
export interface UpdateBuild {
/** The id of the build to update. */
/** The id or name of the build to update. */
id: string;
/** The partial config update to apply. */
config: _PartialBuildConfig;
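
Since the updated doc line above allows a name as well as an id, here is a hedged sketch of an UpdateBuild request body; the build name and the empty partial config are illustrative only:

// Sketch: only fields set in `config` are applied to the stored build.
const updateBuild = {
  type: "UpdateBuild" as const,
  params: {
    id: "my-build", // id or name, per the updated doc comment
    config: {},     // an empty partial config changes nothing
  },
};
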
@@ -7224,22 +7007,6 @@ export interface UpdateServer {
/** The partial config update to apply. */
config: _PartialServerConfig;
}
/**
* Update the server template at the given id, and return the updated server template.
* Response: [ServerTemplate].
*
* Note. This method updates only the fields which are set in the [PartialServerTemplateConfig],
* effectively merging diffs into the final document.
* This is helpful when multiple users are using
* the same resources concurrently by ensuring no unintentional
* field changes occur from out of date local state.
*/
export interface UpdateServerTemplate {
/** The id of the server template to update. */
id: string;
/** The partial config update to apply. */
config: PartialServerTemplateConfig;
}
/**
* **Admin only.** Update a service user's description.
* Response: [User].
@@ -7508,6 +7275,9 @@ export type ExecuteRequest = {
} | {
type: "PullStack";
params: PullStack;
} | {
type: "BatchPullStack";
params: BatchPullStack;
} | {
type: "StartStack";
params: StartStack;
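
A hedged sketch of a request built in the tagged-union shape above, using the BatchPullStack variant; the params shape shown here is an assumption, since this hunk only names the params type:

// Sketch only: assumes BatchPullStack selects target stacks by a pattern.
const batchPull = {
  type: "BatchPullStack" as const,
  params: { pattern: "prod-*" }, // hypothetical wildcard over stack names
};
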
@@ -7571,9 +7341,6 @@ export type ExecuteRequest = {
} | {
type: "BatchRunAction";
params: BatchRunAction;
} | {
type: "LaunchServer";
params: LaunchServer;
} | {
type: "TestAlerter";
params: TestAlerter;
@@ -7668,18 +7435,6 @@ export type ReadRequest = {
} | {
type: "ListFullActions";
params: ListFullActions;
} | {
type: "GetServerTemplate";
params: GetServerTemplate;
} | {
type: "GetServerTemplatesSummary";
params: GetServerTemplatesSummary;
} | {
type: "ListServerTemplates";
params: ListServerTemplates;
} | {
type: "ListFullServerTemplates";
params: ListFullServerTemplates;
} | {
type: "GetServersSummary";
params: GetServersSummary;
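
For contrast with the execute union, a hedged sketch of a ReadRequest using one of the variants listed above; treating GetServersSummary as parameterless is an assumption, not something this hunk states:

// Sketch only.
const readReq = {
  type: "GetServersSummary" as const,
  params: {},
};
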
@@ -8105,21 +7860,6 @@ export type WriteRequest = {
} | {
type: "RenameBuilder";
params: RenameBuilder;
} | {
type: "CreateServerTemplate";
params: CreateServerTemplate;
} | {
type: "CopyServerTemplate";
params: CopyServerTemplate;
} | {
type: "DeleteServerTemplate";
params: DeleteServerTemplate;
} | {
type: "UpdateServerTemplate";
params: UpdateServerTemplate;
} | {
type: "RenameServerTemplate";
params: RenameServerTemplate;
} | {
type: "CreateRepo";
params: CreateRepo;

@@ -269,11 +269,6 @@ export var Operation;
Operation["RenameAlerter"] = "RenameAlerter";
Operation["DeleteAlerter"] = "DeleteAlerter";
Operation["TestAlerter"] = "TestAlerter";
Operation["CreateServerTemplate"] = "CreateServerTemplate";
Operation["UpdateServerTemplate"] = "UpdateServerTemplate";
Operation["RenameServerTemplate"] = "RenameServerTemplate";
Operation["DeleteServerTemplate"] = "DeleteServerTemplate";
Operation["LaunchServer"] = "LaunchServer";
Operation["CreateResourceSync"] = "CreateResourceSync";
Operation["UpdateResourceSync"] = "UpdateResourceSync";
Operation["RenameResourceSync"] = "RenameResourceSync";
@@ -466,13 +461,6 @@ export var StackState;
/** Server not reachable */
StackState["Unknown"] = "unknown";
})(StackState || (StackState = {}));
export var AwsVolumeType;
(function (AwsVolumeType) {
AwsVolumeType["Gp2"] = "gp2";
AwsVolumeType["Gp3"] = "gp3";
AwsVolumeType["Io1"] = "io1";
AwsVolumeType["Io2"] = "io2";
})(AwsVolumeType || (AwsVolumeType = {}));
export var RepoWebhookAction;
(function (RepoWebhookAction) {
RepoWebhookAction["Clone"] = "Clone";
@@ -505,61 +493,6 @@ export var TerminalRecreateMode;
/** Only kill and recreate if the command is different. */
TerminalRecreateMode["DifferentCommand"] = "DifferentCommand";
})(TerminalRecreateMode || (TerminalRecreateMode = {}));
export var HetznerDatacenter;
(function (HetznerDatacenter) {
HetznerDatacenter["Nuremberg1Dc3"] = "Nuremberg1Dc3";
HetznerDatacenter["Helsinki1Dc2"] = "Helsinki1Dc2";
HetznerDatacenter["Falkenstein1Dc14"] = "Falkenstein1Dc14";
HetznerDatacenter["AshburnDc1"] = "AshburnDc1";
HetznerDatacenter["HillsboroDc1"] = "HillsboroDc1";
HetznerDatacenter["SingaporeDc1"] = "SingaporeDc1";
})(HetznerDatacenter || (HetznerDatacenter = {}));
export var HetznerServerType;
(function (HetznerServerType) {
/** CPX11 - AMD 2 Cores, 2 Gb Ram, 40 Gb disk */
HetznerServerType["SharedAmd2Core2Ram40Disk"] = "SharedAmd2Core2Ram40Disk";
/** CAX11 - Arm 2 Cores, 4 Gb Ram, 40 Gb disk */
HetznerServerType["SharedArm2Core4Ram40Disk"] = "SharedArm2Core4Ram40Disk";
/** CX22 - Intel 2 Cores, 4 Gb Ram, 40 Gb disk */
HetznerServerType["SharedIntel2Core4Ram40Disk"] = "SharedIntel2Core4Ram40Disk";
/** CPX21 - AMD 3 Cores, 4 Gb Ram, 80 Gb disk */
HetznerServerType["SharedAmd3Core4Ram80Disk"] = "SharedAmd3Core4Ram80Disk";
/** CAX21 - Arm 4 Cores, 8 Gb Ram, 80 Gb disk */
HetznerServerType["SharedArm4Core8Ram80Disk"] = "SharedArm4Core8Ram80Disk";
/** CX32 - Intel 4 Cores, 8 Gb Ram, 80 Gb disk */
HetznerServerType["SharedIntel4Core8Ram80Disk"] = "SharedIntel4Core8Ram80Disk";
/** CPX31 - AMD 4 Cores, 8 Gb Ram, 160 Gb disk */
HetznerServerType["SharedAmd4Core8Ram160Disk"] = "SharedAmd4Core8Ram160Disk";
/** CAX31 - Arm 8 Cores, 16 Gb Ram, 160 Gb disk */
HetznerServerType["SharedArm8Core16Ram160Disk"] = "SharedArm8Core16Ram160Disk";
/** CX42 - Intel 8 Cores, 16 Gb Ram, 160 Gb disk */
HetznerServerType["SharedIntel8Core16Ram160Disk"] = "SharedIntel8Core16Ram160Disk";
/** CPX41 - AMD 8 Cores, 16 Gb Ram, 240 Gb disk */
HetznerServerType["SharedAmd8Core16Ram240Disk"] = "SharedAmd8Core16Ram240Disk";
/** CAX41 - Arm 16 Cores, 32 Gb Ram, 320 Gb disk */
HetznerServerType["SharedArm16Core32Ram320Disk"] = "SharedArm16Core32Ram320Disk";
/** CX52 - Intel 16 Cores, 32 Gb Ram, 320 Gb disk */
HetznerServerType["SharedIntel16Core32Ram320Disk"] = "SharedIntel16Core32Ram320Disk";
/** CPX51 - AMD 16 Cores, 32 Gb Ram, 360 Gb disk */
HetznerServerType["SharedAmd16Core32Ram360Disk"] = "SharedAmd16Core32Ram360Disk";
/** CCX13 - AMD 2 Cores, 8 Gb Ram, 80 Gb disk */
HetznerServerType["DedicatedAmd2Core8Ram80Disk"] = "DedicatedAmd2Core8Ram80Disk";
/** CCX23 - AMD 4 Cores, 16 Gb Ram, 160 Gb disk */
HetznerServerType["DedicatedAmd4Core16Ram160Disk"] = "DedicatedAmd4Core16Ram160Disk";
/** CCX33 - AMD 8 Cores, 32 Gb Ram, 240 Gb disk */
HetznerServerType["DedicatedAmd8Core32Ram240Disk"] = "DedicatedAmd8Core32Ram240Disk";
/** CCX43 - AMD 16 Cores, 64 Gb Ram, 360 Gb disk */
HetznerServerType["DedicatedAmd16Core64Ram360Disk"] = "DedicatedAmd16Core64Ram360Disk";
/** CCX53 - AMD 32 Cores, 128 Gb Ram, 600 Gb disk */
HetznerServerType["DedicatedAmd32Core128Ram600Disk"] = "DedicatedAmd32Core128Ram600Disk";
/** CCX63 - AMD 48 Cores, 192 Gb Ram, 960 Gb disk */
HetznerServerType["DedicatedAmd48Core192Ram960Disk"] = "DedicatedAmd48Core192Ram960Disk";
})(HetznerServerType || (HetznerServerType = {}));
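
Reading the enum above: the variant names encode the specs, while the doc comments carry the matching Hetzner plan codes. A one-line, purely illustrative example:

// SharedArm2Core4Ram40Disk is documented above as Hetzner's CAX11 plan.
const plan = HetznerServerType.SharedArm2Core4Ram40Disk;
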
export var HetznerVolumeFormat;
(function (HetznerVolumeFormat) {
HetznerVolumeFormat["Xfs"] = "Xfs";
HetznerVolumeFormat["Ext4"] = "Ext4";
})(HetznerVolumeFormat || (HetznerVolumeFormat = {}));
export var PortTypeEnum;
(function (PortTypeEnum) {
PortTypeEnum["EMPTY"] = "";

@@ -2,10 +2,8 @@
import {
WebhookIdOrName,
useCtrlKeyListener,
useInvalidate,
useRead,
useWebhookIdOrName,
useWrite,
WebhookIntegration,
useWebhookIntegrations,
} from "@lib/hooks";
@@ -23,7 +21,6 @@ import { Switch } from "@ui/switch";
import {
CheckCircle,
MinusCircle,
Pen,
PlusCircle,
Save,
Search,
@@ -64,8 +61,6 @@ import {
} from "@components/monaco";
import { useSettingsView } from "@pages/settings";
import { useNavigate } from "react-router-dom";
import { useToast } from "@ui/use-toast";
import { UsableResource } from "@types";

export const ConfigItem = ({
label,
@@ -323,6 +318,7 @@ export const ProviderSelectorConfig = (params: {
variant="outline"
onClick={params.onHttpsSwitch}
className="py-0 px-2"
disabled={params.disabled}
>
{`http${params.https ? "s" : ""}://`}
</Button>
@@ -1069,45 +1065,6 @@ export const PermissionLevelSelector = ({
);
};

export const RenameResource = ({
type,
id,
}: {
type: UsableResource;
id: string;
}) => {
const invalidate = useInvalidate();

const { toast } = useToast();
const { mutate, isPending } = useWrite(`Rename${type}`, {
onSuccess: () => {
invalidate([`List${type}s`]);
toast({ title: `${type} Renamed` });
set("");
},
});

const [name, set] = useState("");

return (
<div className="flex gap-4 w-full justify-end flex-wrap">
<Input
value={name}
onChange={(e) => set(e.target.value)}
className="w-96"
placeholder="Enter new name"
/>
<ConfirmButton
title="Rename"
icon={<Pen className="w-4 h-4" />}
disabled={!name || isPending}
loading={isPending}
onClick={() => mutate({ id, name })}
/>
</div>
);
};
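
A hedged usage sketch for the component defined just above; "Server" stands in for any UsableResource value and server_id is a hypothetical variable:

// Sketch only: renders the rename input plus confirm button for one resource.
<RenameResource type="Server" id={server_id} />
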

export const WebhookBuilder = ({
git_provider,
children,

@@ -136,12 +136,7 @@ const useOmniItems = (
onSelect: () => nav("/"),
},
...RESOURCE_TARGETS.map((_type) => {
const type =
_type === "ResourceSync"
? "Sync"
: _type === "ServerTemplate"
? "Template"
: _type;
const type = _type === "ResourceSync" ? "Sync" : _type;
const Components = ResourceComponents[_type];
return {
key: type + "s",
@@ -173,12 +168,7 @@ const useOmniItems = (
}),
...Object.fromEntries(
RESOURCE_TARGETS.map((_type) => {
const type =
_type === "ResourceSync"
? "Sync"
: _type === "ServerTemplate"
? "Template"
: _type;
const type = _type === "ResourceSync" ? "Sync" : _type;
const lower_type = type.toLowerCase();
const Components = ResourceComponents[_type];
return [

@@ -201,6 +201,17 @@ export const ActionConfig = ({ id }: { id: string }) => {
},
},
},
{
label: "Reload",
labelHidden: true,
components: {
reload_deno_deps: {
label: "Reload Dependencies",
description:
"Whether deno will be instructed to reload all dependencies. This can usually be kept disabled outside of development.",
},
},
},
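// Hedged aside: the "reload all dependencies" toggle above presumably maps to
// Deno's --reload flag (e.g. `deno run --reload script.ts`), which re-fetches
// cached remote modules instead of reusing the local cache.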
{
label: "Webhook",
description: `Copy the webhook given here, and configure your ${webhook_integration}-style repo provider to send webhooks to Komodo`,

@@ -16,7 +16,6 @@ import {
import { cn, updateLogToHtml } from "@lib/utils";
import { Types } from "komodo_client";
import { DashboardPieChart } from "@pages/home/dashboard";
import { RenameResource } from "@components/config/util";
import { GroupActions } from "@components/group-actions";
import { Tooltip, TooltipContent, TooltipTrigger } from "@ui/tooltip";
import { Card } from "@ui/card";
@@ -149,12 +148,7 @@ export const ActionComponents: RequiredResourceComponents = {

Config: ActionConfig,

DangerZone: ({ id }) => (
<>
<RenameResource type="Action" id={id} />
<DeleteResource type="Action" id={id} />
</>
),
DangerZone: ({ id }) => <DeleteResource type="Action" id={id} />,

ResourcePageHeader: ({ id }) => {
const action = useAction(id);
@@ -163,6 +157,8 @@ export const ActionComponents: RequiredResourceComponents = {
<ResourcePageHeader
intent={action_state_intention(action?.info.state)}
icon={<ActionIcon id={id} size={8} />}
type="Action"
id={id}
name={action?.name}
state={action?.info.state}
status={undefined}

@@ -8,7 +8,6 @@ import { DeleteResource, NewResource } from "../common";
import { AlerterTable } from "./table";
import { Types } from "komodo_client";
import { ConfirmButton, ResourcePageHeader } from "@components/util";
import { RenameResource } from "@components/config/util";
import { GroupActions } from "@components/group-actions";

const useAlerter = (id?: string) =>
@@ -86,12 +85,7 @@ export const AlerterComponents: RequiredResourceComponents = {

Config: AlerterConfig,

DangerZone: ({ id }) => (
<>
<RenameResource type="Alerter" id={id} />
<DeleteResource type="Alerter" id={id} />
</>
),
DangerZone: ({ id }) => <DeleteResource type="Alerter" id={id} />,

ResourcePageHeader: ({ id }) => {
const alerter = useAlerter(id);
@@ -99,6 +93,8 @@ export const AlerterComponents: RequiredResourceComponents = {
<ResourcePageHeader
intent="None"
icon={<AlarmClock className="w-8" />}
type="Alerter"
id={id}
name={alerter?.name}
state={alerter?.info.enabled ? "Enabled" : "Disabled"}
status={alerter?.info.endpoint_type}

@@ -617,16 +617,19 @@ export const BuildConfig = ({
update.dockerfile === undefined &&
!(update.repo ?? config.repo);
return (
<MonacoEditor
value={
show_default
? DEFAULT_BUILD_DOCKERFILE_CONTENTS
: dockerfile
}
onValueChange={(dockerfile) => set({ dockerfile })}
language="dockerfile"
readOnly={disabled}
/>
<div className="flex flex-col gap-4">
<SecretsSearch />
<MonacoEditor
value={
show_default
? DEFAULT_BUILD_DOCKERFILE_CONTENTS
: dockerfile
}
onValueChange={(dockerfile) => set({ dockerfile })}
language="dockerfile"
readOnly={disabled}
/>
</div>
);
},
},

@@ -29,7 +29,6 @@ import { Badge } from "@ui/badge";
import { useToast } from "@ui/use-toast";
import { Button } from "@ui/button";
import { useBuilder } from "../builder";
import { RenameResource } from "@components/config/util";
import { GroupActions } from "@components/group-actions";
import { Tooltip, TooltipContent, TooltipTrigger } from "@ui/tooltip";
import { BuildInfo } from "./info";
@@ -282,12 +281,7 @@ export const BuildComponents: RequiredResourceComponents = {

Config: ConfigInfoDeployments,

DangerZone: ({ id }) => (
<>
<RenameResource type="Build" id={id} />
<DeleteResource type="Build" id={id} />
</>
),
DangerZone: ({ id }) => <DeleteResource type="Build" id={id} />,

ResourcePageHeader: ({ id }) => {
const build = useBuild(id);
@@ -296,6 +290,8 @@ export const BuildComponents: RequiredResourceComponents = {
<ResourcePageHeader
intent={build_state_intention(build?.info.state)}
icon={<BuildIcon id={id} size={8} />}
type="Build"
id={id}
name={build?.name}
state={build?.info.state}
status=""

@@ -19,7 +19,6 @@ import { BuilderConfig } from "./config";
import { DeleteResource, ResourceLink } from "../common";
import { BuilderTable } from "./table";
import { ResourcePageHeader } from "@components/util";
import { RenameResource } from "@components/config/util";
import { GroupActions } from "@components/group-actions";

export const useBuilder = (id?: string) =>
@@ -148,12 +147,7 @@ export const BuilderComponents: RequiredResourceComponents = {

Config: BuilderConfig,

DangerZone: ({ id }) => (
<>
<RenameResource type="Builder" id={id} />
<DeleteResource type="Builder" id={id} />
</>
),
DangerZone: ({ id }) => <DeleteResource type="Builder" id={id} />,

ResourcePageHeader: ({ id }) => {
const builder = useBuilder(id);
@@ -162,6 +156,8 @@ export const BuilderComponents: RequiredResourceComponents = {
<ResourcePageHeader
intent="None"
icon={<Factory className="w-8" />}
type="Builder"
id={id}
name={builder?.name}
state={builder?.info.builder_type}
status={builder?.info.instance_type}
Some files were not shown because too many files have changed in this diff.